Column `ngram` — lists of overlapping word shingles taken from source files (list lengths range from 0 to 67.8k). Each row below covers one source file, and a row's shingles merge back into the code shown.
[ "= round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}')", "1 def remaining_balance(min_monthly_payment_rate, balance, ann): payment = round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann /", "def remaining_balance(min_monthly_payment_rate, balance, ann): payment = round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann / 12", "ann): payment = round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann / 12 * balance, 2)", "return balance - principal_paid, payment, principal_paid bal = float(input(\"Enter the outstanding balance on", "the minimum monthly payment rate as a decimal: \")) total_amount_paid = 0 for", "= float(input(\"Enter the minimum monthly payment rate as a decimal: \")) total_amount_paid =", "\")) monthly_payment_rate = float(input(\"Enter the minimum monthly payment rate as a decimal: \"))", "${remaining_bal}') bal = remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total amount paid: ${round(total_amount_paid, 2)}')", "print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal", "the annual credit card interest rate as a decimal: \")) monthly_payment_rate = float(input(\"Enter", "- principal_paid, payment, principal_paid bal = float(input(\"Enter the outstanding balance on your credit", "float(input(\"Enter the outstanding balance on your credit card: \")) annual_interest_rate = float(input(\"Enter the", "principal_paid, payment, principal_paid bal = float(input(\"Enter the outstanding balance on your credit card:", "as a decimal: \")) total_amount_paid = 0 for x in range(12): remaining_bal, min_monthly,", "min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly", "round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann / 12 * balance, 2) principal_paid = payment", "payment: ${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal total_amount_paid +=", "float(input(\"Enter the annual credit card interest rate as a decimal: \")) monthly_payment_rate =", "principal_paid bal = float(input(\"Enter the outstanding balance on your credit card: \")) annual_interest_rate", "decimal: \")) monthly_payment_rate = float(input(\"Enter the minimum monthly payment rate as a decimal:", "remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal, 2),", "2) interest_paid = round(ann / 12 * balance, 2) principal_paid = payment -", "rate as a decimal: \")) total_amount_paid = 0 for x in range(12): remaining_bal,", "remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum", "= float(input(\"Enter the outstanding balance on your credit card: \")) annual_interest_rate = float(input(\"Enter", "Problem 1 def remaining_balance(min_monthly_payment_rate, balance, ann): payment = round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann", "monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal total_amount_paid", "balance, 2) principal_paid = payment - interest_paid return balance - principal_paid, payment, principal_paid", "payment - interest_paid return 
balance - principal_paid, payment, principal_paid bal = float(input(\"Enter the", "annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}')", "outstanding balance on your credit card: \")) annual_interest_rate = float(input(\"Enter the annual credit", "round(ann / 12 * balance, 2) principal_paid = payment - interest_paid return balance", "bal = float(input(\"Enter the outstanding balance on your credit card: \")) annual_interest_rate =", "= float(input(\"Enter the annual credit card interest rate as a decimal: \")) monthly_payment_rate", "= remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin,", "annual credit card interest rate as a decimal: \")) monthly_payment_rate = float(input(\"Enter the", "2), round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid:", "remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total amount paid: ${round(total_amount_paid, 2)}') print(f'Remaining balance: ${bal}')", "credit card interest rate as a decimal: \")) monthly_payment_rate = float(input(\"Enter the minimum", "- interest_paid return balance - principal_paid, payment, principal_paid bal = float(input(\"Enter the outstanding", "${prin}') print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total amount", "paid: ${prin}') print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total", "range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal,", "print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total amount paid:", "card interest rate as a decimal: \")) monthly_payment_rate = float(input(\"Enter the minimum monthly", "2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining", "= round(ann / 12 * balance, 2) principal_paid = payment - interest_paid return", "minimum monthly payment rate as a decimal: \")) total_amount_paid = 0 for x", "payment = round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann / 12 * balance, 2) principal_paid", "monthly_payment_rate = float(input(\"Enter the minimum monthly payment rate as a decimal: \")) total_amount_paid", "\")) annual_interest_rate = float(input(\"Enter the annual credit card interest rate as a decimal:", "remaining_balance(min_monthly_payment_rate, balance, ann): payment = round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann / 12 *", "balance on your credit card: \")) annual_interest_rate = float(input(\"Enter the annual credit card", "12 * balance, 2) principal_paid = payment - interest_paid return balance - principal_paid,", "on your credit card: \")) annual_interest_rate = float(input(\"Enter the annual credit card interest", "as a decimal: \")) monthly_payment_rate = float(input(\"Enter the minimum monthly payment rate as", "rate as a decimal: \")) monthly_payment_rate = float(input(\"Enter the minimum monthly payment rate", "print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}') 
print(f'Remaining balance: ${remaining_bal}') bal", "print(f'Principle paid: ${prin}') print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal total_amount_paid += min_monthly print(\"RESULT\")", "credit card: \")) annual_interest_rate = float(input(\"Enter the annual credit card interest rate as", "bal = remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total amount paid: ${round(total_amount_paid, 2)}') print(f'Remaining", "balance, ann): payment = round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann / 12 * balance,", "balance - principal_paid, payment, principal_paid bal = float(input(\"Enter the outstanding balance on your", "for x in range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly,", "principal_paid = payment - interest_paid return balance - principal_paid, payment, principal_paid bal =", "= round(min_monthly_payment_rate*balance, 2) interest_paid = round(ann / 12 * balance, 2) principal_paid =", "the outstanding balance on your credit card: \")) annual_interest_rate = float(input(\"Enter the annual", "remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2)", "total_amount_paid = 0 for x in range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal,", "2) principal_paid = payment - interest_paid return balance - principal_paid, payment, principal_paid bal", "prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2),", "decimal: \")) total_amount_paid = 0 for x in range(12): remaining_bal, min_monthly, prin =", "payment rate as a decimal: \")) total_amount_paid = 0 for x in range(12):", "interest_paid = round(ann / 12 * balance, 2) principal_paid = payment - interest_paid", "card: \")) annual_interest_rate = float(input(\"Enter the annual credit card interest rate as a", "interest rate as a decimal: \")) monthly_payment_rate = float(input(\"Enter the minimum monthly payment", "# Problem 1 def remaining_balance(min_monthly_payment_rate, balance, ann): payment = round(min_monthly_payment_rate*balance, 2) interest_paid =", "balance: ${remaining_bal}') bal = remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total amount paid: ${round(total_amount_paid,", "monthly payment rate as a decimal: \")) total_amount_paid = 0 for x in", "annual_interest_rate = float(input(\"Enter the annual credit card interest rate as a decimal: \"))", "float(input(\"Enter the minimum monthly payment rate as a decimal: \")) total_amount_paid = 0", "prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly payment:", "a decimal: \")) total_amount_paid = 0 for x in range(12): remaining_bal, min_monthly, prin", "round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}')", "bal, annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2) print(f'Month:", "your credit card: \")) annual_interest_rate = float(input(\"Enter the annual credit card interest rate", "2) print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining balance: ${remaining_bal}')", "a 
decimal: \")) monthly_payment_rate = float(input(\"Enter the minimum monthly payment rate as a", "interest_paid return balance - principal_paid, payment, principal_paid bal = float(input(\"Enter the outstanding balance", "payment, principal_paid bal = float(input(\"Enter the outstanding balance on your credit card: \"))", "in range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin =", "/ 12 * balance, 2) principal_paid = payment - interest_paid return balance -", "x in range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin", "= payment - interest_paid return balance - principal_paid, payment, principal_paid bal = float(input(\"Enter", "round(remaining_bal, 2), round(min_monthly, 2), round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle", "= 0 for x in range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate)", "0 for x in range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal,", "= remaining_bal total_amount_paid += min_monthly print(\"RESULT\") print(f'Total amount paid: ${round(total_amount_paid, 2)}') print(f'Remaining balance:", "min_monthly, prin = remaining_balance(monthly_payment_rate, bal, annual_interest_rate) remaining_bal, min_monthly, prin = round(remaining_bal, 2), round(min_monthly,", "{x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining balance: ${remaining_bal}') bal =", "\")) total_amount_paid = 0 for x in range(12): remaining_bal, min_monthly, prin = remaining_balance(monthly_payment_rate,", "${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining balance: ${remaining_bal}') bal = remaining_bal total_amount_paid += min_monthly", "round(prin, 2) print(f'Month: {x+1}') print(f'Minimum monthly payment: ${min_monthly}') print(f'Principle paid: ${prin}') print(f'Remaining balance:", "* balance, 2) principal_paid = payment - interest_paid return balance - principal_paid, payment," ]
[ "User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism', } class InitialContextToken(core.Sequence): class_", "('innerContextToken', core.Any, {'optional': False}), ] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs = { 'KRB5", "5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 -", "= { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2'", "Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos", "'1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User", "MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional': False}), ] _oid_pair", "Extended Negotiation Security Mechanism', } class InitialContextToken(core.Sequence): class_ = 1 tag = 0", "core.Any, {'optional': False}), ] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs = { 'KRB5 -", "Ticket, EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION = 1 CONTEXT = 2 TAG", "= 0 _fields = [ ('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional': False}),", "'1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism', } class InitialContextToken(core.Sequence): class_ =", "minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION = 1", "'KRB5 - Kerberos 5 - User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended", "Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5", "} class InitialContextToken(core.Sequence): class_ = 1 tag = 0 _fields = [ ('thisMech',", "KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5", "{'optional': False}), ] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs = { 'KRB5 - Kerberos", "'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5", "_oid_pair = ('thisMech', 'innerContextToken') _oid_specs = { 'KRB5 - Kerberos 5': AP_REQ, }", "User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism', } class", "= 1 tag = 0 _fields = [ ('thisMech', MechType, {'optional': False}), ('unk_bool',", "Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX", "to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism', } class InitialContextToken(core.Sequence):", "from minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION =", "= 2 TAG = 'explicit' class MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10':", "UNIVERSAL = 0 APPLICATION = 1 CONTEXT = 2 TAG = 'explicit' class", "'MS KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3':", "import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION = 1 CONTEXT", ": 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to", "{'optional': False}), ('innerContextToken', core.Any, {'optional': False}), ] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs =", "2 TAG = 
'explicit' class MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP", "import core from minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL = 0", "False}), ] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs = { 'KRB5 - Kerberos 5':", "_fields = [ ('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any,", "- Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 -", "Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5',", "'NEGOEX - SPNEGO Extended Negotiation Security Mechanism', } class InitialContextToken(core.Sequence): class_ = 1", "Mechanism', } class InitialContextToken(core.Sequence): class_ = 1 tag = 0 _fields = [", "- SPNEGO Extended Negotiation Security Mechanism', } class InitialContextToken(core.Sequence): class_ = 1 tag", "tag = 0 _fields = [ ('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional':", "Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5", "= [ ('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional':", "core from minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION", "TAG = 'explicit' class MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP -", "class MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security", "] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs = { 'KRB5 - Kerberos 5': AP_REQ,", "Security Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2' :", "Negotiation Security Mechanism', } class InitialContextToken(core.Sequence): class_ = 1 tag = 0 _fields", "{ #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2' :", "= 0 APPLICATION = 1 CONTEXT = 2 TAG = 'explicit' class MechType(core.ObjectIdentifier):", "EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION = 1 CONTEXT = 2 TAG =", "AP_REQ UNIVERSAL = 0 APPLICATION = 1 CONTEXT = 2 TAG = 'explicit'", "Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 -", "krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION = 1 CONTEXT =", "5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX -", "False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional': False}), ] _oid_pair = ('thisMech',", "1 tag = 0 _fields = [ ('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean,", "Kerberos 5 - User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security", "{'optional': False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional': False}), ] _oid_pair =", "APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL = 0 APPLICATION = 1 CONTEXT = 2", "5 - User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism',", "0 APPLICATION = 1 CONTEXT = 2 
TAG = 'explicit' class MechType(core.ObjectIdentifier): _map", "core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional': False}), ] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs", "class InitialContextToken(core.Sequence): class_ = 1 tag = 0 _fields = [ ('thisMech', MechType,", "0 _fields = [ ('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken',", "= 'explicit' class MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft", "1 CONTEXT = 2 TAG = 'explicit' class MechType(core.ObjectIdentifier): _map = { #'':", "('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional': False}), ] _oid_pair = ('thisMech', 'innerContextToken')", "InitialContextToken(core.Sequence): class_ = 1 tag = 0 _fields = [ ('thisMech', MechType, {'optional':", "MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support", "'KRB5 - Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User',", "'explicit' class MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM", "'NTLMSSP - Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft", "CONTEXT = 2 TAG = 'explicit' class MechType(core.ObjectIdentifier): _map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30',", "'1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 -", "class_ = 1 tag = 0 _fields = [ ('thisMech', MechType, {'optional': False}),", "[ ('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional': False}),", "False}), ('innerContextToken', core.Any, {'optional': False}), ] _oid_pair = ('thisMech', 'innerContextToken') _oid_specs = {", "- Kerberos 5', '1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User', '1.3.6.1.4.1.311.2.2.30':", "#'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2' : 'MS", "APPLICATION = 1 CONTEXT = 2 TAG = 'explicit' class MechType(core.ObjectIdentifier): _map =", "- Kerberos 5 - User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation", "Security Mechanism', } class InitialContextToken(core.Sequence): class_ = 1 tag = 0 _fields =", "NTLM Security Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2'", "_map = { #'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider',", "- Microsoft NTLM Security Support Provider', '1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos", "('thisMech', MechType, {'optional': False}), ('unk_bool', core.Boolean, {'optional': False}), ('innerContextToken', core.Any, {'optional': False}), ]", "'1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO", "'1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos", "asn1crypto import core from minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL =", "= 1 CONTEXT = 2 TAG = 'explicit' class MechType(core.ObjectIdentifier): _map = {", "SPNEGO 
Extended Negotiation Security Mechanism', } class InitialContextToken(core.Sequence): class_ = 1 tag =", "from asn1crypto import core from minikerberos.protocol.asn1_structs import krb5int32, APOptions, Ticket, EncryptedData, AP_REQ UNIVERSAL", "- User to User', '1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism', }", ": 'MS KRB5 - Microsoft Kerberos 5', '1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5'," ]
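This mirrors the GSS-API InitialContextToken framing (an [APPLICATION 0] SEQUENCE carrying a mechanism OID plus a mechanism-specific token, per RFC 2743; the `unk_bool` field is a nonstandard extra). A minimal usage sketch, assuming `token_bytes` holds a DER-encoded token (the variable is hypothetical):

```python
# Usage sketch: token_bytes is a hypothetical DER-encoded GSS-API token.
token = InitialContextToken.load(token_bytes)

print(token['thisMech'].native)   # mapped name, e.g. 'KRB5 - Kerberos 5'
# Via _oid_pair/_oid_specs, asn1crypto parses innerContextToken with the
# spec registered for thisMech (AP_REQ for the plain Kerberos 5 OID).
ap_req = token['innerContextToken']
```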
[ "read_csv #from pandas import datetime # #def parser(x): # return datetime.strptime(x, '%Y-%m-%d') #", "< best_score: best_score, best_cfg = mse, order print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue", "and return RMSE def evaluate_arima_model(X, arima_order): # prepare training dataset X = X.astype('float32')", "model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out of sample error mse = mean_squared_error(test, predictions)", "from pandas import read_csv # evaluate an ARIMA model for a given order", "q_values): dataset = dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None for p in p_values:", "return datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head())", "0.50) train, test = X[0:train_size], X[train_size:] history = [x for x in train]", "= ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate", "RMSE def evaluate_arima_model(X, arima_order): # prepare training dataset X = X.astype('float32') train_size =", "q_values: order = (p,d,q) try: mse = evaluate_arima_model(dataset, order) if mse < best_score:", "DataFrame #from pandas import read_csv #from pandas import datetime # #def parser(x): #", "[x for x in train] # make predictions predictions = list() for t", "evaluate_arima_model(X, arima_order): # prepare training dataset X = X.astype('float32') train_size = int(len(X) *", "p_values = range(0,13) d_values = range(0, 4) q_values = range(0, 13) warnings.filterwarnings(\"ignore\") evaluate_models(series.values,", "x in train] # make predictions predictions = list() for t in range(len(test)):", "import sqrt from pandas import datetime from pandas import read_csv # evaluate an", "predictions predictions = list() for t in range(len(test)): model = ARIMA(history, order=arima_order) model_fit", "q values for an ARIMA model def evaluate_models(dataset, p_values, d_values, q_values): dataset =", "12:14:06 2018 @author: Admin \"\"\" #from pandas import Series #from statsmodels.graphics.tsaplots import plot_acf", "model def evaluate_models(dataset, p_values, d_values, q_values): dataset = dataset.astype('float32') best_score, best_cfg = float(\"inf\"),", "pandas import datetime from pandas import read_csv # evaluate an ARIMA model for", "= mean_squared_error(test, predictions) rmse = sqrt(mse) return rmse # evaluate combinations of p,", "statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import pyplot #from pandas", "read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212)", "= (p,d,q) try: mse = evaluate_arima_model(dataset, order) if mse < best_score: best_score, best_cfg", "import Series #from statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import", "p, d and q values for an ARIMA model def evaluate_models(dataset, p_values, d_values,", "statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import pyplot #from pandas import DataFrame #from pandas", "float(\"inf\"), None for p in p_values: for d in d_values: for q in", "mse = evaluate_arima_model(dataset, order) if mse < best_score: best_score, best_cfg 
= mse, order", "#pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from pandas import Series from statsmodels.tsa.arima_model import", "pandas import Series from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from math", "mse = mean_squared_error(test, predictions) rmse = sqrt(mse) return rmse # evaluate combinations of", "model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out of sample error mse", "read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate parameters p_values = range(0,13) d_values", "d_values = range(0, 4) q_values = range(0, 13) warnings.filterwarnings(\"ignore\") evaluate_models(series.values, p_values, d_values, q_values)", "#from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import pyplot #from pandas import DataFrame #from", "Tue Jun 19 12:14:06 2018 @author: Admin \"\"\" #from pandas import Series #from", "# #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from pandas", "#from pandas import read_csv #from pandas import datetime # #def parser(x): # return", "rmse # evaluate combinations of p, d and q values for an ARIMA", "for q in q_values: order = (p,d,q) try: mse = evaluate_arima_model(dataset, order) if", "a given order (p,d,q) and return RMSE def evaluate_arima_model(X, arima_order): # prepare training", "= sqrt(mse) return rmse # evaluate combinations of p, d and q values", "if mse < best_score: best_score, best_cfg = mse, order print('ARIMA%s MSE=%.3f' % (order,mse))", "datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) #", "utf-8 -*- \"\"\" Created on Tue Jun 19 12:14:06 2018 @author: Admin \"\"\"", "of p, d and q values for an ARIMA model def evaluate_models(dataset, p_values,", "import datetime from pandas import read_csv # evaluate an ARIMA model for a", "squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import", "% (best_cfg, best_score)) # load dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d') series =", "series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate parameters p_values =", "#from pandas import Series from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from", "in q_values: order = (p,d,q) try: mse = evaluate_arima_model(dataset, order) if mse <", "order = (p,d,q) try: mse = evaluate_arima_model(dataset, order) if mse < best_score: best_score,", "p_values: for d in d_values: for q in q_values: order = (p,d,q) try:", "(order,mse)) except: continue print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score)) # load dataset def", "history.append(test[t]) # calculate out of sample error mse = mean_squared_error(test, predictions) rmse =", "= float(\"inf\"), None for p in p_values: for d in d_values: for q", "sklearn.metrics import mean_squared_error from math import sqrt from pandas import datetime from pandas", "\"\"\" #from pandas import Series #from statsmodels.graphics.tsaplots import plot_acf #from 
statsmodels.graphics.tsaplots import plot_pacf", "list() for t in range(len(test)): model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat", "Series #from statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import pyplot", "index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show()", "#plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from pandas import Series from statsmodels.tsa.arima_model import ARIMA", "d_values: for q in q_values: order = (p,d,q) try: mse = evaluate_arima_model(dataset, order)", "import Series from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from math import", "best_score)) # load dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0,", "#from statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import pyplot #from", "for d in d_values: for q in q_values: order = (p,d,q) try: mse", "MSE=%.3f' % (order,mse)) except: continue print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score)) # load", "for p in p_values: for d in d_values: for q in q_values: order", "# -*- coding: utf-8 -*- \"\"\" Created on Tue Jun 19 12:14:06 2018", "an ARIMA model for a given order (p,d,q) and return RMSE def evaluate_arima_model(X,", "(best_cfg, best_score)) # load dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv',", "\"\"\" Created on Tue Jun 19 12:14:06 2018 @author: Admin \"\"\" #from pandas", "plot_pacf #from matplotlib import pyplot #from pandas import DataFrame #from pandas import read_csv", "dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None for p in p_values: for d in", "X = X.astype('float32') train_size = int(len(X) * 0.50) train, test = X[0:train_size], X[train_size:]", "return datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate", "#pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from pandas import", "train, test = X[0:train_size], X[train_size:] history = [x for x in train] #", "#from pandas import DataFrame #from pandas import read_csv #from pandas import datetime #", "d and q values for an ARIMA model def evaluate_models(dataset, p_values, d_values, q_values):", "date_parser=parser) # evaluate parameters p_values = range(0,13) d_values = range(0, 4) q_values =", "in d_values: for q in q_values: order = (p,d,q) try: mse = evaluate_arima_model(dataset,", "#from pandas import datetime # #def parser(x): # return datetime.strptime(x, '%Y-%m-%d') # #series", "evaluate_models(dataset, p_values, d_values, q_values): dataset = dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None for", "header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate parameters p_values = range(0,13) d_values =", "pandas import DataFrame #from pandas import read_csv #from pandas import datetime # #def", "ARIMA model for a given order (p,d,q) and return RMSE def evaluate_arima_model(X, arima_order):", "test = X[0:train_size], 
X[train_size:] history = [x for x in train] # make", "parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca())", "predictions) rmse = sqrt(mse) return rmse # evaluate combinations of p, d and", "#pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from pandas import Series", "dataset = dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None for p in p_values: for", "datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate parameters", "print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score)) #", "dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True,", "on Tue Jun 19 12:14:06 2018 @author: Admin \"\"\" #from pandas import Series", "prepare training dataset X = X.astype('float32') train_size = int(len(X) * 0.50) train, test", "ARIMA%s MSE=%.3f' % (best_cfg, best_score)) # load dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d')", "# evaluate an ARIMA model for a given order (p,d,q) and return RMSE", "from math import sqrt from pandas import datetime from pandas import read_csv #", "ax=pyplot.gca()) #pyplot.show() import warnings #from pandas import Series from statsmodels.tsa.arima_model import ARIMA from", "for a given order (p,d,q) and return RMSE def evaluate_arima_model(X, arima_order): # prepare", "= dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None for p in p_values: for d", "from pandas import datetime from pandas import read_csv # evaluate an ARIMA model", "parameters p_values = range(0,13) d_values = range(0, 4) q_values = range(0, 13) warnings.filterwarnings(\"ignore\")", "#def parser(x): # return datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0,", "##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from", "index_col=0, squeeze=True, date_parser=parser) # evaluate parameters p_values = range(0,13) d_values = range(0, 4)", "#plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from pandas import Series from", "= read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca())", "#from matplotlib import pyplot #from pandas import DataFrame #from pandas import read_csv #from", "pyplot #from pandas import DataFrame #from pandas import read_csv #from pandas import datetime", "rmse = sqrt(mse) return rmse # evaluate combinations of p, d and q", "try: mse = evaluate_arima_model(dataset, order) if mse < best_score: best_score, best_cfg = mse,", "make predictions predictions = list() for t in range(len(test)): model = ARIMA(history, order=arima_order)", "header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) 
#pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series,", "def parser(x): return datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)", "plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import pyplot #from pandas import DataFrame", "#from pandas import Series #from statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from", "read_csv # evaluate an ARIMA model for a given order (p,d,q) and return", "given order (p,d,q) and return RMSE def evaluate_arima_model(X, arima_order): # prepare training dataset", "# prepare training dataset X = X.astype('float32') train_size = int(len(X) * 0.50) train,", "int(len(X) * 0.50) train, test = X[0:train_size], X[train_size:] history = [x for x", "for x in train] # make predictions predictions = list() for t in", "continue print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score)) # load dataset def parser(x): return", "load dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0,", "evaluate parameters p_values = range(0,13) d_values = range(0, 4) q_values = range(0, 13)", "arima_order): # prepare training dataset X = X.astype('float32') train_size = int(len(X) * 0.50)", "import datetime # #def parser(x): # return datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv',", "predictions.append(yhat) history.append(test[t]) # calculate out of sample error mse = mean_squared_error(test, predictions) rmse", "= list() for t in range(len(test)): model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0)", "= X.astype('float32') train_size = int(len(X) * 0.50) train, test = X[0:train_size], X[train_size:] history", "mean_squared_error from math import sqrt from pandas import datetime from pandas import read_csv", "for t in range(len(test)): model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat =", "= model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out of sample error", "and q values for an ARIMA model def evaluate_models(dataset, p_values, d_values, q_values): dataset", "# load dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0, parse_dates=[0],", "Series from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from math import sqrt", "parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate parameters p_values = range(0,13) d_values = range(0,", "Jun 19 12:14:06 2018 @author: Admin \"\"\" #from pandas import Series #from statsmodels.graphics.tsaplots", "ARIMA model def evaluate_models(dataset, p_values, d_values, q_values): dataset = dataset.astype('float32') best_score, best_cfg =", "sample error mse = mean_squared_error(test, predictions) rmse = sqrt(mse) return rmse # evaluate", "# evaluate parameters p_values = range(0,13) d_values = range(0, 4) q_values = range(0,", "import mean_squared_error from math import sqrt from pandas import datetime from pandas import", "pandas import read_csv #from pandas import datetime # #def parser(x): # return datetime.strptime(x,", "statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from math import sqrt from pandas", "import read_csv #from pandas import datetime # #def 
parser(x): # return datetime.strptime(x, '%Y-%m-%d')", "warnings #from pandas import Series from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error", "history = [x for x in train] # make predictions predictions = list()", "X.astype('float32') train_size = int(len(X) * 0.50) train, test = X[0:train_size], X[train_size:] history =", "# return datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)", "sqrt(mse) return rmse # evaluate combinations of p, d and q values for", "evaluate combinations of p, d and q values for an ARIMA model def", "= int(len(X) * 0.50) train, test = X[0:train_size], X[train_size:] history = [x for", "of sample error mse = mean_squared_error(test, predictions) rmse = sqrt(mse) return rmse #", "ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out", "evaluate_arima_model(dataset, order) if mse < best_score: best_score, best_cfg = mse, order print('ARIMA%s MSE=%.3f'", "matplotlib import pyplot #from pandas import DataFrame #from pandas import read_csv #from pandas", "except: continue print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score)) # load dataset def parser(x):", "MSE=%.3f' % (best_cfg, best_score)) # load dataset def parser(x): return datetime.strptime(x, '%Y-%m-%d') series", "19 12:14:06 2018 @author: Admin \"\"\" #from pandas import Series #from statsmodels.graphics.tsaplots import", "= evaluate_arima_model(dataset, order) if mse < best_score: best_score, best_cfg = mse, order print('ARIMA%s", "mse, order print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue print('Best ARIMA%s MSE=%.3f' % (best_cfg,", "ARIMA from sklearn.metrics import mean_squared_error from math import sqrt from pandas import datetime", "p in p_values: for d in d_values: for q in q_values: order =", "mse < best_score: best_score, best_cfg = mse, order print('ARIMA%s MSE=%.3f' % (order,mse)) except:", "order print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score))", "an ARIMA model def evaluate_models(dataset, p_values, d_values, q_values): dataset = dataset.astype('float32') best_score, best_cfg", "squeeze=True, date_parser=parser) # evaluate parameters p_values = range(0,13) d_values = range(0, 4) q_values", "'%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10))", "in p_values: for d in d_values: for q in q_values: order = (p,d,q)", "q in q_values: order = (p,d,q) try: mse = evaluate_arima_model(dataset, order) if mse", "datetime # #def parser(x): # return datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0,", "#pyplot.show() import warnings #from pandas import Series from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics", "Created on Tue Jun 19 12:14:06 2018 @author: Admin \"\"\" #from pandas import", "import plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib import pyplot #from pandas import", "import ARIMA from sklearn.metrics import mean_squared_error from math import sqrt from pandas import", "# make predictions predictions = list() for t in range(len(test)): model = ARIMA(history,", "import plot_pacf #from matplotlib import pyplot #from pandas import DataFrame #from pandas import", "coding: utf-8 
-*- \"\"\" Created on Tue Jun 19 12:14:06 2018 @author: Admin", "datetime from pandas import read_csv # evaluate an ARIMA model for a given", "in range(len(test)): model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat)", "# #def parser(x): # return datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0, parse_dates=[0],", "order=arima_order) model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out of", "% (order,mse)) except: continue print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score)) # load dataset", "for an ARIMA model def evaluate_models(dataset, p_values, d_values, q_values): dataset = dataset.astype('float32') best_score,", "date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series, ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings", "from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from math import sqrt from", "pandas import datetime # #def parser(x): # return datetime.strptime(x, '%Y-%m-%d') # #series =", "(p,d,q) and return RMSE def evaluate_arima_model(X, arima_order): # prepare training dataset X =", "'%Y-%m-%d') series = read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate parameters p_values", "dataset X = X.astype('float32') train_size = int(len(X) * 0.50) train, test = X[0:train_size],", "evaluate an ARIMA model for a given order (p,d,q) and return RMSE def", "import pyplot #from pandas import DataFrame #from pandas import read_csv #from pandas import", "import read_csv # evaluate an ARIMA model for a given order (p,d,q) and", "def evaluate_arima_model(X, arima_order): # prepare training dataset X = X.astype('float32') train_size = int(len(X)", "Admin \"\"\" #from pandas import Series #from statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots import", "#series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211) #plot_acf(series,", "# #series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) ##print(series.head()) # #pyplot.figure(figsize=(30,10)) #pyplot.subplot(211)", "import warnings #from pandas import Series from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import", "best_cfg = mse, order print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue print('Best ARIMA%s MSE=%.3f'", "train_size = int(len(X) * 0.50) train, test = X[0:train_size], X[train_size:] history = [x", "predictions = list() for t in range(len(test)): model = ARIMA(history, order=arima_order) model_fit =", "calculate out of sample error mse = mean_squared_error(test, predictions) rmse = sqrt(mse) return", "math import sqrt from pandas import datetime from pandas import read_csv # evaluate", "order) if mse < best_score: best_score, best_cfg = mse, order print('ARIMA%s MSE=%.3f' %", "return rmse # evaluate combinations of p, d and q values for an", "best_score, best_cfg = float(\"inf\"), None for p in p_values: for d in d_values:", "* 0.50) train, test = X[0:train_size], X[train_size:] history = [x for x in", "# calculate out of sample error mse = mean_squared_error(test, predictions) rmse = 
sqrt(mse)", "train] # make predictions predictions = list() for t in range(len(test)): model =", "out of sample error mse = mean_squared_error(test, predictions) rmse = sqrt(mse) return rmse", "mean_squared_error(test, predictions) rmse = sqrt(mse) return rmse # evaluate combinations of p, d", "def evaluate_models(dataset, p_values, d_values, q_values): dataset = dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None", "-*- coding: utf-8 -*- \"\"\" Created on Tue Jun 19 12:14:06 2018 @author:", "import DataFrame #from pandas import read_csv #from pandas import datetime # #def parser(x):", "ax=pyplot.gca()) #pyplot.subplot(212) #plot_pacf(series, ax=pyplot.gca()) #pyplot.show() import warnings #from pandas import Series from statsmodels.tsa.arima_model", "sqrt from pandas import datetime from pandas import read_csv # evaluate an ARIMA", "model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) #", "training dataset X = X.astype('float32') train_size = int(len(X) * 0.50) train, test =", "pandas import read_csv # evaluate an ARIMA model for a given order (p,d,q)", "X[0:train_size], X[train_size:] history = [x for x in train] # make predictions predictions", "values for an ARIMA model def evaluate_models(dataset, p_values, d_values, q_values): dataset = dataset.astype('float32')", "= range(0,13) d_values = range(0, 4) q_values = range(0, 13) warnings.filterwarnings(\"ignore\") evaluate_models(series.values, p_values,", "best_cfg = float(\"inf\"), None for p in p_values: for d in d_values: for", "error mse = mean_squared_error(test, predictions) rmse = sqrt(mse) return rmse # evaluate combinations", "# evaluate combinations of p, d and q values for an ARIMA model", "best_score, best_cfg = mse, order print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue print('Best ARIMA%s", "d in d_values: for q in q_values: order = (p,d,q) try: mse =", "model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out of sample", "= mse, order print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue print('Best ARIMA%s MSE=%.3f' %", "-*- \"\"\" Created on Tue Jun 19 12:14:06 2018 @author: Admin \"\"\" #from", "2018 @author: Admin \"\"\" #from pandas import Series #from statsmodels.graphics.tsaplots import plot_acf #from", "p_values, d_values, q_values): dataset = dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None for p", "= model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out of sample error mse = mean_squared_error(test,", "= X[0:train_size], X[train_size:] history = [x for x in train] # make predictions", "@author: Admin \"\"\" #from pandas import Series #from statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots", "model for a given order (p,d,q) and return RMSE def evaluate_arima_model(X, arima_order): #", "return RMSE def evaluate_arima_model(X, arima_order): # prepare training dataset X = X.astype('float32') train_size", "pandas import Series #from statsmodels.graphics.tsaplots import plot_acf #from statsmodels.graphics.tsaplots import plot_pacf #from matplotlib", "d_values, q_values): dataset = dataset.astype('float32') best_score, best_cfg = float(\"inf\"), None for p in", "best_score: best_score, best_cfg = mse, order print('ARIMA%s MSE=%.3f' % (order,mse)) except: continue print('Best", "parser(x): return datetime.strptime(x, '%Y-%m-%d') series = 
read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) #", "order (p,d,q) and return RMSE def evaluate_arima_model(X, arima_order): # prepare training dataset X", "= [x for x in train] # make predictions predictions = list() for", "t in range(len(test)): model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0]", "combinations of p, d and q values for an ARIMA model def evaluate_models(dataset,", "= read_csv('data/recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser) # evaluate parameters p_values = range(0,13)", "parser(x): # return datetime.strptime(x, '%Y-%m-%d') # #series = read_csv('recom_train.csv', header=0, parse_dates=[0], index_col=0, squeeze=True,", "X[train_size:] history = [x for x in train] # make predictions predictions =", "yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t]) # calculate out of sample error mse =", "in train] # make predictions predictions = list() for t in range(len(test)): model", "print('Best ARIMA%s MSE=%.3f' % (best_cfg, best_score)) # load dataset def parser(x): return datetime.strptime(x,", "(p,d,q) try: mse = evaluate_arima_model(dataset, order) if mse < best_score: best_score, best_cfg =", "from sklearn.metrics import mean_squared_error from math import sqrt from pandas import datetime from", "range(0,13) d_values = range(0, 4) q_values = range(0, 13) warnings.filterwarnings(\"ignore\") evaluate_models(series.values, p_values, d_values,", "None for p in p_values: for d in d_values: for q in q_values:", "range(len(test)): model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) yhat = model_fit.forecast()[0] predictions.append(yhat) history.append(test[t])" ]
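On current library versions this script no longer runs as written: `statsmodels.tsa.arima_model.ARIMA` was removed in statsmodels 0.13 in favor of `statsmodels.tsa.arima.model.ARIMA`, and `pandas.datetime` plus `read_csv(..., squeeze=True)` are gone from pandas 2.x. A rough port of the evaluation loop to the newer API (a sketch, not a drop-in replacement; `evaluate_arima_model_modern` is an illustrative name):

```python
# Sketch of evaluate_arima_model on the modern statsmodels API.
from math import sqrt
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima.model import ARIMA  # replaces statsmodels.tsa.arima_model

def evaluate_arima_model_modern(X, arima_order):
    X = X.astype('float32')
    train_size = int(len(X) * 0.50)
    train, test = X[:train_size], X[train_size:]
    history = list(train)
    predictions = []
    for t in range(len(test)):
        model_fit = ARIMA(history, order=arima_order).fit()  # fit() takes no disp=
        predictions.append(model_fit.forecast()[0])          # one-step-ahead forecast
        history.append(test[t])
    return sqrt(mean_squared_error(test, predictions))
```

On the pandas side, `from datetime import datetime` and `read_csv(...).squeeze('columns')` cover the two removed conveniences.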
[ "batch_size) # Batch size is less than num_images, but does not divide evenly", "executed from the repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state =", "rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # Batch size", "num images, but divides evenly batch_size = 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights),", "Should be executed from the repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self):", "than num_images, but does not divide evenly batch_size = 13 atas_3 = base_analyzer.compute_atas(images,", "import sys ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd())) if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR) import numpy", "from the repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state = np.random.RandomState(1234)", "batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06) if __name__ == \"__main__\":", "size=[num_images, num_pixels]) # Batch size is greater than num images (shouldn't use batches)", "= os.path.dirname(os.path.dirname(os.getcwd())) if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR) import numpy as np import", "tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for activity triggered analysis NOTE: Should", "= 50 num_pixels = 12 num_neurons = 24 base_analyzer = Analyzer() model_weights =", "num_pixels = 12 num_neurons = 24 base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0,", "ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR) import numpy as np import tensorflow as tf", "= 2.0 rand_var = 10 num_images = 50 num_pixels = 12 num_neurons =", "less than num images, but divides evenly batch_size = 10 atas_2 = base_analyzer.compute_atas(images,", "testBasic(self): rand_state = np.random.RandomState(1234) rand_mean = 2.0 rand_var = 10 num_images = 50", "rand_state = np.random.RandomState(1234) rand_mean = 2.0 rand_var = 10 num_images = 50 num_pixels", "import Analyzer \"\"\" Test for activity triggered analysis NOTE: Should be executed from", "rand_mean = 2.0 rand_var = 10 num_images = 50 num_pixels = 12 num_neurons", "base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var,", "Batch size is less than num_images, but does not divide evenly batch_size =", "10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than", "= 24 base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images =", "sys.path.append(ROOT_DIR) import numpy as np import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer", "Test for activity triggered analysis NOTE: Should be executed from the repository's root", "for activity triggered analysis NOTE: Should be executed from the repository's root directory", "Batch size is greater than num images (shouldn't use batches) batch_size = 100", "num_neurons = 24 base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images", "ActivityTriggeredAverageTest(tf.test.TestCase): def 
testBasic(self): rand_state = np.random.RandomState(1234) rand_mean = 2.0 rand_var = 10 num_images", "atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than num", "model_weights), batch_size) # Batch size is less than num_images, but does not divide", "np.dot(images, model_weights), batch_size) # Batch size is less than num images, but divides", "as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for activity triggered analysis NOTE:", "be executed from the repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state", "is greater than num images (shouldn't use batches) batch_size = 100 atas_1 =", "atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3, rtol=1e-06,", "10 num_images = 50 num_pixels = 12 num_neurons = 24 base_analyzer = Analyzer()", "sys.path: sys.path.append(ROOT_DIR) import numpy as np import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import", "= np.random.RandomState(1234) rand_mean = 2.0 rand_var = 10 num_images = 50 num_pixels =", "but divides evenly batch_size = 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) #", "50 num_pixels = 12 num_neurons = 24 base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0,", "= base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06)", "\"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state = np.random.RandomState(1234) rand_mean = 2.0 rand_var =", "24 base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean,", "(shouldn't use batches) batch_size = 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) #", "import os import sys ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd())) if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR)", "is less than num_images, but does not divide evenly batch_size = 13 atas_3", "NOTE: Should be executed from the repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def", "does not divide evenly batch_size = 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)", "images (shouldn't use batches) batch_size = 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)", "is less than num images, but divides evenly batch_size = 10 atas_2 =", "= base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than num images,", "not in sys.path: sys.path.append(ROOT_DIR) import numpy as np import tensorflow as tf from", "num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # Batch size is greater than", "np.random.RandomState(1234) rand_mean = 2.0 rand_var = 10 num_images = 50 num_pixels = 12", "DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for activity triggered analysis NOTE: Should be executed", "activity triggered analysis NOTE: Should be executed from the repository's root directory \"\"\"", "if ROOT_DIR not in sys.path: 
sys.path.append(ROOT_DIR) import numpy as np import tensorflow as", "images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # Batch size is greater than num", "\"\"\" Test for activity triggered analysis NOTE: Should be executed from the repository's", "base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than num_images, but does", "batch_size) # Batch size is less than num images, but divides evenly batch_size", "batches) batch_size = 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size", "use batches) batch_size = 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch", "numpy as np import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test", "in sys.path: sys.path.append(ROOT_DIR) import numpy as np import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer", "images, but divides evenly batch_size = 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size)", "# Batch size is greater than num images (shouldn't use batches) batch_size =", "= 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1,", "np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06) if __name__", "Analyzer \"\"\" Test for activity triggered analysis NOTE: Should be executed from the", "evenly batch_size = 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06,", "# Batch size is less than num images, but divides evenly batch_size =", "repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state = np.random.RandomState(1234) rand_mean =", "divide evenly batch_size = 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2,", "12 num_neurons = 24 base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons))", "root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state = np.random.RandomState(1234) rand_mean = 2.0", "as np import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for", "self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06) if __name__ == \"__main__\": tf.test.main()", "ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd())) if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR) import numpy as np", "size is less than num_images, but does not divide evenly batch_size = 13", "base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than num images, but", "def testBasic(self): rand_state = np.random.RandomState(1234) rand_mean = 2.0 rand_var = 10 num_images =", "not divide evenly batch_size = 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1,", "= 10 num_images = 50 num_pixels = 12 num_neurons = 24 base_analyzer =", "rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # 
Batch size is greater than num images (shouldn't", "= rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # Batch size is greater than num images", "rand_var = 10 num_images = 50 num_pixels = 12 num_neurons = 24 base_analyzer", "evenly batch_size = 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size", "scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # Batch size is", "the repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state = np.random.RandomState(1234) rand_mean", "triggered analysis NOTE: Should be executed from the repository's root directory \"\"\" class", "base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06) if", "= 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less", "atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than num_images,", "model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) #", "100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than", "= base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less than num_images, but", "directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state = np.random.RandomState(1234) rand_mean = 2.0 rand_var", "batch_size = 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is", "tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for activity triggered analysis", "less than num_images, but does not divide evenly batch_size = 13 atas_3 =", "num_images = 50 num_pixels = 12 num_neurons = 24 base_analyzer = Analyzer() model_weights", "Batch size is less than num images, but divides evenly batch_size = 10", "np import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for activity", "sys ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd())) if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR) import numpy as", "# Batch size is less than num_images, but does not divide evenly batch_size", "= rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # Batch", "greater than num images (shouldn't use batches) batch_size = 100 atas_1 = base_analyzer.compute_atas(images,", "num_images, but does not divide evenly batch_size = 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images,", "than num images, but divides evenly batch_size = 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images,", "= 12 num_neurons = 24 base_analyzer = Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels,", "but does not divide evenly batch_size = 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights),", "size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels]) # Batch 
size is greater", "num_pixels]) # Batch size is greater than num images (shouldn't use batches) batch_size", "num images (shouldn't use batches) batch_size = 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights),", "np.dot(images, model_weights), batch_size) # Batch size is less than num_images, but does not", "batch_size = 13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06)", "13 atas_3 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3,", "= 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is less", "import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for activity triggered", "model_weights), batch_size) # Batch size is less than num images, but divides evenly", "Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images, num_pixels])", "batch_size = 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch size is", "model_weights), batch_size) self.assertAllClose(atas_1, atas_2, rtol=1e-06, atol=1e-06) self.assertAllClose(atas_1, atas_3, rtol=1e-06, atol=1e-06) if __name__ ==", "analysis NOTE: Should be executed from the repository's root directory \"\"\" class ActivityTriggeredAverageTest(tf.test.TestCase):", "size is greater than num images (shouldn't use batches) batch_size = 100 atas_1", "= Analyzer() model_weights = rand_state.normal(loc=0.0, scale=1.0, size=(num_pixels, num_neurons)) images = rand_state.normal(loc=rand_mean, scale=rand_var, size=[num_images,", "than num images (shouldn't use batches) batch_size = 100 atas_1 = base_analyzer.compute_atas(images, np.dot(images,", "size is less than num images, but divides evenly batch_size = 10 atas_2", "scale=rand_var, size=[num_images, num_pixels]) # Batch size is greater than num images (shouldn't use", "from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\" Test for activity triggered analysis NOTE: Should be", "divides evenly batch_size = 10 atas_2 = base_analyzer.compute_atas(images, np.dot(images, model_weights), batch_size) # Batch", "import numpy as np import tensorflow as tf from DeepSparseCoding.tf1x.analysis.base_analyzer import Analyzer \"\"\"", "os.path.dirname(os.path.dirname(os.getcwd())) if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR) import numpy as np import tensorflow", "os import sys ROOT_DIR = os.path.dirname(os.path.dirname(os.getcwd())) if ROOT_DIR not in sys.path: sys.path.append(ROOT_DIR) import", "class ActivityTriggeredAverageTest(tf.test.TestCase): def testBasic(self): rand_state = np.random.RandomState(1234) rand_mean = 2.0 rand_var = 10", "2.0 rand_var = 10 num_images = 50 num_pixels = 12 num_neurons = 24" ]
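What this test pins down is a batching invariance: compute_atas must return the same activity triggered averages whether it processes all 50 images at once (batch size larger than the dataset), in even batches of 10, or in ragged batches of 13. compute_atas itself is not included in this dump; the sketch below is a hypothetical NumPy reference for one common definition, the per-neuron activity-weighted mean of the inputs, together with a batched accumulator that makes the invariance explicit. The function names and the normalization are assumptions, not DeepSparseCoding's actual implementation.

import numpy as np

def reference_atas(images, activities):
    # Hypothetical definition: per-neuron activity-weighted mean of the inputs.
    # images: [num_images, num_pixels], activities: [num_images, num_neurons],
    # result: [num_pixels, num_neurons].
    return images.T @ activities / images.shape[0]

def batched_reference_atas(images, activities, batch_size):
    # Accumulating the same cross-products batch by batch and normalizing once
    # at the end reproduces reference_atas exactly, for any batch_size --
    # including sizes that exceed num_images or do not divide it evenly.
    total = np.zeros((images.shape[1], activities.shape[1]))
    for start in range(0, images.shape[0], batch_size):
        total += images[start:start + batch_size].T @ activities[start:start + batch_size]
    return total / images.shape[0]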
[ "if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss')", "optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif", "['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd:", "set_random_seed import time import subprocess as sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name", "or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) loss", "model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if", "set_random_seed(cfg) # list visible devices and use allow growth - updated for TF", "elif cfg.mode == 'us2conf': pred_confs = model(images) else: pred_devs = model(images) # compute", "bce(dev_y, pred_devs) loss = 4*q_loss + dev_loss elif cfg.mode == 'us2conf': q_loss =", "pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad,", "# log CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs)", "cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter)", "import absolute_import, division, print_function, unicode_literals import sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..')", "cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) loss =", "loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform optimization step gradients = tape.gradient(loss,", "cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps,", "timestamps, images, conf_y, dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else:", "if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns)", "if cfg.mode == 'us2conf': _, images, conf_y = batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None,", "= batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi'", "or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log FK loss if cfg.data.use_fk: fk_loss =", "cfg.mode == 'us2conf': test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'", "memory_req=cfg.system.memory_req) # create model, loss and optimizer model = initiate_model(cfg=cfg) if cfg.use_conf: mse", "as tf from data.load import DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from", "cfg): # get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': 
pred_confs,", "'us2multikey' images, dev_y = batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter +=", "test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode ==", "cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) loss =", "tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict = {} if cfg.use_conf: row_dict['train_c_loss']", "= bce(dev_y, pred_devs) # log tasks losses if cfg.mode == 'us2conf2multimidi' or cfg.mode", "== 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y,", "dev_y=dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' images, dev_y", "= initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if", "Dev loss', train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen: if cfg.mode == 'us2conf': _,", "\") if cfg.use_conf: print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \")", "1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test", "{:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if cfg.data.use_cd: print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(),", "train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode", "tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if", "if cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths)", "cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load train and test datasets", "if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype))", "if cfg.store_csv: row_dict = {} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy()", "= ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde", "step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss',", "+= cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add CD loss if cfg.data.use_cd or cfg.data.use_fd:", "== \"__main__\": # load config file cfg = ConfigManager(json_name=args.json, retrain=True) # set random", "os import argparse import numpy as np import pandas as pd import tensorflow", "cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log FK loss if cfg.data.use_fk: fk_loss = fke(conf_y,", "= ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, 
arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load", "1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train", "FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss += cfg.data.fk * fk_loss", "test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] =", "visible devices and use allow growth - updated for TF 2.7 (CUDA 11", "file cfg = ConfigManager(json_name=args.json, retrain=True) # set random seed (do nothing for no", "FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype))", "asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss and optimizer model = initiate_model(cfg=cfg) if", "images, conf_y = batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode ==", "for test_batch in data_mng.test_gen: if cfg.mode == 'us2conf': _, test_images, test_conf_y = test_batch", "10 == 0 or epoch == 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save", "test_batch in data_mng.test_gen: if cfg.mode == 'us2conf': _, test_images, test_conf_y = test_batch test(timestamps=None,", "cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss)", "if cfg.use_conf: print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if", "if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy()", "ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time import subprocess as", "if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss and optimizer model = initiate_model(cfg=cfg)", "images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode ==", "add FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss += cfg.data.fk *", "dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' images,", "dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss',", "if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad)", "cfg = ConfigManager(json_name=args.json, retrain=True) # set random seed (do nothing for no random", "range(cfg.training.epochs): for batch in data_mng.train_gen: if cfg.mode == 'us2conf': _, images, conf_y =", "as sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str)", "printing print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C L: {:.5f}, T C L:", "{:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), 
test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK L: {:.5f},", "ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights)", "if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss')", "train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen:", "test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()),", "if cfg.data.use_cd: print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if", "# cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' images, dev_y = batch train(timestamps=None,", "+= 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk:", "C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd:", "cfg.mode == 'us2multikey' images, dev_y = batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg)", "tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if", "ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde =", "L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if cfg.data.use_cd: print('CD L:", "or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) elif", "L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev L:", "# cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf function to", "folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of training if asked", "images, conf_y, dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: #", "'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log FK loss if cfg.data.use_fk: fk_loss", "{:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(),", "if cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict = {} if", "test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode ==", "= test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1),", "dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps, images, conf_y,", "test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK 
L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()),", "T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK L: {:.5f}, T", "if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss')", "import tensorflow as tf from data.load import DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError,", "if cfg.store_csv: metric_df_columns = [] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns", "print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev", "import argparse import numpy as np import pandas as pd import tensorflow as", "= tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0 test_counter = 0 print('Start training...') for", "== 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) # log tasks", "= tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss')", "conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train C", "loss = q_loss else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss", "# log FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log", "model, loss and optimizer model = initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if", "4*q_loss + dev_loss elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) loss =", "for epoch in range(cfg.training.epochs): for batch in data_mng.train_gen: if cfg.mode == 'us2conf': _,", "# add FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss += cfg.data.fk", "cfg.mode == 'us2conf': train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'", "cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen: if cfg.mode ==", "== 'us2conf2multikey': pred_confs, pred_devs = model(images) elif cfg.mode == 'us2conf': pred_confs = model(images)", "test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1), end=\"", "ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights", "wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss and optimizer model = initiate_model(cfg=cfg) if cfg.use_conf:", "cde(conf_y, pred_confs) if cfg.data.use_cd: loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add FD", "cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns +=", "train(timestamps, images, conf_y, dev_x, dev_y, cfg): with tf.GradientTape() as tape: # get predictions", 
"data_mng.train_gen: if cfg.mode == 'us2conf': _, images, conf_y = batch train(timestamps=None, images=images, conf_y=conf_y,", "fke(conf_y, pred_confs) loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add CD loss if", "'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) loss = 4*q_loss +", "def test(timestamps, images, conf_y, dev_x, dev_y, cfg): # get predictions if cfg.mode ==", "L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD L:", "cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) loss = dev_loss # add FK", "cfg.use_dev: print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \")", "% 10 == 0 or epoch == 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) #", "cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) #", "test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode", "__future__ import absolute_import, division, print_function, unicode_literals import sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.')", "sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import os import argparse import numpy", "json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() # tf function to train @tf.function", "test_dev_loss(dev_loss) # log FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) #", "metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1), end=\" \")", "cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None,", "else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log FK", "+ CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if", "dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps, images,", "+ dev_loss elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) loss = q_loss", "cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss", "dev_x, dev_y, cfg): with tf.GradientTape() as tape: # get predictions if cfg.mode ==", "== 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) loss = 4*q_loss", "dev_loss # add FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss +=", "losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode", "tensorflow as tf from data.load import DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError", "== 
'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': train_c_loss(q_loss) else: # cfg.mode ==", "q_loss.dtype)) # log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y,", "not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of training if asked if cfg.store_csv: metric_df_columns", "step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss',", "{:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \") if (epoch+1) %", "import sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import os import argparse import", "row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev:", "step=test_counter) for test_batch in data_mng.test_gen: if cfg.mode == 'us2conf': _, test_images, test_conf_y =", "dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y,", "tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss =", "cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' images, dev_y = batch train(timestamps=None, images=images,", "if cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(),", "{:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev L: {:.5f},", "or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss) else: #", "if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns", "== 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) loss = dev_loss", "train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD", "if cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen: if cfg.mode", "if cfg.data.use_fk: print('FK L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if", "= tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss", "dev_y=test_dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_images, test_dev_y", "cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: loss += cfg.data.cd", "dev_y = batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1 with", "step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss', 
train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen: if", "data.load import DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager,", "import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time", "train_fd_loss(fd_loss) # perform optimization step gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode", "if cfg.data.use_fd: print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if", "* fk_loss train_fk_loss(fk_loss) # add CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss", "tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define metrics if", "(epoch+1) % cfg.training.cp_interval == 0 and epoch > 0: print('Saving weights to {}'.format(cfg.output_dir))", "11 + CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check", "== 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1) % cfg.training.cp_interval", "(do nothing for no random seed) set_random_seed(cfg) # list visible devices and use", "dev_loss = bce(dev_y, pred_devs) # log tasks losses if cfg.mode == 'us2conf2multimidi' or", "train_counter = 0 test_counter = 0 print('Start training...') for epoch in range(cfg.training.epochs): for", "FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd:", "train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1 with metrics_writer.as_default(): if cfg.use_conf:", "gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss and optimizer model =", "cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform", "cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter)", "print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(),", "pandas as pd import tensorflow as tf from data.load import DataManagement from trainer.losses", "= mse(conf_y, pred_confs) loss = q_loss else: # cfg.mode == 'us2multimidi' or cfg.mode", "pred_confs) dev_loss = bce(dev_y, pred_devs) loss = 4*q_loss + dev_loss elif cfg.mode ==", "'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps, images, conf_y, dev_y = batch train(timestamps=timestamps, images=images,", "{:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(),", "cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps)", "metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += 
['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss']", "= bce(dev_y, pred_devs) loss = dev_loss # add FK loss if cfg.data.use_fk: fk_loss", "Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict = {} if cfg.use_conf: row_dict['train_c_loss'] =", "elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y =", "if cfg.use_dev: print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\"", "= mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) loss = 4*q_loss + dev_loss elif", "if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load train and test", "end=\" \") print(\" \") if (epoch+1) % 10 == 0 or epoch ==", "cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss", "print_function, unicode_literals import sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import os import", "train train_counter = 0 test_counter = 0 print('Start training...') for epoch in range(cfg.training.epochs):", "loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train", "test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD", "'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C L: {:.5f},", "{:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(),", "= train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy()", "row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] =", "conf_y = batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi'", "optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define", "== 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs = model(images) elif cfg.mode ==", "csv of training if asked if cfg.store_csv: metric_df_columns = [] if cfg.use_conf: metric_df_columns", "['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load train and", "\"\": model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss')", "if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss', 
test_fd_loss.result(),", "row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] =", "test @tf.function def test(timestamps, images, conf_y, dev_x, dev_y, cfg): # get predictions if", "test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\": # load config file cfg = ConfigManager(json_name=args.json,", "@tf.function def test(timestamps, images, conf_y, dev_x, dev_y, cfg): # get predictions if cfg.mode", "if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss')", "== 'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter", "loss', train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen: if cfg.mode == 'us2conf': _, test_images,", "FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv:", "= tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss')", "if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train", "create model, loss and optimizer model = initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError()", "task losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y,", "q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) elif cfg.mode == 'us2conf': q_loss", "if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns", "sys.path.append('..') import os import argparse import numpy as np import pandas as pd", "or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: loss += cfg.data.cd *", "cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: loss += cfg.data.cd * cd_loss", "# load train and test datasets data_mng = DataManagement(cfg=cfg) # wait for gpu", "save model if (epoch+1) % cfg.training.cp_interval == 0 and epoch > 0: print('Saving", "cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) #", "train_dev_loss(dev_loss) # tf function to test @tf.function def test(timestamps, images, conf_y, dev_x, dev_y,", "add CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if", "# save model if (epoch+1) % cfg.training.cp_interval == 0 and epoch > 0:", "cfg.training.cp_interval == 0 and epoch > 0: print('Saving weights to {}'.format(cfg.output_dir)) model.save_weights(os.path.join(cfg.output_dir, \"model{}.ckpt\".format(epoch+1)))", "FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" 
\") if cfg.data.use_cd: print('CD L: {:.5f}, T CD", "cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss =", "# load weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf:", "prepend_name_scope sys.path.append('.') sys.path.append('..') import os import argparse import numpy as np import pandas", "np import pandas as pd import tensorflow as tf from data.load import DataManagement", "pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\": # load config file cfg =", "with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK", "growth - updated for TF 2.7 (CUDA 11 + CUDNN 8.2) gpus =", "cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns +=", "'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss) else:", "cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss", "= argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args()", "list visible devices and use allow growth - updated for TF 2.7 (CUDA", "cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or", "args = parser.parse_args() # tf function to train @tf.function def train(timestamps, images, conf_y,", "cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter)", "devices and use allow growth - updated for TF 2.7 (CUDA 11 +", "if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode ==", "loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict = {} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy()", "tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if", "import time import subprocess as sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of", "cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None, images=test_images,", "batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode", "cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ ==", "config file cfg = ConfigManager(json_name=args.json, retrain=True) # set random seed (do nothing for", "# log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = 
fde(conf_y, pred_confs)", "= pd.DataFrame(columns=metric_df_columns) # load train and test datasets data_mng = DataManagement(cfg=cfg) # wait", "cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss) else: # cfg.mode", "train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter =", "= ConfigManager(json_name=args.json, retrain=True) # set random seed (do nothing for no random seed)", "= test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss']", "metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf:", "perform optimization step gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi'", "'us2conf': train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) #", "metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0 test_counter = 0 print('Start training...')", "= dev_loss # add FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss", "# perform optimization step gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode ==", "train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or", "DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu, initiate_model,", "if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: loss +=", "cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke =", "TF 2.7 (CUDA 11 + CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu],", "cfg.data.use_cd: loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add FD loss if cfg.data.use_fd:", "parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() # tf", "cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad)", "if __name__ == \"__main__\": # load config file cfg = ConfigManager(json_name=args.json, retrain=True) #", "loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps,", "as np import pandas as pd import tensorflow as tf from data.load import", "cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) #", "print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), 
test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD", "'us2multimidi' or cfg.mode == 'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None,", "if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss += cfg.data.fd *", "loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add CD loss if cfg.data.use_cd or", "argparse import numpy as np import pandas as pd import tensorflow as tf", "tf.GradientTape() as tape: # get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode ==", "or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs = model(images) elif cfg.mode == 'us2conf': pred_confs", "import pandas as pd import tensorflow as tf from data.load import DataManagement from", "loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: loss", "or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': train_c_loss(q_loss) else: #", "['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev:", "cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf':", "# cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) loss", "L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK L: {:.5f}, T FK L:", "True) # check if output folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate", "cfg.store_csv: metric_df_columns = [] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns +=", "== 'us2multimidi' or cfg.mode == 'us2multikey' images, dev_y = batch train(timestamps=None, images=images, conf_y=None,", "q_loss.dtype)) if __name__ == \"__main__\": # load config file cfg = ConfigManager(json_name=args.json, retrain=True)", "images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':", "= train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy()", "with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK", "C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK L: {:.5f}, T FK", "'us2conf2multikey': pred_confs, pred_devs = model(images) elif cfg.mode == 'us2conf': pred_confs = model(images) else:", "= cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss if cfg.data.use_fd:", "of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() # tf function to train", "pred_confs) loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform optimization step gradients =", "load train and test datasets data_mng = DataManagement(cfg=cfg) # wait for gpu if", "cfg=cfg) train_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: 
tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter)", "\") if cfg.use_dev: print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \")", "loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test", "dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode ==", "dev_loss = bce(dev_y, pred_devs) loss = 4*q_loss + dev_loss elif cfg.mode == 'us2conf':", "L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if cfg.data.use_cd: print('CD L: {:.5f}, T CD L:", "= fde(conf_y, pred_confs) loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform optimization step", "trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import", "'us2multikey' dev_loss = bce(dev_y, pred_devs) # log tasks losses if cfg.mode == 'us2conf2multimidi'", "step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev loss',", "= test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict,", "dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_images,", "images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode ==", "tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) #", "CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd:", "test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode", "as tape: # get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':", "+= ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load train", "print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK", "import subprocess as sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json file',", "# add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs)", "== 'us2multimidi' or cfg.mode == 'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None,", "fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss", "retrain=True) # set random seed (do nothing for no random seed) set_random_seed(cfg) #", "initiate csv of training if asked if cfg.store_csv: metric_df_columns = [] if cfg.use_conf:", "= train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if 
cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy()", "index=False) # printing print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C L: {:.5f}, T", "nothing for no random seed) set_random_seed(cfg) # list visible devices and use allow", "elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) loss = q_loss else: #", "tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) #", "+= ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if", "sys.path.append('.') sys.path.append('..') import os import argparse import numpy as np import pandas as", "datasets data_mng = DataManagement(cfg=cfg) # wait for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) #", "cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss += cfg.data.fd * fd_loss", "model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode", "'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf function to test @tf.function def", "fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights !=", "test_counter = 0 print('Start training...') for epoch in range(cfg.training.epochs): for batch in data_mng.train_gen:", "no random seed) set_random_seed(cfg) # list visible devices and use allow growth -", "tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer", "= model(images) else: pred_devs = model(images) # compute task losses if cfg.mode ==", "'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs)", "fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd:", "= tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0 test_counter = 0", "cfg.use_conf: print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if cfg.data.use_fk:", "== 'us2conf': q_loss = mse(conf_y, pred_confs) loss = q_loss else: # cfg.mode ==", "to test @tf.function def test(timestamps, images, conf_y, dev_x, dev_y, cfg): # get predictions", "= q_loss else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss =", "if cfg.mode == 'us2conf': _, test_images, test_conf_y = test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None,", "else: pred_devs = model(images) # compute task losses if cfg.mode == 'us2conf2multimidi' or", "division, print_function, unicode_literals import sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import os", "pred_confs) test_fk_loss(fk_loss) # log CD loss if cfg.data.use_cd or 
cfg.data.use_fd: cde.set_time(timestamps) cd_loss =", "tf function to train @tf.function def train(timestamps, images, conf_y, dev_x, dev_y, cfg): with", "== 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y,", "cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps, images, conf_y, dev_y", "training if asked if cfg.store_csv: metric_df_columns = [] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss']", "time import subprocess as sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json", "test(timestamps, images, conf_y, dev_x, dev_y, cfg): # get predictions if cfg.mode == 'us2conf2multimidi'", "loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test", "cfg.mode == 'us2conf2multikey': timestamps, images, conf_y, dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None,", "== 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss) else: # cfg.mode ==", "q_loss = mse(conf_y, pred_confs) loss = q_loss else: # cfg.mode == 'us2multimidi' or", "loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss,", "cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD", "# tf function to train @tf.function def train(timestamps, images, conf_y, dev_x, dev_y, cfg):", "tf function to test @tf.function def test(timestamps, images, conf_y, dev_x, dev_y, cfg): #", "pred_confs) loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add CD loss if cfg.data.use_cd", "if output folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of training", "metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load train and test datasets data_mng", "cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if", "tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss =", "= fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\": # load config file", "if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy()", "= metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1), end=\" \") if", "trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time import subprocess as sp parser", "random seed) set_random_seed(cfg) # list visible devices and use allow growth - updated", "wait_for_gpu, initiate_model, set_random_seed import time import subprocess as sp parser = argparse.ArgumentParser() parser.add_argument('--json',", "train_counter += 1 with metrics_writer.as_default(): if 
cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if", "cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate)", "test_timestamps, test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else:", "q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) loss = 4*q_loss + dev_loss", "== 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps, images, conf_y, dev_y = batch train(timestamps=timestamps,", "bce(dev_y, pred_devs) loss = dev_loss # add FK loss if cfg.data.use_fk: fk_loss =", "if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd", "print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1) % cfg.training.cp_interval == 0", "images, dev_y = batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1", "os.makedirs(cfg.output_dir) # initiate csv of training if asked if cfg.store_csv: metric_df_columns = []", "cfg.mode == 'us2conf': pred_confs = model(images) else: pred_devs = model(images) # compute task", "train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev", "check if output folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of", "FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter) for test_batch", "train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss =", "parser.parse_args() # tf function to train @tf.function def train(timestamps, images, conf_y, dev_x, dev_y,", "allow growth - updated for TF 2.7 (CUDA 11 + CUDNN 8.2) gpus", "and use allow growth - updated for TF 2.7 (CUDA 11 + CUDNN", "# compute task losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss", "epoch in range(cfg.training.epochs): for batch in data_mng.train_gen: if cfg.mode == 'us2conf': _, images,", "model(images) elif cfg.mode == 'us2conf': pred_confs = model(images) else: pred_devs = model(images) #", "type=str) args = parser.parse_args() # tf function to train @tf.function def train(timestamps, images,", "cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' images, dev_y =", "log FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD", "log tasks losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss)", "+= 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk:", "dev_y, cfg): # get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':", "== 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': 
train_c_loss(q_loss)", "asked if cfg.store_csv: metric_df_columns = [] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk:", "fd_loss train_fd_loss(fd_loss) # perform optimization step gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if", "\") if cfg.data.use_cd: print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \")", "= tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0", "== 0 or epoch == 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model", "'us2conf': _, images, conf_y = batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif", "L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \") if (epoch+1) % 10 == 0", "fde(conf_y, pred_confs) loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform optimization step gradients", "== 'us2conf': train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss)", "file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() # tf function to train @tf.function def", "cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': train_c_loss(q_loss) else: # cfg.mode", "cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) # log tasks losses if cfg.mode", "images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test", "or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) # log tasks losses if", "q_loss = mse(conf_y, pred_confs) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'", "exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of training if asked if", "if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if", "images, conf_y, dev_x, dev_y, cfg): # get predictions if cfg.mode == 'us2conf2multimidi' or", "if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir,", "pred_confs) dev_loss = bce(dev_y, pred_devs) elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs)", "loss = dev_loss # add FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs)", "'-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() # tf function", "= ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights != \"\":", "arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg,", "# load config file cfg = ConfigManager(json_name=args.json, retrain=True) # set random seed (do", "with tf.GradientTape() as tape: # get 
predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode", "from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time import subprocess as sp", "mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) elif cfg.mode == 'us2conf': q_loss = mse(conf_y,", "or epoch == 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1)", "ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time import", "if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs = model(images) elif", "metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss',", "CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev:", "step gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or cfg.mode", "# tf function to test @tf.function def test(timestamps, images, conf_y, dev_x, dev_y, cfg):", "or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log", "# log tasks losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss)", "elif cfg.mode == 'us2conf': test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode ==", "cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss']", "+= ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df", "if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(),", "dev_y=test_dev_y, cfg=cfg) test_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(),", "test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0 test_counter =", "cfg.mode == 'us2conf': _, test_images, test_conf_y = test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None,", "parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args =", "'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) # log tasks losses", "loss and optimizer model = initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev:", "tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output folder exists if not", "_, test_images, test_conf_y = test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, 
cfg=cfg) elif cfg.mode", "cfg=cfg) test_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter)", "bce(dev_y, pred_devs) elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) else: # cfg.mode", "for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss and optimizer model", "= mse(conf_y, pred_confs) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss", "bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd:", "or cfg.mode == 'us2multikey' images, dev_y = batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y,", "train and test datasets data_mng = DataManagement(cfg=cfg) # wait for gpu if asked", "if cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(),", "log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss,", "use allow growth - updated for TF 2.7 (CUDA 11 + CUDNN 8.2)", "metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss']", "Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1) % cfg.training.cp_interval == 0 and epoch", "pred_confs) if cfg.data.use_cd: loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add FD loss", "end=\" \") if cfg.data.use_fd: print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\"", "cfg.mode == 'us2conf': _, images, conf_y = batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None,", "gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output folder exists", "model = initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)", "cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss", "cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y = test_batch", "ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time import subprocess", "loss = 4*q_loss + dev_loss elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs)", "if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss')", "pred_devs) # log tasks losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':", "import ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time import subprocess as sp parser =", "and test datasets data_mng = DataManagement(cfg=cfg) # wait for gpu if 
asked wait_for_gpu(gpu=str(cfg.system.gpu),", "cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf function to test", "tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0 test_counter = 0 print('Start training...') for epoch", "test_conf_y = test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi'", "cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns +=", "to train @tf.function def train(timestamps, images, conf_y, dev_x, dev_y, cfg): with tf.GradientTape() as", "# cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log FK loss", "CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD L: {:.5f}, T FD", "row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch", "pred_devs = model(images) elif cfg.mode == 'us2conf': pred_confs = model(images) else: pred_devs =", "test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log", "optimizer model = initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce =", "== 'us2multikey' train_dev_loss(dev_loss) # tf function to test @tf.function def test(timestamps, images, conf_y,", "mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) loss = 4*q_loss + dev_loss elif cfg.mode", "get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs =", "conf_y, dev_x, dev_y, cfg): # get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode", "metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss =", "if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(),", "tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if", "== 'us2multikey' images, dev_y = batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter", "mse(conf_y, pred_confs) loss = q_loss else: # cfg.mode == 'us2multimidi' or cfg.mode ==", "cfg.data.use_cd: print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if cfg.data.use_fd:", "= train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy()", "pred_confs) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y,", "== 'us2conf': pred_confs = model(images) else: pred_devs = model(images) # compute task losses", "cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter)", "cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) 
test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\": #", "= tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir)", "load weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf: train_c_loss", "train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df", "FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD loss", "CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output", "cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform optimization step gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients,", "'us2conf2multikey': timestamps, images, conf_y, dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg)", "pred_confs, pred_devs = model(images) elif cfg.mode == 'us2conf': pred_confs = model(images) else: pred_devs", "loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter) for test_batch in", "cd_loss train_cd_loss(cd_loss) # add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss =", "\") if (epoch+1) % 10 == 0 or epoch == 0: print('GPU: {},", "train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if", "'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi'", "elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) else: # cfg.mode == 'us2multimidi'", "loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if", "end=\" \") if cfg.use_dev: print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\"", "if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD loss if cfg.data.use_cd", "pd.DataFrame(columns=metric_df_columns) # load train and test datasets data_mng = DataManagement(cfg=cfg) # wait for", "weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf: train_c_loss =", "tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if", "else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_images, test_dev_y = test_batch", "cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter)", "+= 
['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load train and test datasets data_mng =", "random seed (do nothing for no random seed) set_random_seed(cfg) # list visible devices", "fd_loss = fde(conf_y, pred_confs) loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform optimization", "Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \") if (epoch+1) % 10 ==", "'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs = model(images) elif cfg.mode == 'us2conf':", "cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y", "cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) elif cfg.mode", "T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \") if (epoch+1) % 10", "= tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss", "= test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi'", "test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode", "row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] =", "dev_y, cfg): with tf.GradientTape() as tape: # get predictions if cfg.mode == 'us2conf2multimidi'", "arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) #", "output folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of training if", "test_fk_loss(fk_loss) # log CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y,", "tasks losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif", "= bce(dev_y, pred_devs) loss = 4*q_loss + dev_loss elif cfg.mode == 'us2conf': q_loss", "= tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':", "0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1) % cfg.training.cp_interval ==", "= test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or", "model if (epoch+1) % cfg.training.cp_interval == 0 and epoch > 0: print('Saving weights", "test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD", "L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\" \") if cfg.data.use_fk: print('FK L:", "cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss", "test_fk_loss.result()), end=\" \") if 
cfg.data.use_cd: print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()),", "elif cfg.mode == 'us2conf': train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode ==", "FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev L: {:.5f}, T Dev", "if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss", "from __future__ import absolute_import, division, print_function, unicode_literals import sys from tensorflow.python.framework.ops import prepend_name_scope", "0 test_counter = 0 print('Start training...') for epoch in range(cfg.training.epochs): for batch in", "conf_y, dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode", "cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf function to test @tf.function def test(timestamps, images,", "test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd:", "test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1 with", "# cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) #", "q_loss else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y,", "seed (do nothing for no random seed) set_random_seed(cfg) # list visible devices and", "cfg.store_csv: row_dict = {} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if", "train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss =", "cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss if", "test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer =", "'us2conf': pred_confs = model(images) else: pred_devs = model(images) # compute task losses if", "tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output folder exists if not os.path.isdir(cfg.output_dir):", "loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss += cfg.data.fd", "train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss =", "tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if", "cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs = model(images) elif cfg.mode", "if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg)", 
"tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss =", "batch in data_mng.train_gen: if cfg.mode == 'us2conf': _, images, conf_y = batch train(timestamps=None,", "load config file cfg = ConfigManager(json_name=args.json, retrain=True) # set random seed (do nothing", "'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) elif cfg.mode == 'us2conf':", "fk_loss = fke(conf_y, pred_confs) loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add CD", "test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True)", "test datasets data_mng = DataManagement(cfg=cfg) # wait for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req)", "dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train C loss',", "'us2multimidi' or cfg.mode == 'us2multikey' images, dev_y = batch train(timestamps=None, images=images, conf_y=None, dev_x=None,", "cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add", "DataManagement(cfg=cfg) # wait for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss", "mse(conf_y, pred_confs) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss =", "train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss =", "test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] =", "{}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1) % cfg.training.cp_interval == 0 and epoch >", "loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss)", "cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps, images, conf_y, dev_y = batch", "else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs)", "'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': train_c_loss(q_loss) else:", "= test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1 with metrics_writer.as_default():", "C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd:", "cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\": # load", "dev_loss = bce(dev_y, pred_devs) loss = dev_loss # add FK loss if cfg.data.use_fk:", "tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter) if", "train_cd_loss(cd_loss) # add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = 
fde(conf_y,", "# set random seed (do nothing for no random seed) set_random_seed(cfg) # list", "test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode", "row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd:", "= train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) #", "{:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD L: {:.5f},", "test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] =", "FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss +=", "optimization step gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or", "+= cfg.data.fd * fd_loss train_fd_loss(fd_loss) # perform optimization step gradients = tape.gradient(loss, model.trainable_variables)", "== 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps,", "test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or", "initiate_model, set_random_seed import time import subprocess as sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json',", "print('Dev L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \") if", "{:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if cfg.data.use_cd: print('CD L: {:.5f},", "'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg)", "T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev L: {:.5f}, T", "dev_x, dev_y, cfg): # get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode ==", "gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or cfg.mode ==", "if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns", "test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict =", "'us2multikey' train_dev_loss(dev_loss) # tf function to test @tf.function def test(timestamps, images, conf_y, dev_x,", "train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen: if cfg.mode == 'us2conf': _, test_images, test_conf_y", "loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: 
tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train", "== 'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf function to test @tf.function", "step=test_counter) if cfg.store_csv: row_dict = {} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] =", "of training if asked if cfg.store_csv: metric_df_columns = [] if cfg.use_conf: metric_df_columns +=", "images, conf_y, dev_x, dev_y, cfg): with tf.GradientTape() as tape: # get predictions if", "tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import os import argparse import numpy as np", "training...') for epoch in range(cfg.training.epochs): for batch in data_mng.train_gen: if cfg.mode == 'us2conf':", "= fke(conf_y, pred_confs) loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss) # add CD loss", "cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_images, test_dev_y =", "or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer =", "= test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss']", "{} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] =", "= tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde", "tf from data.load import DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils", "# wait for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss and", "['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df =", "import numpy as np import pandas as pd import tensorflow as tf from", "= [] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if", "= 4*q_loss + dev_loss elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) loss", "loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train", "'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images,", "test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()),", "if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(),", "and optimizer model = initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce", "cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: 
tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter)", "'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) loss = dev_loss #", "8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output folder", "or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf function to test @tf.function def test(timestamps,", "batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or", "ConfigManager(json_name=args.json, retrain=True) # set random seed (do nothing for no random seed) set_random_seed(cfg)", "test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1 with metrics_writer.as_default(): if", "tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if", "pred_confs) loss = q_loss else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'", "else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf function", "metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C L:", "os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of training if asked if cfg.store_csv: metric_df_columns =", "L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev: print('Dev L: {:.5f}, T Dev L:", "tape: # get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs,", "= tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths)", "step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss',", "{},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()),", "- updated for TF 2.7 (CUDA 11 + CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU')", "bce(dev_y, pred_devs) # log tasks losses if cfg.mode == 'us2conf2multimidi' or cfg.mode ==", "2.7 (CUDA 11 + CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True)", "fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD loss if cfg.data.use_cd or cfg.data.use_fd:", "test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd:", "dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps, test_images,", "train_fk_loss(fk_loss) # add CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y,", "def train(timestamps, images, conf_y, dev_x, dev_y, cfg): with tf.GradientTape() as tape: # 
get", "wait for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model, loss and optimizer", "test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1 with metrics_writer.as_default(): if cfg.use_conf:", "'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode == 'us2conf': train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi'", "cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss']", "{:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \") if (epoch+1) % 10 == 0 or", "conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps,", "'us2multikey' dev_loss = bce(dev_y, pred_devs) loss = dev_loss # add FK loss if", "cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD loss if cfg.data.use_cd or", "\") print(\" \") if (epoch+1) % 10 == 0 or epoch == 0:", "fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\": # load config", "pred_devs) elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) else: # cfg.mode ==", "= tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights != \"\": model.load_weights(cfg.model.weights) # define metrics", "cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict = {} if cfg.use_conf:", "{}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1) % cfg.training.cp_interval == 0 and", "cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf':", "as pd import tensorflow as tf from data.load import DataManagement from trainer.losses import", "if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy()", "fk_loss train_fk_loss(fk_loss) # add CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss =", "if (epoch+1) % 10 == 0 or epoch == 0: print('GPU: {}, Experiment:", "if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke", "pred_devs) loss = dev_loss # add FK loss if cfg.data.use_fk: fk_loss = fke(conf_y,", "if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv of training if asked if cfg.store_csv:", "test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev", "= {} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk: row_dict['train_fk_loss']", "for no random seed) set_random_seed(cfg) # list visible devices and use allow growth", "['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load train and test datasets data_mng = DataManagement(cfg=cfg)", "@tf.function def train(timestamps, images, conf_y, dev_x, dev_y, 
cfg): with tf.GradientTape() as tape: #", "'us2conf': test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) #", "test_images, test_conf_y = test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode ==", "end=\" \") if cfg.data.use_cd: print('CD L: {:.5f}, T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\"", "'us2conf': q_loss = mse(conf_y, pred_confs) else: # cfg.mode == 'us2multimidi' or cfg.mode ==", "if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy()", "if asked if cfg.store_csv: metric_df_columns = [] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if", "train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if", "or cfg.mode == 'us2conf2multikey': test_timestamps, test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y,", "cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) loss = q_loss else: # cfg.mode", "= batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1 with metrics_writer.as_default():", "# initiate csv of training if asked if cfg.store_csv: metric_df_columns = [] if", "== 'us2conf2multikey': q_loss = mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) elif cfg.mode ==", "== 'us2multikey' dev_loss = bce(dev_y, pred_devs) loss = dev_loss # add FK loss", "'us2conf': q_loss = mse(conf_y, pred_confs) loss = q_loss else: # cfg.mode == 'us2multimidi'", "0 print('Start training...') for epoch in range(cfg.training.epochs): for batch in data_mng.train_gen: if cfg.mode", "== 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss)", "!= \"\": model.load_weights(cfg.model.weights) # define metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss =", "or cfg.mode == 'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y,", "= batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or", "numpy as np import pandas as pd import tensorflow as tf from data.load", "if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) loss += cfg.data.fk * fk_loss train_fk_loss(fk_loss) #", "data_mng = DataManagement(cfg=cfg) # wait for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create", "cd_loss = cde(conf_y, pred_confs) if cfg.data.use_cd: loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss) #", "from data.load import DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import", "cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter)", "train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(), 
step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD", "dev_loss = bce(dev_y, pred_devs) elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) else:", "fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\": # load config file cfg", "(CUDA 11 + CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) #", "= tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss = tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss')", "= fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps)", "= DataManagement(cfg=cfg) # wait for gpu if asked wait_for_gpu(gpu=str(cfg.system.gpu), memory_req=cfg.system.memory_req) # create model,", "test_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if", "conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Test C", "CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev:", "function to test @tf.function def test(timestamps, images, conf_y, dev_x, dev_y, cfg): # get", "for batch in data_mng.train_gen: if cfg.mode == 'us2conf': _, images, conf_y = batch", "row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] =", "fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss)", "step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict = {}", "== 'us2conf': _, images, conf_y = batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg)", "pred_confs = model(images) else: pred_devs = model(images) # compute task losses if cfg.mode", "= tf.keras.metrics.Mean(name='train_fk_loss') test_fk_loss = tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss')", "import os import argparse import numpy as np import pandas as pd import", "or cfg.mode == 'us2conf2multikey': timestamps, images, conf_y, dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y,", "conf_y, dev_x, dev_y, cfg): with tf.GradientTape() as tape: # get predictions if cfg.mode", "# train train_counter = 0 test_counter = 0 print('Start training...') for epoch in", "loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test", "% cfg.training.cp_interval == 0 and epoch > 0: print('Saving weights to {}'.format(cfg.output_dir)) model.save_weights(os.path.join(cfg.output_dir,", "* cd_loss train_cd_loss(cd_loss) # add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) 
fd_loss", "\"__main__\": # load config file cfg = ConfigManager(json_name=args.json, retrain=True) # set random seed", "= parser.parse_args() # tf function to train @tf.function def train(timestamps, images, conf_y, dev_x,", "= tf.keras.metrics.Mean(name='test_fk_loss') if cfg.data.use_cd: train_cd_loss = tf.keras.metrics.Mean(name='train_cd_loss') test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss", "= 0 test_counter = 0 print('Start training...') for epoch in range(cfg.training.epochs): for batch", "dev_y=dev_y, cfg=cfg) train_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(),", "step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss',", "if cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(),", "in data_mng.test_gen: if cfg.mode == 'us2conf': _, test_images, test_conf_y = test_batch test(timestamps=None, images=test_images,", "\") if cfg.data.use_fk: print('FK L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \")", "loss', test_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Test Dev loss', test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict", "tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0 test_counter = 0 print('Start", "train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing", "print('FK L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if cfg.data.use_cd: print('CD", "tf.summary.scalar('Test C loss', test_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if", "metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss']", "cfg.data.use_fd: print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \") if cfg.use_dev:", "in data_mng.train_gen: if cfg.mode == 'us2conf': _, images, conf_y = batch train(timestamps=None, images=images,", "test_images, test_conf_y, test_dev_y = test_batch test(timestamps=test_timestamps, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: #", "[] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd:", "model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss)", "# check if output folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir) # initiate csv", "for TF 2.7 (CUDA 11 + CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU')", "mse = tf.keras.losses.MeanSquaredError() if 
cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg,", "images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':", "tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Test FD loss', test_fd_loss.result(), step=test_counter) if", "test_dev_loss.result(), step=test_counter) if cfg.store_csv: row_dict = {} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss']", "set random seed (do nothing for no random seed) set_random_seed(cfg) # list visible", "print('Start training...') for epoch in range(cfg.training.epochs): for batch in data_mng.train_gen: if cfg.mode ==", "== 'us2conf': q_loss = mse(conf_y, pred_confs) else: # cfg.mode == 'us2multimidi' or cfg.mode", "cfg.data.use_cd or cfg.data.use_fd: cde = ConfigurationDynamicsError(cfg=cfg) if cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer", "batch train(timestamps=None, images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1 with metrics_writer.as_default(): if", "cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter", "row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if cfg.data.use_cd:", "seed) set_random_seed(cfg) # list visible devices and use allow growth - updated for", "or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) loss = dev_loss # add", "from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import os import argparse import numpy as", "tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables)) if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss)", "metric_df_columns += ['train_cd_loss','test_cd_loss'] if cfg.use_dev: metric_df_columns += ['train_dev_loss','test_dev_loss'] metric_df = pd.DataFrame(columns=metric_df_columns) # load", "_, images, conf_y = batch train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode", "\") if cfg.data.use_fd: print('FD L: {:.5f}, T FD L: {:.5f}.'.format(train_fd_loss.result(), test_fd_loss.result()), end=\" \")", "sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args", "= model(images) # compute task losses if cfg.mode == 'us2conf2multimidi' or cfg.mode ==", "test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss =", "model(images) # compute task losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey':", "<gh_stars>0 from __future__ import absolute_import, division, print_function, unicode_literals import sys from tensorflow.python.framework.ops import", "= model(images) elif cfg.mode == 'us2conf': pred_confs = model(images) else: pred_devs = model(images)", "* fd_loss train_fd_loss(fd_loss) # perform optimization step 
gradients = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(gradients, model.trainable_variables))", "cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) else: # cfg.mode == 'us2multimidi' or", "cfg.data.use_fd: fde = ForwardDynamicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) optimizer = tf.keras.optimizers.Adam(learning_rate=cfg.training.learning_rate) # load weights if cfg.model.weights", "define metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk: train_fk_loss", "+= cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad,", "'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter +=", "test_cd_loss = tf.keras.metrics.Mean(name='test_cd_loss') if cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev:", "cfg.mode == 'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg)", "# get predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs", "print(\" \") if (epoch+1) % 10 == 0 or epoch == 0: print('GPU:", "# cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_images, test_dev_y = test_batch test(timestamps=None,", "in range(cfg.training.epochs): for batch in data_mng.train_gen: if cfg.mode == 'us2conf': _, images, conf_y", "cfg.mode == 'us2conf2multikey': pred_confs, pred_devs = model(images) elif cfg.mode == 'us2conf': pred_confs =", "ConfigManager, wait_for_gpu, initiate_model, set_random_seed import time import subprocess as sp parser = argparse.ArgumentParser()", "cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log FK loss if", "# list visible devices and use allow growth - updated for TF 2.7", "== 'us2conf2multikey': timestamps, images, conf_y, dev_y = batch train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y,", "if cfg.data.use_fk: tf.summary.scalar('Train FK loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(),", "= tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]], 'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output folder exists if", "train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss'] = test_fd_loss.result().numpy() if", "L: {:.5f}, T Dev L: {:.5f}.'.format(train_dev_loss.result(), test_dev_loss.result()), end=\" \") print(\" \") if (epoch+1)", "conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey'", "train(timestamps=None, images=images, conf_y=conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode ==", "train(timestamps=timestamps, images=images, conf_y=conf_y, dev_x=None, dev_y=dev_y, cfg=cfg) else: # cfg.mode == 'us2multimidi' or cfg.mode", "data_mng.test_gen: if cfg.mode == 'us2conf': _, test_images, test_conf_y = test_batch test(timestamps=None, 
images=test_images, conf_y=test_conf_y,", "train_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' train_dev_loss(dev_loss) # tf", "test_c_loss(q_loss) test_dev_loss(dev_loss) elif cfg.mode == 'us2conf': test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or", "# define metrics if cfg.use_conf: train_c_loss = tf.keras.metrics.Mean(name='train_c_loss') test_c_loss = tf.keras.metrics.Mean(name='test_c_loss') if cfg.data.use_fk:", "tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter) for", "predictions if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': pred_confs, pred_devs = model(images)", "else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' images, dev_y = batch", "pd import tensorflow as tf from data.load import DataManagement from trainer.losses import ForwardKinematicsError,", "== 'us2multikey' test_dev_loss(dev_loss) # log FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs)", "== 'us2conf': test_c_loss(q_loss) else: # cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss)", "if cfg.data.use_cd: loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add FD loss if", "pred_devs = model(images) # compute task losses if cfg.mode == 'us2conf2multimidi' or cfg.mode", "tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss = tf.keras.metrics.Mean(name='test_dev_loss') metrics_writer = tf.summary.create_file_writer(cfg.output_dir) # train train_counter = 0 test_counter", "function to train @tf.function def train(timestamps, images, conf_y, dev_x, dev_y, cfg): with tf.GradientTape()", "elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': timestamps, images, conf_y, dev_y =", "unicode_literals import sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import os import argparse", "= bce(dev_y, pred_devs) elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) else: #", "FK loss', train_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd:", "# create model, loss and optimizer model = initiate_model(cfg=cfg) if cfg.use_conf: mse =", "conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': test_timestamps,", "T CD L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD L: {:.5f}, T", "# printing print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C L: {:.5f}, T C", "= 0 print('Start training...') for epoch in range(cfg.training.epochs): for batch in data_mng.train_gen: if", "row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df =", "absolute_import, division, print_function, unicode_literals import sys from tensorflow.python.framework.ops import prepend_name_scope sys.path.append('.') sys.path.append('..') import", "if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__", "== 'us2multikey' dev_loss = bce(dev_y, pred_devs) # log tasks losses if cfg.mode ==", "compute task losses if cfg.mode == 
'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss =", "losses if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': q_loss = mse(conf_y, pred_confs)", "test_images, test_dev_y = test_batch test(timestamps=None, images=test_images, conf_y=None, dev_x=None, dev_y=test_dev_y, cfg=cfg) test_counter += 1", "row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False)", "cfg.data.use_fd: tf.summary.scalar('Train FD loss', train_fd_loss.result(), step=test_counter) if cfg.use_dev: tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter)", "pred_devs) loss = 4*q_loss + dev_loss elif cfg.mode == 'us2conf': q_loss = mse(conf_y,", "+= ['train_fk_loss','test_fk_loss'] if cfg.data.use_fd: metric_df_columns += ['train_fd_loss','test_fd_loss'] if cfg.data.use_cd: metric_df_columns += ['train_cd_loss','test_cd_loss'] if", "cfg.data.use_fd: train_fd_loss = tf.keras.metrics.Mean(name='train_fd_loss') test_fd_loss = tf.keras.metrics.Mean(name='test_fd_loss') if cfg.use_dev: train_dev_loss = tf.keras.metrics.Mean(name='train_dev_loss') test_dev_loss", "test_dev_loss.result()), end=\" \") print(\" \") if (epoch+1) % 10 == 0 or epoch", "tf.summary.scalar('Train Dev loss', train_dev_loss.result(), step=test_counter) for test_batch in data_mng.test_gen: if cfg.mode == 'us2conf':", "cfg.data.use_fk: print('FK L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if cfg.data.use_cd:", "cfg.data.use_fk: row_dict['train_fk_loss'] = train_fk_loss.result().numpy() row_dict['test_fk_loss'] = test_fk_loss.result().numpy() if cfg.data.use_fd: row_dict['train_fd_loss'] = train_fd_loss.result().numpy() row_dict['test_fd_loss']", "images=images, conf_y=None, dev_x=None, dev_y=dev_y, cfg=cfg) train_counter += 1 with metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train", "step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Train CD loss', train_cd_loss.result(), step=test_counter) if cfg.data.use_fd: tf.summary.scalar('Train FD loss',", "argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() #", "'us2multikey' test_dev_loss(dev_loss) # log FK loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss)", "__name__ == \"__main__\": # load config file cfg = ConfigManager(json_name=args.json, retrain=True) # set", "dev_loss elif cfg.mode == 'us2conf': q_loss = mse(conf_y, pred_confs) loss = q_loss else:", "== 'us2conf': _, test_images, test_conf_y = test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg)", "cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss']", "(epoch+1) % 10 == 0 or epoch == 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir))", "tf.summary.scalar('Test FK loss', test_fk_loss.result(), step=test_counter) if cfg.data.use_cd: tf.summary.scalar('Test CD loss', test_cd_loss.result(), step=test_counter) if", "conf_y=test_conf_y, dev_x=None, dev_y=test_dev_y, cfg=cfg) else: # cfg.mode == 
'us2multimidi' or cfg.mode == 'us2multikey'", "add FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss", "cfg.mode == 'us2multimidi' or cfg.mode == 'us2multikey' dev_loss = bce(dev_y, pred_devs) # log", "loss if cfg.data.use_fk: fk_loss = fke(conf_y, pred_confs) test_fk_loss(fk_loss) # log CD loss if", "= cde(conf_y, pred_confs) if cfg.data.use_cd: loss += cfg.data.cd * cd_loss train_cd_loss(cd_loss) # add", "0 or epoch == 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if", "ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'), index=False) # printing print('Epoch {},'.format(epoch+1), end=\" \") if cfg.use_conf: print('C", "= test_fd_loss.result().numpy() if cfg.data.use_cd: row_dict['train_cd_loss'] = train_cd_loss.result().numpy() row_dict['test_cd_loss'] = test_cd_loss.result().numpy() if cfg.use_dev: row_dict['train_dev_loss']", "cfg): with tf.GradientTape() as tape: # get predictions if cfg.mode == 'us2conf2multimidi' or", "end=\" \") if cfg.use_conf: print('C L: {:.5f}, T C L: {:.5f}.'.format(train_c_loss.result(), test_c_loss.result()), end=\"", "fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) test_fd_loss(tf.cast(fd_loss, q_loss.dtype)) if __name__ == \"__main__\":", "'GPU') tf.config.experimental.set_memory_growth(gpus[cfg.system.gpu], True) # check if output folder exists if not os.path.isdir(cfg.output_dir): os.makedirs(cfg.output_dir)", "help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() # tf function to", "metrics_writer.as_default(): if cfg.use_conf: tf.summary.scalar('Train C loss', train_c_loss.result(), step=test_counter) if cfg.data.use_fk: tf.summary.scalar('Train FK loss',", "metric_df_columns = [] if cfg.use_conf: metric_df_columns += ['train_c_loss','test_c_loss'] if cfg.data.use_fk: metric_df_columns += ['train_fk_loss','test_fk_loss']", "row_dict = {} if cfg.use_conf: row_dict['train_c_loss'] = train_c_loss.result().numpy() row_dict['test_c_loss'] = test_c_loss.result().numpy() if cfg.data.use_fk:", "tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk: fke = ForwardKinematicsError(cfg=cfg, arm_lengths=data_mng.arm_lengths) if cfg.data.use_cd or cfg.data.use_fd: cde =", "cfg.data.use_cd: test_cd_loss(tf.cast(cd_loss, q_loss.dtype)) # log FD loss if cfg.data.use_fd: fde.set_time_and_grads(timestamps, cde.y_true_grad, cde.y_pred_grad) fd_loss", "metric_df = pd.DataFrame(columns=metric_df_columns) # load train and test datasets data_mng = DataManagement(cfg=cfg) #", "subprocess as sp parser = argparse.ArgumentParser() parser.add_argument('--json', '-json', help='name of json file', default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json',", "end=\" \") if cfg.data.use_fk: print('FK L: {:.5f}, T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\"", "import DataManagement from trainer.losses import ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu,", "= mse(conf_y, pred_confs) dev_loss = bce(dev_y, pred_devs) elif cfg.mode == 'us2conf': q_loss =", "updated for TF 2.7 (CUDA 11 + CUDNN 8.2) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.set_visible_devices([gpus[cfg.system.gpu]],", "from trainer.losses import 
ForwardKinematicsError, ConfigurationDynamicsError, ForwardDynamicsError from trainer.utils import ConfigManager, wait_for_gpu, initiate_model, set_random_seed", "cfg.use_dev: row_dict['train_dev_loss'] = train_dev_loss.result().numpy() row_dict['test_dev_loss'] = test_dev_loss.result().numpy() metric_df = metric_df.append(row_dict, ignore_index=True) metric_df.to_csv(path_or_buf=os.path.join(cfg.output_dir, 'metric.csv'),", "epoch == 0: print('GPU: {}, Experiment: {}'.format(cfg.system.gpu,cfg.output_dir)) # save model if (epoch+1) %", "initiate_model(cfg=cfg) if cfg.use_conf: mse = tf.keras.losses.MeanSquaredError() if cfg.use_dev: bce = tf.keras.losses.BinaryCrossentropy(from_logits=True) if cfg.data.use_fk:", "'us2conf': _, test_images, test_conf_y = test_batch test(timestamps=None, images=test_images, conf_y=test_conf_y, dev_x=None, dev_y=None, cfg=cfg) elif", "train @tf.function def train(timestamps, images, conf_y, dev_x, dev_y, cfg): with tf.GradientTape() as tape:", "# add CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs)", "if cfg.mode == 'us2conf2multimidi' or cfg.mode == 'us2conf2multikey': train_c_loss(q_loss) train_dev_loss(dev_loss) elif cfg.mode ==", "== 'us2multimidi' or cfg.mode == 'us2multikey' test_dev_loss(dev_loss) # log FK loss if cfg.data.use_fk:", "L: {:.5f}.'.format(train_cd_loss.result(), test_cd_loss.result()), end=\" \") if cfg.data.use_fd: print('FD L: {:.5f}, T FD L:", "model(images) else: pred_devs = model(images) # compute task losses if cfg.mode == 'us2conf2multimidi'", "cde.y_true_grad, cde.y_pred_grad) fd_loss = fde(conf_y, pred_confs) loss += cfg.data.fd * fd_loss train_fd_loss(fd_loss) #", "if (epoch+1) % cfg.training.cp_interval == 0 and epoch > 0: print('Saving weights to", "T FK L: {:.5f}.'.format(train_fk_loss.result(), test_fk_loss.result()), end=\" \") if cfg.data.use_cd: print('CD L: {:.5f}, T", "log CD loss if cfg.data.use_cd or cfg.data.use_fd: cde.set_time(timestamps) cd_loss = cde(conf_y, pred_confs) if", "default='config/mfm/train_mfm_unet_aida_all_us2multikey_debug.json', type=str) args = parser.parse_args() # tf function to train @tf.function def train(timestamps,", "import prepend_name_scope sys.path.append('.') sys.path.append('..') import os import argparse import numpy as np import" ]
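One quirk worth flagging in the training script above: the tf.keras.metrics.Mean trackers are never reset, so the printed "per-epoch" losses are running averages over the entire run rather than true per-epoch means. If per-epoch means are wanted, a minimal sketch of a reset helper follows; reset_metrics is a hypothetical addition for illustration, not part of the original script.

# Hypothetical helper (not in the original script): call at the top of each
# epoch so every tf.keras.metrics.Mean starts the epoch from zero.
def reset_metrics(*metrics):
    for m in metrics:
        if m is not None:
            m.reset_states()  # named reset_state() in newer TF releases

# e.g. at the start of the epoch loop:
# reset_metrics(train_c_loss, test_c_loss, train_dev_loss, test_dev_loss)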
"""
Copyright 2021 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Helper methods for Gym environment registration."""

import logging

from gym.envs import registration as gym_reg


def register(env_id: str, class_path: str, **kwargs):
  """Registers the given class path as a Gym environment.

  Args:
    env_id: The ID to register the environment as.
    class_path: The fully-qualified class path of the environment.
    **kwargs: Key-word arguments to pass to gym's register function.
  """
  if env_id in gym_reg.registry.env_specs:
    # This may happen during test discovery.
    logging.warning('Re-registering environment %s', env_id)
    del gym_reg.registry.env_specs[env_id]
  gym_reg.register(env_id, entry_point=class_path, **kwargs)
<filename>moztrap/settings/default.py
from .base import *

try:
    from .local import *
except ImportError:
    pass


CACHES["default"]["VERSION"] = 1

if DEBUG:
    MIDDLEWARE_CLASSES.insert(
        0, "moztrap.debug.middleware.AjaxTracebackMiddleware")

try:
    HMAC_KEYS
except NameError:
    HMAC_KEYS = {"default": SECRET_KEY}

LOGGING["handlers"]["null"] = {
    'level': 'DEBUG',
    'class': 'django.utils.log.NullHandler',
}
LOGGING["loggers"]["moztrap"] = {
    "handlers": ["null"],  # replace this in local.py if you want logging
    "level": "ERROR",
    "propagate": True,
}
<reponame>bonesbb/HASPR<filename>Analysis Scripts/supply_demand_mismatch.py
# HASPR - High-Altitude Solar Power Research
# Script to get alleviation of supply/demand mismatch given generation profiles
# Version 0.1
# Author: neyring

from os import walk
import haspr
from haspr import Result
from haspr import Dataset
from numpy import genfromtxt

# PARAMETERS #

# path to .csv file of supply/demand mismatch data (Wh, UTC, 30min res, no leap days):
mismatchPath = "D:\\00_Results\\03_Supply Demand Mismatch\\5_2018 Mismatch - 30min res - UTC time.csv"
# directory containing generation profiles (30min res, Wh) to run our analyses on (without leap days):
inputDirectory = "D:\\00_Results\\03_Supply Demand Mismatch\\In"
# directory to write output to:
haspr.outputDirectory = "D:\\00_Results\\03_Supply Demand Mismatch\\Case 5 - 30 to 65 deg winter opt"
# OS path delimiter ("\\" for windows, "/" for unix):
haspr.osPathDelimiter = "\\"

# extract mismatch data:
mismatch = Dataset("mismatch")
haspr.get_csv_data(mismatchPath, mismatch)
timestamps = []
mismatch_values = []
for p in mismatch.payload:
    timestamps.append(str(p[0]))
    mismatch_values.append(float(p[1]))

# get all file names in inputDirectory:
file_names = []
for (dirpath, dirnames, filenames) in walk(inputDirectory):
    file_names.extend(filenames)

# cycle through files and build result objects:
results = []
for f in file_names:
    file_path = inputDirectory + haspr.osPathDelimiter + f
    # get generation profile:
    extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1)
    gen_values = extracted_array[:, 1]  # we only want generation values
    # calculate import offset:
    current_import_offset = []
    for i in range(17520):
        total_import = (-1) * mismatch_values[i]  # -ve value for mismatch => import
        generation = gen_values[i]
        import_offset = 0.0
        if total_import > 0:
            import_offset = min(generation, total_import)  # can't offset more than total imports
        current_import_offset.append(import_offset)
    # build current result object:
    result_title = f[0:len(f) - 4] + " - import offset"
    current_result = Result(result_title)
    current_result.payload.append("Time (UTC), Reduction in Imports [Wh]")
    for j in range(17520):
        str_to_append = str(timestamps[j]) + ", " + str(current_import_offset[j])
        current_result.payload.append(str_to_append)
    results.append(current_result)

# dump all results:
for r in results:
    r.dump()
haspr.osPathDelimiter + f # get generation profile: extracted_array = genfromtxt(file_path, delimiter=',',", "total imports current_import_offset.append(import_offset) # build current result object: result_title = f[0:len(f) - 4]", "current result object: result_title = f[0:len(f) - 4] + \" - import offset\"", "profiles # Version 0.1 # Author: neyring from os import walk import haspr", "Reduction in Imports [Wh]\") for j in range(17520): str_to_append = str(timestamps[j]) + \",", "offset: current_import_offset = [] for i in range(17520): total_import = (-1) * mismatch_values[i]", "names in inputDirectory: file_names = [] for (dirpath, dirnames, filenames) in walk(inputDirectory): file_names.extend(filenames)", "run our analyses on (without leap days): inputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\In\" #", "on (without leap days): inputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\In\" # directory to write", "file names in inputDirectory: file_names = [] for (dirpath, dirnames, filenames) in walk(inputDirectory):", "min(generation, total_import) # can't offset more than total imports current_import_offset.append(import_offset) # build current", "file_names = [] for (dirpath, dirnames, filenames) in walk(inputDirectory): file_names.extend(filenames) # cycle through", "Scripts/supply_demand_mismatch.py # HASPR - High-Altitude Solar Power Research # Script to get alleviation", "= \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\5_2018 Mismatch - 30min res - UTC time.csv\" # directory", "import Result from haspr import Dataset from numpy import genfromtxt # PARAMETERS #", "haspr from haspr import Result from haspr import Dataset from numpy import genfromtxt", "mismatch_values[i] # -ve value for mismatch => import generation = gen_values[i] import_offset =", "30min res, no leap days): mismatchPath = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\5_2018 Mismatch - 30min", "import genfromtxt # PARAMETERS # # path to .csv file of supply/demand mismatch", "Version 0.1 # Author: neyring from os import walk import haspr from haspr", "mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch) timestamps = [] mismatch_values = [] for p", "1] # we only want generation values # calculate import offset: current_import_offset =", "delimiter=',', skip_header=1) gen_values = extracted_array[:, 1] # we only want generation values #", "= (-1) * mismatch_values[i] # -ve value for mismatch => import generation =", "haspr import Result from haspr import Dataset from numpy import genfromtxt # PARAMETERS", "file_names.extend(filenames) # cycle through files and build result objects: results = [] for", "haspr.osPathDelimiter + f # get generation profile: extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1) gen_values", "os import walk import haspr from haspr import Result from haspr import Dataset", "alleviation of supply/demand mismatch given generation profiles # Version 0.1 # Author: neyring", "Research # Script to get alleviation of supply/demand mismatch given generation profiles #", "str_to_append = str(timestamps[j]) + \", \" + str(current_import_offset[j]) current_result.payload.append(str_to_append) results.append(current_result) # dump all", "output to: haspr.outputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\Case 5 - 30 to 65 deg", "offset more than total imports current_import_offset.append(import_offset) # build current result object: result_title =", "\", \" + str(current_import_offset[j]) 
current_result.payload.append(str_to_append) results.append(current_result) # dump all results: for r in", "= [] for p in mismatch.payload: timestamps.append(str(p[0])) mismatch_values.append(float(p[1])) # get all file names", "import_offset = min(generation, total_import) # can't offset more than total imports current_import_offset.append(import_offset) #", "skip_header=1) gen_values = extracted_array[:, 1] # we only want generation values # calculate", "res, no leap days): mismatchPath = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\5_2018 Mismatch - 30min res", "directory to write output to: haspr.outputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\Case 5 - 30", "\"\\\\\" # extract mismatch data: mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch) timestamps = []", "all file names in inputDirectory: file_names = [] for (dirpath, dirnames, filenames) in", "mismatchPath = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\5_2018 Mismatch - 30min res - UTC time.csv\" #", "we only want generation values # calculate import offset: current_import_offset = [] for", "= [] for i in range(17520): total_import = (-1) * mismatch_values[i] # -ve", "i in range(17520): total_import = (-1) * mismatch_values[i] # -ve value for mismatch", "- 4] + \" - import offset\" current_result = Result(result_title) current_result.payload.append(\"Time (UTC), Reduction", "mismatch data: mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch) timestamps = [] mismatch_values = []", "import Dataset from numpy import genfromtxt # PARAMETERS # # path to .csv", "mismatch) timestamps = [] mismatch_values = [] for p in mismatch.payload: timestamps.append(str(p[0])) mismatch_values.append(float(p[1]))", "current_import_offset.append(import_offset) # build current result object: result_title = f[0:len(f) - 4] + \"", "# # path to .csv file of supply/demand mismatch data (Wh, UTC, 30min", "for unix)\" haspr.osPathDelimiter = \"\\\\\" # extract mismatch data: mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath,", "path to .csv file of supply/demand mismatch data (Wh, UTC, 30min res, no", "+ f # get generation profile: extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1) gen_values =", "calculate import offset: current_import_offset = [] for i in range(17520): total_import = (-1)", "unix)\" haspr.osPathDelimiter = \"\\\\\" # extract mismatch data: mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch)", "=> import generation = gen_values[i] import_offset = 0.0 if total_import > 0: import_offset", "for j in range(17520): str_to_append = str(timestamps[j]) + \", \" + str(current_import_offset[j]) current_result.payload.append(str_to_append)", "f in file_names: file_path = inputDirectory + haspr.osPathDelimiter + f # get generation", "= Result(result_title) current_result.payload.append(\"Time (UTC), Reduction in Imports [Wh]\") for j in range(17520): str_to_append", "profiles (30min res, Wh) to run our analyses on (without leap days): inputDirectory", "haspr.osPathDelimiter = \"\\\\\" # extract mismatch data: mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch) timestamps", "get generation profile: extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1) gen_values = extracted_array[:, 1] #", "of supply/demand mismatch data (Wh, UTC, 30min res, no leap days): mismatchPath =", "+ \" - import offset\" current_result = Result(result_title) current_result.payload.append(\"Time (UTC), 
Reduction in Imports", "build current result object: result_title = f[0:len(f) - 4] + \" - import", "res, Wh) to run our analyses on (without leap days): inputDirectory = \"D:\\\\00_Results\\\\03_Supply", "timestamps.append(str(p[0])) mismatch_values.append(float(p[1])) # get all file names in inputDirectory: file_names = [] for", "# path to .csv file of supply/demand mismatch data (Wh, UTC, 30min res,", "more than total imports current_import_offset.append(import_offset) # build current result object: result_title = f[0:len(f)", "result_title = f[0:len(f) - 4] + \" - import offset\" current_result = Result(result_title)", "to .csv file of supply/demand mismatch data (Wh, UTC, 30min res, no leap", "leap days): inputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\In\" # directory to write output to:", "\"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\Case 5 - 30 to 65 deg winter opt\" # OS", "-ve value for mismatch => import generation = gen_values[i] import_offset = 0.0 if", "mismatch data (Wh, UTC, 30min res, no leap days): mismatchPath = \"D:\\\\00_Results\\\\03_Supply Demand", "import haspr from haspr import Result from haspr import Dataset from numpy import", "# can't offset more than total imports current_import_offset.append(import_offset) # build current result object:", "Result(result_title) current_result.payload.append(\"Time (UTC), Reduction in Imports [Wh]\") for j in range(17520): str_to_append =", "# Version 0.1 # Author: neyring from os import walk import haspr from", "mismatch given generation profiles # Version 0.1 # Author: neyring from os import", ".csv file of supply/demand mismatch data (Wh, UTC, 30min res, no leap days):", "# extract mismatch data: mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch) timestamps = [] mismatch_values", "f # get generation profile: extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1) gen_values = extracted_array[:,", "Mismatch - 30min res - UTC time.csv\" # directory containing generation profiles (30min", "Wh) to run our analyses on (without leap days): inputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand", "5 - 30 to 65 deg winter opt\" # OS path delimiter (\"\\\\\"", "generation profiles (30min res, Wh) to run our analyses on (without leap days):", "import offset: current_import_offset = [] for i in range(17520): total_import = (-1) *", "Result from haspr import Dataset from numpy import genfromtxt # PARAMETERS # #", "if total_import > 0: import_offset = min(generation, total_import) # can't offset more than", "our analyses on (without leap days): inputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\In\" # directory", "- 30min res - UTC time.csv\" # directory containing generation profiles (30min res,", "and build result objects: results = [] for f in file_names: file_path =", "objects: results = [] for f in file_names: file_path = inputDirectory + haspr.osPathDelimiter", "4] + \" - import offset\" current_result = Result(result_title) current_result.payload.append(\"Time (UTC), Reduction in", "Demand Mismatch\\\\5_2018 Mismatch - 30min res - UTC time.csv\" # directory containing generation", "deg winter opt\" # OS path delimiter (\"\\\\\" for windows, \"/\" for unix)\"", "to get alleviation of supply/demand mismatch given generation profiles # Version 0.1 #", "through files and build result objects: results = [] for f in file_names:", "import walk import haspr from haspr import Result from haspr import Dataset from", "extracted_array[:, 1] # we only want 
generation values # calculate import offset: current_import_offset", "to: haspr.outputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\Case 5 - 30 to 65 deg winter", "neyring from os import walk import haspr from haspr import Result from haspr", "0: import_offset = min(generation, total_import) # can't offset more than total imports current_import_offset.append(import_offset)", "= \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\Case 5 - 30 to 65 deg winter opt\" #", "(\"\\\\\" for windows, \"/\" for unix)\" haspr.osPathDelimiter = \"\\\\\" # extract mismatch data:", "result object: result_title = f[0:len(f) - 4] + \" - import offset\" current_result", "UTC time.csv\" # directory containing generation profiles (30min res, Wh) to run our", "OS path delimiter (\"\\\\\" for windows, \"/\" for unix)\" haspr.osPathDelimiter = \"\\\\\" #", "[] for f in file_names: file_path = inputDirectory + haspr.osPathDelimiter + f #", "of supply/demand mismatch given generation profiles # Version 0.1 # Author: neyring from", "for p in mismatch.payload: timestamps.append(str(p[0])) mismatch_values.append(float(p[1])) # get all file names in inputDirectory:", "# get all file names in inputDirectory: file_names = [] for (dirpath, dirnames,", "(UTC), Reduction in Imports [Wh]\") for j in range(17520): str_to_append = str(timestamps[j]) +", "from haspr import Dataset from numpy import genfromtxt # PARAMETERS # # path", "HASPR - High-Altitude Solar Power Research # Script to get alleviation of supply/demand", "walk import haspr from haspr import Result from haspr import Dataset from numpy", "to 65 deg winter opt\" # OS path delimiter (\"\\\\\" for windows, \"/\"", "profile: extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1) gen_values = extracted_array[:, 1] # we only", "str(timestamps[j]) + \", \" + str(current_import_offset[j]) current_result.payload.append(str_to_append) results.append(current_result) # dump all results: for", "gen_values = extracted_array[:, 1] # we only want generation values # calculate import", "from os import walk import haspr from haspr import Result from haspr import", "30min res - UTC time.csv\" # directory containing generation profiles (30min res, Wh)", "30 to 65 deg winter opt\" # OS path delimiter (\"\\\\\" for windows,", "Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch) timestamps = [] mismatch_values = [] for p in mismatch.payload:", "inputDirectory: file_names = [] for (dirpath, dirnames, filenames) in walk(inputDirectory): file_names.extend(filenames) # cycle", "in range(17520): total_import = (-1) * mismatch_values[i] # -ve value for mismatch =>", "Author: neyring from os import walk import haspr from haspr import Result from", "data (Wh, UTC, 30min res, no leap days): mismatchPath = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\5_2018", "get all file names in inputDirectory: file_names = [] for (dirpath, dirnames, filenames)", "get alleviation of supply/demand mismatch given generation profiles # Version 0.1 # Author:", "= [] for (dirpath, dirnames, filenames) in walk(inputDirectory): file_names.extend(filenames) # cycle through files", "extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1) gen_values = extracted_array[:, 1] # we only want", "values # calculate import offset: current_import_offset = [] for i in range(17520): total_import", "file of supply/demand mismatch data (Wh, UTC, 30min res, no leap days): mismatchPath", "p in mismatch.payload: timestamps.append(str(p[0])) 
mismatch_values.append(float(p[1])) # get all file names in inputDirectory: file_names", "in mismatch.payload: timestamps.append(str(p[0])) mismatch_values.append(float(p[1])) # get all file names in inputDirectory: file_names =", "j in range(17520): str_to_append = str(timestamps[j]) + \", \" + str(current_import_offset[j]) current_result.payload.append(str_to_append) results.append(current_result)", "65 deg winter opt\" # OS path delimiter (\"\\\\\" for windows, \"/\" for", "write output to: haspr.outputDirectory = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\Case 5 - 30 to 65", "= min(generation, total_import) # can't offset more than total imports current_import_offset.append(import_offset) # build", "in Imports [Wh]\") for j in range(17520): str_to_append = str(timestamps[j]) + \", \"", "# Script to get alleviation of supply/demand mismatch given generation profiles # Version", "delimiter (\"\\\\\" for windows, \"/\" for unix)\" haspr.osPathDelimiter = \"\\\\\" # extract mismatch", "extract mismatch data: mismatch = Dataset(\"mismatch\") haspr.get_csv_data(mismatchPath, mismatch) timestamps = [] mismatch_values =", "total_import) # can't offset more than total imports current_import_offset.append(import_offset) # build current result", "want generation values # calculate import offset: current_import_offset = [] for i in", "no leap days): mismatchPath = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\5_2018 Mismatch - 30min res -", "in inputDirectory: file_names = [] for (dirpath, dirnames, filenames) in walk(inputDirectory): file_names.extend(filenames) #", "= [] mismatch_values = [] for p in mismatch.payload: timestamps.append(str(p[0])) mismatch_values.append(float(p[1])) # get", "(dirpath, dirnames, filenames) in walk(inputDirectory): file_names.extend(filenames) # cycle through files and build result", "(Wh, UTC, 30min res, no leap days): mismatchPath = \"D:\\\\00_Results\\\\03_Supply Demand Mismatch\\\\5_2018 Mismatch", "range(17520): total_import = (-1) * mismatch_values[i] # -ve value for mismatch => import" ]
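# The per-timestep rule above is a clamped minimum: the offset is
# min(generation_i, -mismatch_i) whenever the mismatch is negative (an import),
# else 0. A minimal numpy sketch of the same rule on toy data follows; the
# *_toy names are illustrative only and not part of the HASPR API.
import numpy as np

mismatch_toy = np.array([-120.0, 40.0, -300.0, -10.0])  # -ve => import [Wh]
generation_toy = np.array([100.0, 80.0, 500.0, 0.0])    # generation [Wh]

total_import_toy = np.clip(-mismatch_toy, 0.0, None)    # imports only, else 0
import_offset_toy = np.minimum(generation_toy, total_import_toy)
print(import_offset_toy)  # [100.   0. 300.   0.]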
[ "context): dynamodb = boto3.resource('dynamodb') table = dynamodb.Table('<DBName>') result = table.scan( ProjectionExpression='subject', ) return", "def lambda_handler(event, context): dynamodb = boto3.resource('dynamodb') table = dynamodb.Table('<DBName>') result = table.scan( ProjectionExpression='subject',", "lambda_handler(event, context): dynamodb = boto3.resource('dynamodb') table = dynamodb.Table('<DBName>') result = table.scan( ProjectionExpression='subject', )", "import boto3 def lambda_handler(event, context): dynamodb = boto3.resource('dynamodb') table = dynamodb.Table('<DBName>') result =", "dynamodb = boto3.resource('dynamodb') table = dynamodb.Table('<DBName>') result = table.scan( ProjectionExpression='subject', ) return (result['Items'])", "<gh_stars>0 import boto3 def lambda_handler(event, context): dynamodb = boto3.resource('dynamodb') table = dynamodb.Table('<DBName>') result", "boto3 def lambda_handler(event, context): dynamodb = boto3.resource('dynamodb') table = dynamodb.Table('<DBName>') result = table.scan(" ]
[ "pytest from . import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance):", "test_check(aggregator, check, instance): check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only", "Inc. 2019 # All rights reserved # Licensed under Simplified BSD License (see", "reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check, instance_blacklist): check.check(instance_blacklist) for metric", "platform import pytest from . import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator,", "pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance) for metric in common.EXPECTED_METRICS:", "2019 # All rights reserved # Licensed under Simplified BSD License (see LICENSE)", "License (see LICENSE) import platform import pytest from . import common pytestmark =", "def test_check(aggregator, check, instance): check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux',", "in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def", "LICENSE) import platform import pytest from . import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\")", "# (C) Datadog, Inc. 2019 # All rights reserved # Licensed under Simplified", "(see LICENSE) import platform import pytest from . import common pytestmark = pytest.mark.integration", "from . import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance)", "@pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() !=", "(C) Datadog, Inc. 2019 # All rights reserved # Licensed under Simplified BSD", "# Licensed under Simplified BSD License (see LICENSE) import platform import pytest from", "on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check, instance_blacklist): check.check(instance_blacklist) for metric in common.CONNTRACK_METRICS:", "import pytest from . import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check,", "rights reserved # Licensed under Simplified BSD License (see LICENSE) import platform import", "@pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check, instance_blacklist):", "common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator,", "reserved # Licensed under Simplified BSD License (see LICENSE) import platform import pytest", "BSD License (see LICENSE) import platform import pytest from . 
import common pytestmark", "!= 'Linux', reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check, instance_blacklist): check.check(instance_blacklist)", "'Linux', reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check, instance_blacklist): check.check(instance_blacklist) for", "Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check, instance_blacklist): check.check(instance_blacklist) for metric in common.CONNTRACK_METRICS: aggregator.assert_metric(metric)", "for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on Unix systems\")", "aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check,", "= pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric)", "runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\") def test_check_linux(aggregator, check, instance_blacklist): check.check(instance_blacklist) for metric in", "Licensed under Simplified BSD License (see LICENSE) import platform import pytest from .", "common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance) for metric in", "Simplified BSD License (see LICENSE) import platform import pytest from . import common", "Datadog, Inc. 2019 # All rights reserved # Licensed under Simplified BSD License", "check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on Unix", "import platform import pytest from . import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def", "instance): check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on", "All rights reserved # Licensed under Simplified BSD License (see LICENSE) import platform", "import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance) for metric", "under Simplified BSD License (see LICENSE) import platform import pytest from . import", "<reponame>glasser/integrations-core<gh_stars>1-10 # (C) Datadog, Inc. 2019 # All rights reserved # Licensed under", "check, instance): check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs", "pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance) for metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system()", ". 
import common pytestmark = pytest.mark.integration @pytest.mark.usefixtures(\"dd_environment\") def test_check(aggregator, check, instance): check.check(instance) for", "# All rights reserved # Licensed under Simplified BSD License (see LICENSE) import", "metric in common.EXPECTED_METRICS: aggregator.assert_metric(metric) @pytest.mark.skipif(platform.system() != 'Linux', reason=\"Only runs on Unix systems\") @pytest.mark.usefixtures(\"dd_environment\")" ]
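# The platform gate above is plain pytest: skipif is evaluated at collection
# time, so the Linux-only conntrack test reports as skipped on other systems
# instead of failing. A self-contained toy sketch of the same pattern (not
# part of the Datadog suite):
import platform

import pytest


@pytest.mark.skipif(platform.system() != 'Linux', reason="requires /proc")
def test_proc_uptime_readable():
    # stand-in for a check that, like conntrack metrics, only exists on Linux
    with open('/proc/uptime') as f:
        assert float(f.read().split()[0]) >= 0.0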
[ "'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'] adult_data = pd.DataFrame(raw_data, columns=column_names) #", "zip(types, sum_clicks): # x[f\"{t}_click\"] = c return pd.DataFrame({f\"{t}_click\": c for t, c in", "2] = x3 data[:, 3] = x4 data[:, 4] = np.array(y > .5,", "return pd.DataFrame({f\"{t}_click\": c for t, c in zip(types, sum_clicks)}, index=[0]) print('loading pandas dataframes...')", "data_size = len(data) cat_len = len(config['discret_cols']) cont_len = len(config['continous_cols']) r['size'][data_name] = data_size r['#", "'subpage_click', 'url_click', 'dataplus_click', 'glossary_click', 'oucollaborate_click', 'quiz_click', 'ouelluminate_click', 'sharedsubpage_click', 'questionnaire_click', 'page_click', 'externalquiz_click', 'ouwiki_click', 'dualpane_click',", "https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for data analysis based on https://rpubs.com/H_Zhu/235617 :return adult_data:", "axis=1).reset_index() grouped_vle = sumed_vle.groupby( ['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df =", "dataset. copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path is None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',", "x1 - 5 def x1x2_to_x4(x1, x2): return x1 * np.log(x2 ** 2) /", "assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...') # vle", "student_info = pd.read_csv(f'{path}/studentInfo.csv') student_regist = pd.read_csv(f'{path}/studentRegistration.csv') student_vle = pd.read_csv(f'{path}/studentVle.csv') vle = pd.read_csv(f'{path}/vle.csv') print('preprocessing", "str): if os.path.exists(file_name): old_param = load_configs(file_name) else: old_param = {} # copy to", "'sharedsubpage_click', 'questionnaire_click', 'page_click', 'externalquiz_click', 'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education',", "sum_clicks = x['sum_click'] # for t, c in zip(types, sum_clicks): # x[f\"{t}_click\"] =", "describe(configs: List[Dict[str, Dict[str, Any]]]): r = {\"size\": {}, \"# of Cont\": {}, \"#", "# if os.path.exists(file_name): # raise FileNotFoundError(f\"{file_name} is not found.\") with open(file_name) as json_file:", "def x1_to_x3(x1): return 1/3 * x1 - 5 def x1x2_to_x4(x1, x2): return x1", "made, please refer to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype( {\"age\": np.int64, \"educational-num\": np.int64, \"hours-per-week\":", "'School', '12th': 'School', '5th-6th': 'School', '1st-4th': 'School', 'Preschool': 'School'}}) adult_data = adult_data.rename( columns={'marital-status':", "sum_clicks): # x[f\"{t}_click\"] = c return pd.DataFrame({f\"{t}_click\": c for t, c in zip(types,", "x3, x4): def sigmoid(x): return 1 / (1 + np.exp(-x)) return sigmoid(10.5 *", "adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data =", "x['weight']) / sum(x['weight']) return pd.DataFrame(d, index=[0]) def clicks(x): types = x['activity_type'] sum_clicks =", "count for submitted assessment, not weighted for unsubmitted 
ones assessment_merged = student_assessment.merge(assessment) assessment_grouped", "is None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str ) else: raw_data =", "x1x2_to_x4(x1, x2): return x1 * np.log(x2 ** 2) / 10 - 10 def", "{} total_weight = sum(x['weight']) d['weight'] = total_weight if sum(x['weight']) == 0: d['weighted_score'] =", "index=[0]) def clicks(x): types = x['activity_type'] sum_clicks = x['sum_click'] # for t, c", "return adult_data # Cell def load_learning_analytic_data(path='assets/data/oulad'): def weighted_score(x): d = {} total_weight =", "10000) * x3 + 1e-3 * x4) def x1_to_x3(x1): return 1/3 * x1", "student_regist = pd.read_csv(f'{path}/studentRegistration.csv') student_vle = pd.read_csv(f'{path}/studentVle.csv') vle = pd.read_csv(f'{path}/vle.csv') print('preprocessing assessment...') # note:", "'5th-6th': 'School', '1st-4th': 'School', 'Preschool': 'School'}}) adult_data = adult_data.rename( columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'})", "'homepage_click', 'oucontent_click', 'resource_click', 'subpage_click', 'url_click', 'dataplus_click', 'glossary_click', 'oucollaborate_click', 'quiz_click', 'ouelluminate_click', 'sharedsubpage_click', 'questionnaire_click', 'page_click',", "path is None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str ) else: raw_data", "def weighted_score(x): d = {} total_weight = sum(x['weight']) d['weight'] = total_weight if sum(x['weight'])", "'highest_education', 'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']] # Cell def describe(configs: List[Dict[str, Dict[str, Any]]]):", "for submitted assessment, not weighted for unsubmitted ones assessment_merged = student_assessment.merge(assessment) assessment_grouped =", "2) / 10 - 10 def bn_gen(): \"\"\" modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\"", "{'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc', '11th': 'School', '10th': 'School', '7th-8th': 'School', '9th': 'School', '12th':", "15, 10000) x2 = np.random.normal(35, 17, 10000) x3 = x1_to_x3(x1) + np.random.normal(0, 1,", "adult_data # Cell def load_learning_analytic_data(path='assets/data/oulad'): def weighted_score(x): d = {} total_weight = sum(x['weight'])", "= x1 data[:, 1] = x2 data[:, 2] = x3 data[:, 3] =", "'id_student']) sumed_vle = grouped_vle.sum().drop( ['id_site', 'date', 'week_from', 'week_to'], axis=1).reset_index() grouped_vle = sumed_vle.groupby( ['code_module',", "= grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\\ .merge(vle_df, on=['code_module', 'code_presentation',", "x4) data = np.zeros((x1.shape[0], 5)) data[:, 0] = x1 data[:, 1] = x2", "on=['code_module', 'code_presentation', 'id_student'], how='left')\\ .merge(vle_df, on=['code_module', 'code_presentation', 'id_student'], how='left') return student_df[['num_of_prev_attempts', 'weight', 'weighted_score',", "= adult_data.replace({'education': {'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc', '11th': 'School', '10th': 'School', '7th-8th': 'School', '9th':", "5)) data[:, 0] = x1 data[:, 1] = x2 data[:, 2] = x3", "file_name) return old_param # Cell def bn_func(x1, x2, x3, x4): def sigmoid(x): return", "Cell from .import_essentials import * # Cell def 
dict2json(dictionary: Dict[str, Any], file_name: str):", "pd.read_csv(f'{path}/courses.csv') student_assessment = pd.read_csv(f'{path}/studentAssessment.csv') student_info = pd.read_csv(f'{path}/studentInfo.csv') student_regist = pd.read_csv(f'{path}/studentRegistration.csv') student_vle = pd.read_csv(f'{path}/studentVle.csv')", "{\"size\": {}, \"# of Cont\": {}, \"# of Cat\": {}} for data_name, config", "Cont\": {}, \"# of Cat\": {}} for data_name, config in configs: data =", "d = {} total_weight = sum(x['weight']) d['weight'] = total_weight if sum(x['weight']) == 0:", "x2) + np.random.normal(0, 1, 10000) y = bn_func(x1, x2, x3, x4) data =", "'Other/Unknown'}}) adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data", "'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'] adult_data = pd.DataFrame(raw_data,", "found.\") with open(file_name) as json_file: return json.load(json_file) def update_json_file(param: dict, file_name: str): if", "sigmoid(x): return 1 / (1 + np.exp(-x)) return sigmoid(10.5 * ((x1 * x2)", "'Never-married': 'Single'}}) adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo': 'Other'}}) adult_data =", "sigmoid(10.5 * ((x1 * x2) / 8100) + 10 - np.random.normal(1, 0.1, 10000)", "10000) y = bn_func(x1, x2, x3, x4) data = np.zeros((x1.shape[0], 5)) data[:, 0]", "1, 10000) y = bn_func(x1, x2, x3, x4) data = np.zeros((x1.shape[0], 5)) data[:,", "'resource_click', 'subpage_click', 'url_click', 'dataplus_click', 'glossary_click', 'oucollaborate_click', 'quiz_click', 'ouelluminate_click', 'sharedsubpage_click', 'questionnaire_click', 'page_click', 'externalquiz_click', 'ouwiki_click',", "+ np.exp(-x)) return sigmoid(10.5 * ((x1 * x2) / 8100) + 10 -", "on https://rpubs.com/H_Zhu/235617 :return adult_data: returns preprocessed adult income dataset. 
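# A hedged round-trip sketch for the three JSON helpers above, run against a
# temporary file (the config keys are made up for illustration):
import json
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    cfg = os.path.join(tmp, "config.json")
    dict2json({"lr": 0.01, "epochs": 10}, cfg)          # write a fresh file
    update_json_file({"epochs": 20, "seed": 42}, cfg)   # merge into existing
    assert load_configs(cfg) == {"lr": 0.01, "epochs": 20, "seed": 42}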
# Cell
def bn_func(x1, x2, x3, x4):
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))
    return sigmoid(10.5 * ((x1 * x2) / 8100) + 10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4)


def x1_to_x3(x1):
    return 1/3 * x1 - 5


def x1x2_to_x4(x1, x2):
    return x1 * np.log(x2 ** 2) / 10 - 10


def bn_gen():
    """modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py"""
    x1 = np.random.normal(50, 15, 10000)
    x2 = np.random.normal(35, 17, 10000)
    x3 = x1_to_x3(x1) + np.random.normal(0, 1, 10000)
    x4 = x1x2_to_x4(x1, x2) + np.random.normal(0, 1, 10000)
    y = bn_func(x1, x2, x3, x4)
    data = np.zeros((x1.shape[0], 5))
    data[:, 0] = x1
    data[:, 1] = x2
    data[:, 2] = x3
    data[:, 3] = x4
    data[:, 4] = np.array(y > .5, dtype=np.int)
    return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4', 'y'])
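# Quick usage sketch for bn_gen (numpy/pandas assumed in scope via
# import_essentials; output varies per run since no seed is set). Note that
# dtype=np.int above was removed in NumPy >= 1.24; plain int is the drop-in
# replacement on a recent NumPy.
df = bn_gen()
print(df.shape)        # (10000, 5)
print(df['y'].mean())  # fraction of positive labels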
# Cell
def load_adult_income_dataset(path=None):
    """Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares
    the data for data analysis based on https://rpubs.com/H_Zhu/235617

    :return adult_data: returns preprocessed adult income dataset.
    copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py
    """
    if path is None:
        raw_data = np.genfromtxt(
            'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
            delimiter=', ', dtype=str
        )
    else:
        raw_data = np.genfromtxt(
            path, delimiter=', ', dtype=str
        )

    # column names from "https://archive.ics.uci.edu/ml/datasets/Adult"
    column_names = ['age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status',
                    'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss',
                    'hours-per-week', 'native-country', 'income']
    adult_data = pd.DataFrame(raw_data, columns=column_names)

    # For more details on how the below transformations are made, please refer to https://rpubs.com/H_Zhu/235617
    adult_data = adult_data.astype(
        {"age": np.int64, "educational-num": np.int64, "hours-per-week": np.int64})
    adult_data = adult_data.replace(
        {'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}})
    adult_data = adult_data.replace({'workclass': {
        'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}})
    # NB: the leading keys of the next two mappings were lost in extraction;
    # they are filled in from the DiCE helpers.py cited in the docstring:
    adult_data = adult_data.replace({'workclass': {
        'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}})
    adult_data = adult_data.replace({'workclass': {
        'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}})
    adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}})
    adult_data = adult_data.replace({'occupation': {
        'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar', 'Exec-managerial': 'White-Collar',
        'Farming-fishing': 'Blue-Collar', 'Handlers-cleaners': 'Blue-Collar', 'Machine-op-inspct': 'Blue-Collar',
        'Other-service': 'Service', 'Priv-house-serv': 'Service', 'Prof-specialty': 'Professional',
        'Protective-serv': 'Service', 'Tech-support': 'Service', 'Transport-moving': 'Blue-Collar',
        'Unknown': 'Other/Unknown', 'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}})
    adult_data = adult_data.replace({'marital-status': {
        'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married',
        'Never-married': 'Single'}})
    adult_data = adult_data.replace(
        {'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo': 'Other'}})
    adult_data = adult_data[['age', 'hours-per-week', 'workclass', 'education',
                             'marital-status', 'occupation', 'race', 'gender', 'income']]
    adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}})
    adult_data = adult_data.replace({'education': {
        'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc', '11th': 'School', '10th': 'School', '7th-8th': 'School',
        '9th': 'School', '12th': 'School', '5th-6th': 'School', '1st-4th': 'School', 'Preschool': 'School'}})
    adult_data = adult_data.rename(
        columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'})
    return adult_data
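# The cell above leans entirely on DataFrame.replace with nested
# {column: {old: new}} dicts, which remaps values in the named column only.
# A tiny standalone demo of that behaviour (toy frame, not the adult data):
import pandas as pd

toy = pd.DataFrame({'workclass': ['State-gov', 'Private'], 'income': ['<=50K', '>50K']})
toy = toy.replace({'income': {'<=50K': 0, '>50K': 1}})
print(toy)  # workclass untouched; income now 0/1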
__all__ = ['dict2json', 'load_configs', 'update_json_file', 'bn_func', 'x1_to_x3', 'x1x2_to_x4', 'bn_gen', 'load_adult_income_dataset',", "'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education', 'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']] # Cell def", "of Cont'][data_name] = cont_len r['# of Cat'][data_name] = cat_len # pd.DataFrame.from_dict(r).to_csv(\"../results/data_describe.csv\") return r", "5 def x1x2_to_x4(x1, x2): return x1 * np.log(x2 ** 2) / 10 -", "pandas dataframes...') assessment = pd.read_csv(f'{path}/assessments.csv') courses = pd.read_csv(f'{path}/courses.csv') student_assessment = pd.read_csv(f'{path}/studentAssessment.csv') student_info =", "dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for data analysis based on https://rpubs.com/H_Zhu/235617", "'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']", "', dtype=str ) else: raw_data = np.genfromtxt( path, delimiter=', ', dtype=str ) #", "== 0: d['weighted_score'] = sum(x['score']) / len(x['score']) else: d['weighted_score'] = sum( x['score'] *", "columns=column_names) # For more details on how the below transformations are made, please", "return sigmoid(10.5 * ((x1 * x2) / 8100) + 10 - np.random.normal(1, 0.1,", "np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4) def x1_to_x3(x1): return 1/3", "'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo':", "analysis based on https://rpubs.com/H_Zhu/235617 :return adult_data: returns preprocessed adult income dataset. 
copy from", "'Assoc', '11th': 'School', '10th': 'School', '7th-8th': 'School', '9th': 'School', '12th': 'School', '5th-6th': 'School',", "return pd.DataFrame(d, index=[0]) def clicks(x): types = x['activity_type'] sum_clicks = x['sum_click'] # for", "os.path.exists(file_name): # raise FileNotFoundError(f\"{file_name} is not found.\") with open(file_name) as json_file: return json.load(json_file)", "'hours-per-week', 'native-country', 'income'] adult_data = pd.DataFrame(raw_data, columns=column_names) # For more details on how", "'week_to'], axis=1).reset_index() grouped_vle = sumed_vle.groupby( ['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df", "'Professional', 'Protective-serv': 'Service', 'Tech-support': 'Service', 'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown', 'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}})", "'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']] # Cell def describe(configs: List[Dict[str, Dict[str, Any]]]): r", "'x2', 'x3', 'x4', 'y']) # Cell def load_adult_income_dataset(path=None): \"\"\"Loads adult income dataset from", "to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype( {\"age\": np.int64, \"educational-num\": np.int64, \"hours-per-week\": np.int64}) adult_data =", "weighted for unsubmitted ones assessment_merged = student_assessment.merge(assessment) assessment_grouped = assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score)", "= {} total_weight = sum(x['weight']) d['weight'] = total_weight if sum(x['weight']) == 0: d['weighted_score']", "if path is None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str ) else:", "* # Cell def dict2json(dictionary: Dict[str, Any], file_name: str): with open(file_name, \"w\") as", "x3, x4) data = np.zeros((x1.shape[0], 5)) data[:, 0] = x1 data[:, 1] =", "pd.DataFrame(raw_data, columns=column_names) # For more details on how the below transformations are made,", "'Exec-managerial': 'White-Collar', 'Farming-fishing': 'Blue-Collar', 'Handlers-cleaners': 'Blue-Collar', 'Machine-op-inspct': 'Blue-Collar', 'Other-service': 'Service', 'Priv-house-serv': 'Service', 'Prof-specialty':", "sumed_vle.groupby( ['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation',", "'week_from', 'week_to'], axis=1).reset_index() grouped_vle = sumed_vle.groupby( ['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1)", "'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander':", "= bn_func(x1, x2, x3, x4) data = np.zeros((x1.shape[0], 5)) data[:, 0] = x1", "\"\"\" if path is None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str )", "/ len(x['score']) else: d['weighted_score'] = sum( x['score'] * x['weight']) / sum(x['weight']) return pd.DataFrame(d,", "= param[k] dict2json(old_param, file_name) return old_param # Cell def bn_func(x1, x2, x3, x4):", "grouped_vle.sum().drop( ['id_site', 'date', 'week_from', 'week_to'], axis=1).reset_index() 
grouped_vle = sumed_vle.groupby( ['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df", "adult_data = adult_data[['age', 'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender', 'income']] adult_data =", "+ np.random.normal(0, 1, 10000) x4 = x1x2_to_x4(x1, x2) + np.random.normal(0, 1, 10000) y", "adult_data.replace({'income': {'<=50K': 0, '>50K': 1}}) adult_data = adult_data.replace({'education': {'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc', '11th':", "= np.array(y > .5, dtype=np.int) return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4', 'y']) #", "'final_result']] # Cell def describe(configs: List[Dict[str, Dict[str, Any]]]): r = {\"size\": {}, \"#", "load_adult_income_dataset(path=None): \"\"\"Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for data", "np.exp(-x)) return sigmoid(10.5 * ((x1 * x2) / 8100) + 10 - np.random.normal(1,", "{ 'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}}) adult_data = adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed',", "submitted assessment, not weighted for unsubmitted ones assessment_merged = student_assessment.merge(assessment) assessment_grouped = assessment_merged.groupby(", "\"\"\" x1 = np.random.normal(50, 15, 10000) x2 = np.random.normal(35, 17, 10000) x3 =", "'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...') # vle grouped_vle = student_vle.merge(vle).groupby(", "grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\\ .merge(vle_df, on=['code_module', 'code_presentation', 'id_student'],", "10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4) def x1_to_x3(x1):", "cont_len = len(config['continous_cols']) r['size'][data_name] = data_size r['# of Cont'][data_name] = cont_len r['# of", "__all__ = ['dict2json', 'load_configs', 'update_json_file', 'bn_func', 'x1_to_x3', 'x1x2_to_x4', 'bn_gen', 'load_adult_income_dataset', 'load_learning_analytic_data', 'describe'] #", "= np.random.normal(50, 15, 10000) x2 = np.random.normal(35, 17, 10000) x3 = x1_to_x3(x1) +", "adult_data[['age', 'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender', 'income']] adult_data = adult_data.replace({'income': {'<=50K':", "'11th': 'School', '10th': 'School', '7th-8th': 'School', '9th': 'School', '12th': 'School', '5th-6th': 'School', '1st-4th':", "from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path is None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ',", "Dict[str, Any], file_name: str): with open(file_name, \"w\") as outfile: json.dump(dictionary, outfile, indent=4) def", "= load_configs(file_name) else: old_param = {} # copy to old_param for k in", "x4 data[:, 4] = np.array(y > .5, dtype=np.int) return pd.DataFrame(data, columns=['x1', 'x2', 'x3',", "def bn_func(x1, x2, x3, x4): def sigmoid(x): return 1 / (1 + np.exp(-x))", "'Government', 'Local-gov': 'Government'}}) adult_data = adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}}) adult_data =", "cat_len = len(config['discret_cols']) cont_len = len(config['continous_cols']) r['size'][data_name] = data_size r['# of Cont'][data_name] =", "= 
adult_data[['age', 'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender', 'income']] adult_data = adult_data.replace({'income':", "in zip(types, sum_clicks): # x[f\"{t}_click\"] = c return pd.DataFrame({f\"{t}_click\": c for t, c", "- 10 def bn_gen(): \"\"\" modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 = np.random.normal(50,", "0: d['weighted_score'] = sum(x['score']) / len(x['score']) else: d['weighted_score'] = sum( x['score'] * x['weight'])", "with open(file_name) as json_file: return json.load(json_file) def update_json_file(param: dict, file_name: str): if os.path.exists(file_name):", "/ 8100) + 10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3 *", "1/3 * x1 - 5 def x1x2_to_x4(x1, x2): return x1 * np.log(x2 **", "param.keys(): old_param[k] = param[k] dict2json(old_param, file_name) return old_param # Cell def bn_func(x1, x2,", "on how the below transformations are made, please refer to https://rpubs.com/H_Zhu/235617 adult_data =", "'Other/Unknown', 'Never-worked': 'Other/Unknown'}}) adult_data = adult_data.replace({'workclass': { 'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}})", "= assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...') #", "= x['sum_click'] # for t, c in zip(types, sum_clicks): # x[f\"{t}_click\"] = c", "'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education', 'imd_band', 'age_band', 'studied_credits', 'disability',", "bn_gen(): \"\"\" modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 = np.random.normal(50, 15, 10000) x2", "- np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4) def x1_to_x3(x1): return", "'Single'}}) adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo': 'Other'}}) adult_data = adult_data[['age',", "below transformations are made, please refer to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype( {\"age\": np.int64,", "details on how the below transformations are made, please refer to https://rpubs.com/H_Zhu/235617 adult_data", "with open(file_name, \"w\") as outfile: json.dump(dictionary, outfile, indent=4) def load_configs(file_name: Path): # if", "adult_data.replace( {'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}}) adult_data = adult_data.replace({'workclass': { 'Federal-gov': 'Government', 'State-gov':", "'Self-Employed'}}) adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}}) adult_data = adult_data.replace({'occupation': {'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar',", "'Assoc-acdm': 'Assoc', '11th': 'School', '10th': 'School', '7th-8th': 'School', '9th': 'School', '12th': 'School', '5th-6th':", "https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 = np.random.normal(50, 15, 10000) x2 = np.random.normal(35, 17, 10000) x3", "= pd.DataFrame(raw_data, columns=column_names) # For more details on how the below transformations are", "student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\\ .merge(vle_df, on=['code_module', 'code_presentation', 'id_student'], 
how='left') return student_df[['num_of_prev_attempts', 'weight',", "'x4', 'y']) # Cell def load_adult_income_dataset(path=None): \"\"\"Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and", "['age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week',", "transformations are made, please refer to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype( {\"age\": np.int64, \"educational-num\":", "'income']] adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}}) adult_data = adult_data.replace({'education': {'Assoc-voc': 'Assoc',", "x2) / 8100) + 10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3", "'x3', 'x4', 'y']) # Cell def load_adult_income_dataset(path=None): \"\"\"Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult", "data[:, 3] = x4 data[:, 4] = np.array(y > .5, dtype=np.int) return pd.DataFrame(data,", "please refer to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype( {\"age\": np.int64, \"educational-num\": np.int64, \"hours-per-week\": np.int64})", "open(file_name) as json_file: return json.load(json_file) def update_json_file(param: dict, file_name: str): if os.path.exists(file_name): old_param", "len(x['score']) else: d['weighted_score'] = sum( x['score'] * x['weight']) / sum(x['weight']) return pd.DataFrame(d, index=[0])", "from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for data analysis based on https://rpubs.com/H_Zhu/235617 :return", "adult_data = pd.DataFrame(raw_data, columns=column_names) # For more details on how the below transformations", "adult income dataset. copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path is None: raw_data =", "\"https://archive.ics.uci.edu/ml/datasets/Adult\" column_names = ['age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender',", "k in param.keys(): old_param[k] = param[k] dict2json(old_param, file_name) return old_param # Cell def", "# Cell def load_learning_analytic_data(path='assets/data/oulad'): def weighted_score(x): d = {} total_weight = sum(x['weight']) d['weight']", "x['score'] * x['weight']) / sum(x['weight']) return pd.DataFrame(d, index=[0]) def clicks(x): types = x['activity_type']", "preprocessed adult income dataset. 
copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path is None: raw_data", "'code_presentation', 'id_student']) sumed_vle = grouped_vle.sum().drop( ['id_site', 'date', 'week_from', 'week_to'], axis=1).reset_index() grouped_vle = sumed_vle.groupby(", "'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'] adult_data = pd.DataFrame(raw_data, columns=column_names)", "vle grouped_vle = student_vle.merge(vle).groupby( ['activity_type', 'code_module', 'code_presentation', 'id_student']) sumed_vle = grouped_vle.sum().drop( ['id_site', 'date',", "['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'],", "'Self-emp-inc': 'Self-Employed'}}) adult_data = adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}}) adult_data = adult_data.replace({'workclass':", "= adult_data.rename( columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'}) return adult_data # Cell def load_learning_analytic_data(path='assets/data/oulad'): def", "data analysis based on https://rpubs.com/H_Zhu/235617 :return adult_data: returns preprocessed adult income dataset. copy", "data[:, 4] = np.array(y > .5, dtype=np.int) return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4',", "as json_file: return json.load(json_file) def update_json_file(param: dict, file_name: str): if os.path.exists(file_name): old_param =", "def clicks(x): types = x['activity_type'] sum_clicks = x['sum_click'] # for t, c in", "d['weighted_score'] = sum(x['score']) / len(x['score']) else: d['weighted_score'] = sum( x['score'] * x['weight']) /", "adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data = adult_data.replace({'race': {'Black':", "for data_name, config in configs: data = pd.read_csv(f\"{config['data_dir']}\") data_size = len(data) cat_len =", "assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...') # vle grouped_vle = student_vle.merge(vle).groupby( ['activity_type', 'code_module', 'code_presentation',", "'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo': 'Other'}})", "'studied_credits', 'disability', 'final_result']] # Cell def describe(configs: List[Dict[str, Dict[str, Any]]]): r = {\"size\":", "x3 + 1e-3 * x4) def x1_to_x3(x1): return 1/3 * x1 - 5", "not found.\") with open(file_name) as json_file: return json.load(json_file) def update_json_file(param: dict, file_name: str):", "* x1 - 5 def x1x2_to_x4(x1, x2): return x1 * np.log(x2 ** 2)", "'code_module', 'gender', 'region', 'highest_education', 'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']] # Cell def describe(configs:", "'Blue-Collar', 'Machine-op-inspct': 'Blue-Collar', 'Other-service': 'Service', 'Priv-house-serv': 'Service', 'Prof-specialty': 'Professional', 'Protective-serv': 'Service', 'Tech-support': 'Service',", "= pd.read_csv(f'{path}/vle.csv') print('preprocessing assessment...') # note: only count for submitted assessment, not weighted", "= adult_data.astype( {\"age\": np.int64, \"educational-num\": np.int64, 
\"hours-per-week\": np.int64}) adult_data = adult_data.replace( {'workclass': {'Without-pay':", "student_assessment = pd.read_csv(f'{path}/studentAssessment.csv') student_info = pd.read_csv(f'{path}/studentInfo.csv') student_regist = pd.read_csv(f'{path}/studentRegistration.csv') student_vle = pd.read_csv(f'{path}/studentVle.csv') vle", "ones assessment_merged = student_assessment.merge(assessment) assessment_grouped = assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index(", "indent=4) def load_configs(file_name: Path): # if os.path.exists(file_name): # raise FileNotFoundError(f\"{file_name} is not found.\")", "= ['age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss',", "unsubmitted ones assessment_merged = student_assessment.merge(assessment) assessment_grouped = assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df =", "nbs/01b_data.ipynb (unless otherwise specified). __all__ = ['dict2json', 'load_configs', 'update_json_file', 'bn_func', 'x1_to_x3', 'x1x2_to_x4', 'bn_gen',", "adult_data = adult_data.replace( {'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}}) adult_data = adult_data.replace({'workclass': { 'Federal-gov':", "data = np.zeros((x1.shape[0], 5)) data[:, 0] = x1 data[:, 1] = x2 data[:,", "sum(x['weight']) d['weight'] = total_weight if sum(x['weight']) == 0: d['weighted_score'] = sum(x['score']) / len(x['score'])", "'quiz_click', 'ouelluminate_click', 'sharedsubpage_click', 'questionnaire_click', 'page_click', 'externalquiz_click', 'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module', 'gender',", "data[:, 2] = x3 data[:, 3] = x4 data[:, 4] = np.array(y >", "'capital-loss', 'hours-per-week', 'native-country', 'income'] adult_data = pd.DataFrame(raw_data, columns=column_names) # For more details on", "https://rpubs.com/H_Zhu/235617 :return adult_data: returns preprocessed adult income dataset. copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if", "'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender', 'income']] adult_data = adult_data.replace({'income': {'<=50K': 0,", "assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...') # vle grouped_vle = student_vle.merge(vle).groupby( ['activity_type',", "= adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}}) adult_data = adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed',", ":return adult_data: returns preprocessed adult income dataset. 
copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path", "r = {\"size\": {}, \"# of Cont\": {}, \"# of Cat\": {}} for", "8100) + 10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4)", "'Other/Unknown'}}) adult_data = adult_data.replace({'occupation': {'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar', 'Exec-managerial': 'White-Collar', 'Farming-fishing': 'Blue-Collar', 'Handlers-cleaners':", "['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...') # vle grouped_vle", "'x1_to_x3', 'x1x2_to_x4', 'bn_gen', 'load_adult_income_dataset', 'load_learning_analytic_data', 'describe'] # Cell from .import_essentials import * #", "np.zeros((x1.shape[0], 5)) data[:, 0] = x1 data[:, 1] = x2 data[:, 2] =", "axis=1) print('preprocessing vle...') # vle grouped_vle = student_vle.merge(vle).groupby( ['activity_type', 'code_module', 'code_presentation', 'id_student']) sumed_vle", "/ (1 + np.exp(-x)) return sigmoid(10.5 * ((x1 * x2) / 8100) +", "old_param = load_configs(file_name) else: old_param = {} # copy to old_param for k", "- 5 def x1x2_to_x4(x1, x2): return x1 * np.log(x2 ** 2) / 10", "'Other/Unknown', '?': 'Other/Unknown'}}) adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married':", "= sumed_vle.groupby( ['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df = student_info.merge(assessment_df, on=['code_module',", "code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 = np.random.normal(50, 15, 10000) x2 = np.random.normal(35, 17,", "= adult_data.replace( {'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}}) adult_data = adult_data.replace({'workclass': { 'Federal-gov': 'Government',", "'questionnaire_click', 'page_click', 'externalquiz_click', 'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education', 'imd_band',", "** 2) / 10 - 10 def bn_gen(): \"\"\" modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py", "= len(config['discret_cols']) cont_len = len(config['continous_cols']) r['size'][data_name] = data_size r['# of Cont'][data_name] = cont_len", "10000) x4 = x1x2_to_x4(x1, x2) + np.random.normal(0, 1, 10000) y = bn_func(x1, x2,", "columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'}) return adult_data # Cell def load_learning_analytic_data(path='assets/data/oulad'): def weighted_score(x): d", "grouped_vle = student_vle.merge(vle).groupby( ['activity_type', 'code_module', 'code_presentation', 'id_student']) sumed_vle = grouped_vle.sum().drop( ['id_site', 'date', 'week_from',", "income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for data analysis based on", "'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender', 'income']] adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K':", "# Cell def bn_func(x1, x2, x3, x4): def sigmoid(x): return 1 / (1", "'Other/Unknown', 'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}}) adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse': 
'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent':", "{'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}}) adult_data = adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}})", "from .import_essentials import * # Cell def dict2json(dictionary: Dict[str, Any], file_name: str): with", "'Never-worked': 'Other/Unknown'}}) adult_data = adult_data.replace({'workclass': { 'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}}) adult_data", "x['sum_click'] # for t, c in zip(types, sum_clicks): # x[f\"{t}_click\"] = c return", "\"\"\" modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 = np.random.normal(50, 15, 10000) x2 =", "np.random.normal(50, 15, 10000) x2 = np.random.normal(35, 17, 10000) x3 = x1_to_x3(x1) + np.random.normal(0,", "Cell def load_adult_income_dataset(path=None): \"\"\"Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data", "'region', 'highest_education', 'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']] # Cell def describe(configs: List[Dict[str, Dict[str,", "Path): # if os.path.exists(file_name): # raise FileNotFoundError(f\"{file_name} is not found.\") with open(file_name) as", "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01b_data.ipynb (unless otherwise specified). __all__", "'forumng_click', 'homepage_click', 'oucontent_click', 'resource_click', 'subpage_click', 'url_click', 'dataplus_click', 'glossary_click', 'oucollaborate_click', 'quiz_click', 'ouelluminate_click', 'sharedsubpage_click', 'questionnaire_click',", "<gh_stars>0 # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01b_data.ipynb (unless otherwise specified).", "= adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data = adult_data.replace({'race':", "'Handlers-cleaners': 'Blue-Collar', 'Machine-op-inspct': 'Blue-Collar', 'Other-service': 'Service', 'Priv-house-serv': 'Service', 'Prof-specialty': 'Professional', 'Protective-serv': 'Service', 'Tech-support':", "sum(x['weight']) == 0: d['weighted_score'] = sum(x['score']) / len(x['score']) else: d['weighted_score'] = sum( x['score']", "x['activity_type'] sum_clicks = x['sum_click'] # for t, c in zip(types, sum_clicks): # x[f\"{t}_click\"]", "len(config['continous_cols']) r['size'][data_name] = data_size r['# of Cont'][data_name] = cont_len r['# of Cat'][data_name] =", "'Blue-Collar', 'Unknown': 'Other/Unknown', 'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}}) adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married', 'Married-AF-spouse':", "dict2json(dictionary: Dict[str, Any], file_name: str): with open(file_name, \"w\") as outfile: json.dump(dictionary, outfile, indent=4)", "'Local-gov': 'Government'}}) adult_data = adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}}) adult_data = adult_data.replace(", "for t, c in zip(types, sum_clicks)}, index=[0]) print('loading pandas dataframes...') assessment = pd.read_csv(f'{path}/assessments.csv')", "dict2json(old_param, file_name) return old_param # Cell def bn_func(x1, x2, x3, x4): def sigmoid(x):", "https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path is None: raw_data = np.genfromtxt( 
'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str", "copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path is None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=',", "# for t, c in zip(types, sum_clicks): # x[f\"{t}_click\"] = c return pd.DataFrame({f\"{t}_click\":", "def sigmoid(x): return 1 / (1 + np.exp(-x)) return sigmoid(10.5 * ((x1 *", "'y']) # Cell def load_adult_income_dataset(path=None): \"\"\"Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares", "None: raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str ) else: raw_data = np.genfromtxt(", "t, c in zip(types, sum_clicks): # x[f\"{t}_click\"] = c return pd.DataFrame({f\"{t}_click\": c for", "= student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\\ .merge(vle_df, on=['code_module', 'code_presentation', 'id_student'], how='left') return student_df[['num_of_prev_attempts',", "modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 = np.random.normal(50, 15, 10000) x2 = np.random.normal(35,", "'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country',", "x1 * np.log(x2 ** 2) / 10 - 10 def bn_gen(): \"\"\" modify", "return old_param # Cell def bn_func(x1, x2, x3, x4): def sigmoid(x): return 1", "'Tech-support': 'Service', 'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown', 'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}}) adult_data = adult_data.replace({'marital-status':", "is not found.\") with open(file_name) as json_file: return json.load(json_file) def update_json_file(param: dict, file_name:", "'load_adult_income_dataset', 'load_learning_analytic_data', 'describe'] # Cell from .import_essentials import * # Cell def dict2json(dictionary:", "EDIT! File to edit: nbs/01b_data.ipynb (unless otherwise specified). 
__all__ = ['dict2json', 'load_configs', 'update_json_file',", "'income'] adult_data = pd.DataFrame(raw_data, columns=column_names) # For more details on how the below", "weighted_score(x): d = {} total_weight = sum(x['weight']) d['weight'] = total_weight if sum(x['weight']) ==", "configs: data = pd.read_csv(f\"{config['data_dir']}\") data_size = len(data) cat_len = len(config['discret_cols']) cont_len = len(config['continous_cols'])", "dtype=str ) else: raw_data = np.genfromtxt( path, delimiter=', ', dtype=str ) # column", "'School', '5th-6th': 'School', '1st-4th': 'School', 'Preschool': 'School'}}) adult_data = adult_data.rename( columns={'marital-status': 'marital_status', 'hours-per-week':", ".5, dtype=np.int) return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4', 'y']) # Cell def load_adult_income_dataset(path=None):", "raw_data = np.genfromtxt( 'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str ) else: raw_data = np.genfromtxt( path,", "= c return pd.DataFrame({f\"{t}_click\": c for t, c in zip(types, sum_clicks)}, index=[0]) print('loading", "# Cell def dict2json(dictionary: Dict[str, Any], file_name: str): with open(file_name, \"w\") as outfile:", "= adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}}) adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}}) adult_data", "0.1, 10000) * x3 + 1e-3 * x4) def x1_to_x3(x1): return 1/3 *", "dtype=np.int) return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4', 'y']) # Cell def load_adult_income_dataset(path=None): \"\"\"Loads", "are made, please refer to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype( {\"age\": np.int64, \"educational-num\": np.int64,", "data = pd.read_csv(f\"{config['data_dir']}\") data_size = len(data) cat_len = len(config['discret_cols']) cont_len = len(config['continous_cols']) r['size'][data_name]", "how the below transformations are made, please refer to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype(", "bn_func(x1, x2, x3, x4): def sigmoid(x): return 1 / (1 + np.exp(-x)) return", "axis=1) student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\\ .merge(vle_df, on=['code_module', 'code_presentation', 'id_student'], how='left')", ".import_essentials import * # Cell def dict2json(dictionary: Dict[str, Any], file_name: str): with open(file_name,", "'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education', 'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']]", "old_param # Cell def bn_func(x1, x2, x3, x4): def sigmoid(x): return 1 /", "index=[0]) print('loading pandas dataframes...') assessment = pd.read_csv(f'{path}/assessments.csv') courses = pd.read_csv(f'{path}/courses.csv') student_assessment = pd.read_csv(f'{path}/studentAssessment.csv')", "'page_click', 'externalquiz_click', 'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education', 'imd_band', 'age_band',", "return student_df[['num_of_prev_attempts', 'weight', 'weighted_score', 'forumng_click', 'homepage_click', 'oucontent_click', 'resource_click', 'subpage_click', 'url_click', 'dataplus_click', 'glossary_click', 'oucollaborate_click',", "types = x['activity_type'] sum_clicks = x['sum_click'] # for t, c in zip(types, sum_clicks):", "/ 10 - 10 def bn_gen(): \"\"\" 
modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1", "'native-country', 'income'] adult_data = pd.DataFrame(raw_data, columns=column_names) # For more details on how the", "'externalquiz_click', 'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education', 'imd_band', 'age_band', 'studied_credits',", "'Service', 'Prof-specialty': 'Professional', 'Protective-serv': 'Service', 'Tech-support': 'Service', 'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown', 'Armed-Forces': 'Other/Unknown',", "for data analysis based on https://rpubs.com/H_Zhu/235617 :return adult_data: returns preprocessed adult income dataset.", "def load_adult_income_dataset(path=None): \"\"\"Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult and prepares the data for", "'education', 'marital-status', 'occupation', 'race', 'gender', 'income']] adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}})", "'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', delimiter=', ', dtype=str ) else: raw_data = np.genfromtxt( path, delimiter=', ', dtype=str", "0] = x1 data[:, 1] = x2 data[:, 2] = x3 data[:, 3]", "'Self-Employed'}}) adult_data = adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}}) adult_data = adult_data.replace({'workclass': {'?':", "'1st-4th': 'School', 'Preschool': 'School'}}) adult_data = adult_data.rename( columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'}) return adult_data", "as outfile: json.dump(dictionary, outfile, indent=4) def load_configs(file_name: Path): # if os.path.exists(file_name): # raise", "json.load(json_file) def update_json_file(param: dict, file_name: str): if os.path.exists(file_name): old_param = load_configs(file_name) else: old_param", "assessment_grouped = assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...')", "# raise FileNotFoundError(f\"{file_name} is not found.\") with open(file_name) as json_file: return json.load(json_file) def", "+ 10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4) def", "= sum(x['score']) / len(x['score']) else: d['weighted_score'] = sum( x['score'] * x['weight']) / sum(x['weight'])", "income dataset. 
copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py \"\"\" if path is None: raw_data = np.genfromtxt(", "bn_func(x1, x2, x3, x4) data = np.zeros((x1.shape[0], 5)) data[:, 0] = x1 data[:,", "on=['code_module', 'code_presentation', 'id_student'], how='left') return student_df[['num_of_prev_attempts', 'weight', 'weighted_score', 'forumng_click', 'homepage_click', 'oucontent_click', 'resource_click', 'subpage_click',", "# For more details on how the below transformations are made, please refer", "'Government'}}) adult_data = adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}}) adult_data = adult_data.replace( {'workclass':", "vle = pd.read_csv(f'{path}/vle.csv') print('preprocessing assessment...') # note: only count for submitted assessment, not", "Any]]]): r = {\"size\": {}, \"# of Cont\": {}, \"# of Cat\": {}}", "sumed_vle = grouped_vle.sum().drop( ['id_site', 'date', 'week_from', 'week_to'], axis=1).reset_index() grouped_vle = sumed_vle.groupby( ['code_module', 'code_presentation',", "np.genfromtxt( path, delimiter=', ', dtype=str ) # column names from \"https://archive.ics.uci.edu/ml/datasets/Adult\" column_names =", "param[k] dict2json(old_param, file_name) return old_param # Cell def bn_func(x1, x2, x3, x4): def", "{} # copy to old_param for k in param.keys(): old_param[k] = param[k] dict2json(old_param,", "adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}}) adult_data = adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay':", "adult_data.replace({'workclass': { 'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}}) adult_data = adult_data.replace( {'workclass': {'Self-emp-not-inc':", "else: raw_data = np.genfromtxt( path, delimiter=', ', dtype=str ) # column names from", "else: old_param = {} # copy to old_param for k in param.keys(): old_param[k]", "\"hours-per-week\": np.int64}) adult_data = adult_data.replace( {'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}}) adult_data = adult_data.replace({'workclass':", "= pd.read_csv(f'{path}/courses.csv') student_assessment = pd.read_csv(f'{path}/studentAssessment.csv') student_info = pd.read_csv(f'{path}/studentInfo.csv') student_regist = pd.read_csv(f'{path}/studentRegistration.csv') student_vle =", "the below transformations are made, please refer to https://rpubs.com/H_Zhu/235617 adult_data = adult_data.astype( {\"age\":", "= {} # copy to old_param for k in param.keys(): old_param[k] = param[k]", "np.random.normal(0, 1, 10000) y = bn_func(x1, x2, x3, x4) data = np.zeros((x1.shape[0], 5))", "note: only count for submitted assessment, not weighted for unsubmitted ones assessment_merged =", "adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}}) adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}}) adult_data =", "Cell def dict2json(dictionary: Dict[str, Any], file_name: str): with open(file_name, \"w\") as outfile: json.dump(dictionary,", "'weighted_score', 'forumng_click', 'homepage_click', 'oucontent_click', 'resource_click', 'subpage_click', 'url_click', 'dataplus_click', 'glossary_click', 'oucollaborate_click', 'quiz_click', 'ouelluminate_click', 'sharedsubpage_click',", "None).drop(['level_3'], axis=1) print('preprocessing vle...') # vle grouped_vle = student_vle.merge(vle).groupby( ['activity_type', 'code_module', 'code_presentation', 
'id_student'])", "adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}}) adult_data = adult_data.replace({'education': {'Assoc-voc': 'Assoc', 'Assoc-acdm':", "10 - 10 def bn_gen(): \"\"\" modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 =", "'Other', 'Amer-Indian-Eskimo': 'Other'}}) adult_data = adult_data[['age', 'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender',", "'gender', 'region', 'highest_education', 'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']] # Cell def describe(configs: List[Dict[str,", "def load_configs(file_name: Path): # if os.path.exists(file_name): # raise FileNotFoundError(f\"{file_name} is not found.\") with", "adult_data.astype( {\"age\": np.int64, \"educational-num\": np.int64, \"hours-per-week\": np.int64}) adult_data = adult_data.replace( {'workclass': {'Without-pay': 'Other/Unknown',", "'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing vle...') # vle grouped_vle =", "copy to old_param for k in param.keys(): old_param[k] = param[k] dict2json(old_param, file_name) return", "{}, \"# of Cat\": {}} for data_name, config in configs: data = pd.read_csv(f\"{config['data_dir']}\")", "return 1/3 * x1 - 5 def x1x2_to_x4(x1, x2): return x1 * np.log(x2", "raw_data = np.genfromtxt( path, delimiter=', ', dtype=str ) # column names from \"https://archive.ics.uci.edu/ml/datasets/Adult\"", "return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4', 'y']) # Cell def load_adult_income_dataset(path=None): \"\"\"Loads adult", "'bn_func', 'x1_to_x3', 'x1x2_to_x4', 'bn_gen', 'load_adult_income_dataset', 'load_learning_analytic_data', 'describe'] # Cell from .import_essentials import *", "'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1) student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\\", "delimiter=', ', dtype=str ) # column names from \"https://archive.ics.uci.edu/ml/datasets/Adult\" column_names = ['age', 'workclass',", "student_assessment.merge(assessment) assessment_grouped = assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1) print('preprocessing", "For more details on how the below transformations are made, please refer to", "'disability', 'final_result']] # Cell def describe(configs: List[Dict[str, Dict[str, Any]]]): r = {\"size\": {},", "c for t, c in zip(types, sum_clicks)}, index=[0]) print('loading pandas dataframes...') assessment =", "def update_json_file(param: dict, file_name: str): if os.path.exists(file_name): old_param = load_configs(file_name) else: old_param =", "np.log(x2 ** 2) / 10 - 10 def bn_gen(): \"\"\" modify code from:", "'gender', 'income']] adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}}) adult_data = adult_data.replace({'education': {'Assoc-voc':", "print('preprocessing vle...') # vle grouped_vle = student_vle.merge(vle).groupby( ['activity_type', 'code_module', 'code_presentation', 'id_student']) sumed_vle =", "based on https://rpubs.com/H_Zhu/235617 :return adult_data: returns preprocessed adult income dataset. 
copy from https://github.com/interpretml/DiCE/blob/master/dice_ml/utils/helpers.py", "courses = pd.read_csv(f'{path}/courses.csv') student_assessment = pd.read_csv(f'{path}/studentAssessment.csv') student_info = pd.read_csv(f'{path}/studentInfo.csv') student_regist = pd.read_csv(f'{path}/studentRegistration.csv') student_vle", "{'?': 'Other/Unknown'}}) adult_data = adult_data.replace({'occupation': {'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar', 'Exec-managerial': 'White-Collar', 'Farming-fishing': 'Blue-Collar',", "* x3 + 1e-3 * x4) def x1_to_x3(x1): return 1/3 * x1 -", "= {\"size\": {}, \"# of Cont\": {}, \"# of Cat\": {}} for data_name,", "to old_param for k in param.keys(): old_param[k] = param[k] dict2json(old_param, file_name) return old_param", "= sum(x['weight']) d['weight'] = total_weight if sum(x['weight']) == 0: d['weighted_score'] = sum(x['score']) /", "if os.path.exists(file_name): old_param = load_configs(file_name) else: old_param = {} # copy to old_param", "of Cat\": {}} for data_name, config in configs: data = pd.read_csv(f\"{config['data_dir']}\") data_size =", "'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'] adult_data", "'9th': 'School', '12th': 'School', '5th-6th': 'School', '1st-4th': 'School', 'Preschool': 'School'}}) adult_data = adult_data.rename(", "'age_band', 'studied_credits', 'disability', 'final_result']] # Cell def describe(configs: List[Dict[str, Dict[str, Any]]]): r =", "'Amer-Indian-Eskimo': 'Other'}}) adult_data = adult_data[['age', 'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender', 'income']]", "'date', 'week_from', 'week_to'], axis=1).reset_index() grouped_vle = sumed_vle.groupby( ['code_module', 'code_presentation', 'id_student']).apply(clicks) vle_df = grouped_vle.reset_index(None).drop(['level_3'],", "= pd.read_csv(f'{path}/studentRegistration.csv') student_vle = pd.read_csv(f'{path}/studentVle.csv') vle = pd.read_csv(f'{path}/vle.csv') print('preprocessing assessment...') # note: only", "{'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data = adult_data.replace({'race': {'Black': 'Other',", "Dict[str, Any]]]): r = {\"size\": {}, \"# of Cont\": {}, \"# of Cat\":", "= x4 data[:, 4] = np.array(y > .5, dtype=np.int) return pd.DataFrame(data, columns=['x1', 'x2',", "edit: nbs/01b_data.ipynb (unless otherwise specified). 
__all__ = ['dict2json', 'load_configs', 'update_json_file', 'bn_func', 'x1_to_x3', 'x1x2_to_x4',", "x2, x3, x4) data = np.zeros((x1.shape[0], 5)) data[:, 0] = x1 data[:, 1]", "10000) x3 = x1_to_x3(x1) + np.random.normal(0, 1, 10000) x4 = x1x2_to_x4(x1, x2) +", "adult_data = adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}}) adult_data = adult_data.replace( {'workclass': {'Never-worked':", "'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}}) adult_data = adult_data.replace( {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc':", "'oucollaborate_click', 'quiz_click', 'ouelluminate_click', 'sharedsubpage_click', 'questionnaire_click', 'page_click', 'externalquiz_click', 'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click', 'htmlactivity_click', 'code_module',", "pd.read_csv(f\"{config['data_dir']}\") data_size = len(data) cat_len = len(config['discret_cols']) cont_len = len(config['continous_cols']) r['size'][data_name] = data_size", "def bn_gen(): \"\"\" modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py \"\"\" x1 = np.random.normal(50, 15, 10000)", "* x2) / 8100) + 10 - np.random.normal(1, 0.1, 10000) * x3 +", "'x1x2_to_x4', 'bn_gen', 'load_adult_income_dataset', 'load_learning_analytic_data', 'describe'] # Cell from .import_essentials import * # Cell", "'Service', 'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown', 'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}}) adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse':", "'Service', 'Priv-house-serv': 'Service', 'Prof-specialty': 'Professional', 'Protective-serv': 'Service', 'Tech-support': 'Service', 'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown',", "y = bn_func(x1, x2, x3, x4) data = np.zeros((x1.shape[0], 5)) data[:, 0] =", "if os.path.exists(file_name): # raise FileNotFoundError(f\"{file_name} is not found.\") with open(file_name) as json_file: return", "np.int64, \"hours-per-week\": np.int64}) adult_data = adult_data.replace( {'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}}) adult_data =", "adult_data = adult_data.replace( {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}}) adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}})", "0, '>50K': 1}}) adult_data = adult_data.replace({'education': {'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc', '11th': 'School', '10th':", "* x['weight']) / sum(x['weight']) return pd.DataFrame(d, index=[0]) def clicks(x): types = x['activity_type'] sum_clicks", "student_vle = pd.read_csv(f'{path}/studentVle.csv') vle = pd.read_csv(f'{path}/vle.csv') print('preprocessing assessment...') # note: only count for", "(1 + np.exp(-x)) return sigmoid(10.5 * ((x1 * x2) / 8100) + 10", "{}} for data_name, config in configs: data = pd.read_csv(f\"{config['data_dir']}\") data_size = len(data) cat_len", "adult_data.replace({'occupation': {'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar', 'Exec-managerial': 'White-Collar', 'Farming-fishing': 'Blue-Collar', 'Handlers-cleaners': 'Blue-Collar', 'Machine-op-inspct': 'Blue-Collar',", "assessment = pd.read_csv(f'{path}/assessments.csv') courses = pd.read_csv(f'{path}/courses.csv') student_assessment = pd.read_csv(f'{path}/studentAssessment.csv') student_info = pd.read_csv(f'{path}/studentInfo.csv') student_regist", "AUTOGENERATED! DO NOT EDIT! 
File to edit: nbs/01b_data.ipynb (unless otherwise specified). __all__ =", "'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}}) adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other',", "'School', 'Preschool': 'School'}}) adult_data = adult_data.rename( columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'}) return adult_data #", "'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo': 'Other'}}) adult_data = adult_data[['age', 'hours-per-week', 'workclass', 'education', 'marital-status', 'occupation', 'race',", "'marital-status', 'occupation', 'race', 'gender', 'income']] adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}}) adult_data", "'School', '1st-4th': 'School', 'Preschool': 'School'}}) adult_data = adult_data.rename( columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'}) return", "= adult_data.replace({'income': {'<=50K': 0, '>50K': 1}}) adult_data = adult_data.replace({'education': {'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc',", "/ sum(x['weight']) return pd.DataFrame(d, index=[0]) def clicks(x): types = x['activity_type'] sum_clicks = x['sum_click']", "= data_size r['# of Cont'][data_name] = cont_len r['# of Cat'][data_name] = cat_len #", "= pd.read_csv(f\"{config['data_dir']}\") data_size = len(data) cat_len = len(config['discret_cols']) cont_len = len(config['continous_cols']) r['size'][data_name] =", "'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income'] adult_data = pd.DataFrame(raw_data, columns=column_names) # For", "Cell def describe(configs: List[Dict[str, Dict[str, Any]]]): r = {\"size\": {}, \"# of Cont\":", "d['weight'] = total_weight if sum(x['weight']) == 0: d['weighted_score'] = sum(x['score']) / len(x['score']) else:", "= student_assessment.merge(assessment) assessment_grouped = assessment_merged.groupby( ['code_module', 'code_presentation', 'id_student']).apply(weighted_score) assessment_df = assessment_grouped.reset_index( None).drop(['level_3'], axis=1)", "otherwise specified). 
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01b_data.ipynb (unless otherwise specified).

__all__ = ['dict2json', 'load_configs', 'update_json_file', 'bn_func', 'x1_to_x3', 'x1x2_to_x4',
           'bn_gen', 'load_adult_income_dataset', 'load_learning_analytic_data', 'describe']

# Cell
from .import_essentials import *

# Cell
def dict2json(dictionary: Dict[str, Any], file_name: str):
    with open(file_name, "w") as outfile:
        json.dump(dictionary, outfile, indent=4)

def load_configs(file_name: Path):
    # if os.path.exists(file_name):
    #     raise FileNotFoundError(f"{file_name} is not found.")
    with open(file_name) as json_file:
        return json.load(json_file)

def update_json_file(param: dict, file_name: str):
    if os.path.exists(file_name):
        old_param = load_configs(file_name)
    else:
        old_param = {}
    # copy to old_param
    for k in param.keys():
        old_param[k] = param[k]
    dict2json(old_param, file_name)
    return old_param

# Cell
def bn_func(x1, x2, x3, x4):
    def sigmoid(x):
        return 1 / (1 + np.exp(-x))
    return sigmoid(10.5 * ((x1 * x2) / 8100) + 10 - np.random.normal(1, 0.1, 10000) * x3 + 1e-3 * x4)

def x1_to_x3(x1):
    return 1/3 * x1 - 5

def x1x2_to_x4(x1, x2):
    return x1 * x2 / 10

def bn_gen():
    """modify code from: https://github.com/divyat09/cf-feasibility/blob/master/generativecf/scripts/simple-bn-gen.py"""
    x1 = np.random.normal(50, 15, 10000)
    x2 = np.random.normal(35, 17, 10000)
    x3 = x1_to_x3(x1) + np.random.normal(0, 1, 10000)
    x4 = x1x2_to_x4(x1, x2) + np.random.normal(0, 1, 10000)
    y = bn_func(x1, x2, x3, x4)
    data = np.zeros((x1.shape[0], 5))
    data[:, 0] = x1
    data[:, 1] = x2
    data[:, 2] = x3
    data[:, 3] = x4
    data[:, 4] = np.array(y > .5, dtype=int)
    return pd.DataFrame(data, columns=['x1', 'x2', 'x3', 'x4', 'y'])

# Cell
def load_adult_income_dataset(path=None):
    """Loads adult income dataset from https://archive.ics.uci.edu/ml/datasets/Adult
    and prepares the data for data analysis based on https://rpubs.com/H_Zhu/235617

    :return adult_data: returns preprocessed adult income dataset
    """
    if path is None:
        raw_data = np.genfromtxt(
            'https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
            delimiter=', ', dtype=str
        )
    else:
        raw_data = np.genfromtxt(path, delimiter=', ', dtype=str)

    # column names from "https://archive.ics.uci.edu/ml/datasets/Adult"
    column_names = ['age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status',
                    'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss',
                    'hours-per-week', 'native-country', 'income']
    adult_data = pd.DataFrame(raw_data, columns=column_names)

    # For more details on the preprocessing, refer to https://rpubs.com/H_Zhu/235617
    adult_data = adult_data.astype(
        {"age": np.int64, "educational-num": np.int64, "hours-per-week": np.int64})
    adult_data = adult_data.replace(
        {'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}})
    adult_data = adult_data.replace({'workclass': {
        'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}})
    adult_data = adult_data.replace(
        {'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}})
    adult_data = adult_data.replace(
        {'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}})
    adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}})
    adult_data = adult_data.replace({'occupation': {
        'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar', 'Exec-managerial': 'White-Collar',
        'Farming-fishing': 'Blue-Collar', 'Handlers-cleaners': 'Blue-Collar',
        'Machine-op-inspct': 'Blue-Collar', 'Other-service': 'Service', 'Priv-house-serv': 'Service',
        'Prof-specialty': 'Professional', 'Protective-serv': 'Service', 'Tech-support': 'Service',
        'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown',
        'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}})
    adult_data = adult_data.replace({'marital-status': {
        'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married',
        'Married-spouse-absent': 'Married', 'Never-married': 'Single'}})
    adult_data = adult_data.replace(
        {'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo': 'Other'}})
    adult_data = adult_data[['age', 'hours-per-week', 'workclass', 'education',
                             'marital-status', 'occupation', 'race', 'gender', 'income']]
    adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}})
    adult_data = adult_data.replace({'education': {
        'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc', '11th': 'School', '10th': 'School',
        '7th-8th': 'School', '9th': 'School', '12th': 'School', '5th-6th': 'School',
        '1st-4th': 'School', 'Preschool': 'School'}})
    adult_data = adult_data.rename(
        columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'})
    return adult_data

# Cell
def load_learning_analytic_data(path='assets/data/oulad'):
    def weighted_score(x):
        d = {}
        total_weight = sum(x['weight'])
        d['weight'] = total_weight
        if total_weight != 0:
            d['weighted_score'] = sum(x['score'] * x['weight']) / sum(x['weight'])
        return pd.DataFrame(d, index=[0])

    def clicks(x):
        types = x['activity_type']
        sum_clicks = x['sum_click']
        # for t, c in zip(types, sum_clicks):
        #     x[f"{t}_click"] = c
        return pd.DataFrame({f"{t}_click": c for t, c in zip(types, sum_clicks)}, index=[0])

    print('loading pandas dataframes...')
    assessment = pd.read_csv(f'{path}/assessments.csv')
    courses = pd.read_csv(f'{path}/courses.csv')
    student_assessment = pd.read_csv(f'{path}/studentAssessment.csv')
    student_info = pd.read_csv(f'{path}/studentInfo.csv')
    student_regist = pd.read_csv(f'{path}/studentRegistration.csv')
    student_vle = pd.read_csv(f'{path}/studentVle.csv')
    vle = pd.read_csv(f'{path}/vle.csv')

    print('preprocessing assessment...')
    # note: only count for submitted assessment, not weighted for unsubmitted ones
    assessment_merged = student_assessment.merge(assessment)
    assessment_grouped = assessment_merged.groupby(
        ['code_module', 'code_presentation', 'id_student']).apply(weighted_score)
    assessment_df = assessment_grouped.reset_index(None).drop(['level_3'], axis=1)

    print('preprocessing vle...')
    # vle
    grouped_vle = student_vle.merge(vle).groupby(
        ['activity_type', 'code_module', 'code_presentation', 'id_student'])
    sumed_vle = grouped_vle.sum().drop(
        ['id_site', 'date', 'week_from', 'week_to'], axis=1).reset_index()
    grouped_vle = sumed_vle.groupby(
        ['code_module', 'code_presentation', 'id_student']).apply(clicks)
    vle_df = grouped_vle.reset_index(None).drop(['level_3'], axis=1)

    student_df = student_info.merge(assessment_df, on=['code_module', 'code_presentation', 'id_student'], how='left')\
        .merge(vle_df, on=['code_module', 'code_presentation', 'id_student'], how='left')
    return student_df[['num_of_prev_attempts', 'weight', 'weighted_score', 'forumng_click', 'homepage_click',
                       'oucontent_click', 'resource_click', 'subpage_click', 'url_click', 'dataplus_click',
                       'glossary_click', 'oucollaborate_click', 'quiz_click', 'ouelluminate_click',
                       'sharedsubpage_click', 'questionnaire_click', 'page_click', 'externalquiz_click',
                       'ouwiki_click', 'dualpane_click', 'folder_click', 'repeatactivity_click',
                       'htmlactivity_click', 'code_module', 'gender', 'region', 'highest_education',
                       'imd_band', 'age_band', 'studied_credits', 'disability', 'final_result']]

# Cell
def describe(configs: List[Dict[str, Dict[str, Any]]]):
    r = {"size": {}, "# of Cont": {}, "# of Cat": {}}
    for data_name, config in configs:
        data = pd.read_csv(f"{config['data_dir']}")
        data_size = len(data)
        cat_len = len(config['discret_cols'])
        cont_len = len(config['continous_cols'])
        r['size'][data_name] = data_size
        r['# of Cont'][data_name] = cont_len
        r['# of Cat'][data_name] = cat_len
    return r
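A minimal usage sketch for the synthetic generator above, assuming the module's star import has made numpy available as np (as the source assumes); the seed value is illustrative, not part of the source:

# Illustrative only: exercise the synthetic Bayesian-network generator.
import numpy as np

np.random.seed(0)        # bn_gen samples from np.random, so seed for reproducibility
df = bn_gen()            # DataFrame with columns x1, x2, x3, x4 and a binary y
print(df.shape)          # (10000, 5)
print(df['y'].mean())    # fraction of positive labels induced by bn_func's 0.5 threshold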
[ "= None if content_type is not None: if mimetypes.guess_extension(content_type) is not None: headers", "app: \"\"\" self.act = act self.app = app def normal(self, content_type, expires, folder_file,", "StoreData act: :param StoreData app: \"\"\" self.act = act self.app = app def", "type in headers :param int or None expires: Url expires :param str folder_file:", ":param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet url :param str", "download.py # Version: 1.0.0 # Create: 2018-10-27 # Modify: 2018-11-07 import mimetypes from", "CSDownloadErr class Download(object): def __init__(self, act=None, app=None): \"\"\" :param StoreData act: :param StoreData", "normal(self, content_type, expires, folder_file, intranet, source_file): \"\"\" :param str or None content_type: Content", "-*- # API - cs # FileName: download.py # Version: 1.0.0 # Create:", "appid = self.act.dict['PassiveParty'] if source_file is not None: source = Source(appid, suffix=source_file) else:", "Source(appid, suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not intranet)) url", "not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file is not None: source", "2018-10-27 # Modify: 2018-11-07 import mimetypes from .auth import OSS from .util import", ":param StoreData app: \"\"\" self.act = act self.app = app def normal(self, content_type,", "intranet url :param str source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers = None if", "None content_type: Content type in headers :param int or None expires: Url expires", "if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'):", "if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file is not None:", ":param str source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers = None if content_type is", "suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not intranet)) url =", "expires: Url expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet", "source = Source(appid, suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not", "from .auth import OSS from .util import Check from act import StoreData from", "CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file is not", "mimetypes from .auth import OSS from .util import Check from act import StoreData", "Check from act import StoreData from .upload import FolderFile, Source from .exception import", ":param str or None content_type: Content type in headers :param int or None", "Url expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet url", "str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet url :param str source_file:", "\"\"\" :param str or None content_type: Content type in headers :param int or", "suffix=folder_file).source oss = 
OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet)", "headers :param int or None expires: Url expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId}", "app def normal(self, content_type, expires, folder_file, intranet, source_file): \"\"\" :param str or None", "None: if mimetypes.guess_extension(content_type) is not None: headers = {'Content-Type': content_type} if not Check.download_expires(expires):", "Eg: source/${FileId}.source.cs :return: \"\"\" headers = None if content_type is not None: if", "is not None: headers = {'Content-Type': content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if", "return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file is not None: source = Source(appid,", "if content_type is not None: if mimetypes.guess_extension(content_type) is not None: headers = {'Content-Type':", "CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid =", "= OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return {", "content_type is not None: if mimetypes.guess_extension(content_type) is not None: headers = {'Content-Type': content_type}", "\"\"\" headers = None if content_type is not None: if mimetypes.guess_extension(content_type) is not", "intranet: Return intranet url :param str source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers =", "Content type in headers :param int or None expires: Url expires :param str", "from .upload import FolderFile, Source from .exception import CSCommonErr, CSDownloadErr class Download(object): def", "act: :param StoreData app: \"\"\" self.act = act self.app = app def normal(self,", "from act import StoreData from .upload import FolderFile, Source from .exception import CSCommonErr,", "folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet url :param str source_file: Eg: source/${FileId}.source.cs :return:", "or None expires: Url expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet:", "act self.app = app def normal(self, content_type, expires, folder_file, intranet, source_file): \"\"\" :param", "not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if", "= Source(appid, suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not intranet))", "self.app = app def normal(self, content_type, expires, folder_file, intranet, source_file): \"\"\" :param str", "source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers = None if content_type is not None:", "- cs # FileName: download.py # Version: 1.0.0 # Create: 2018-10-27 # Modify:", "Create: 2018-10-27 # Modify: 2018-11-07 import mimetypes from .auth import OSS from .util", "Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet url :param str source_file: Eg: source/${FileId}.source.cs", ":param StoreData act: :param StoreData app: \"\"\" self.act = act self.app = app", "FolderFile, Source from .exception import CSCommonErr, CSDownloadErr 
class Download(object): def __init__(self, act=None, app=None):", "CSCommonErr, CSDownloadErr class Download(object): def __init__(self, act=None, app=None): \"\"\" :param StoreData act: :param", "\"\"\" :param StoreData act: :param StoreData app: \"\"\" self.act = act self.app =", "import mimetypes from .auth import OSS from .util import Check from act import", "source = FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET', source.key,", "Return intranet url :param str source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers = None", "# -*- coding: utf-8 -*- # API - cs # FileName: download.py #", "not None: if mimetypes.guess_extension(content_type) is not None: headers = {'Content-Type': content_type} if not", "Modify: 2018-11-07 import mimetypes from .auth import OSS from .util import Check from", ".upload import FolderFile, Source from .exception import CSCommonErr, CSDownloadErr class Download(object): def __init__(self,", "None: source = Source(appid, suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet,", "= FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET', source.key, expires,", "source_file): \"\"\" :param str or None content_type: Content type in headers :param int", "folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file", "import StoreData from .upload import FolderFile, Source from .exception import CSCommonErr, CSDownloadErr class", "folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet url :param str source_file: Eg:", "None if content_type is not None: if mimetypes.guess_extension(content_type) is not None: headers =", "{'Content-Type': content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if", "coding: utf-8 -*- # API - cs # FileName: download.py # Version: 1.0.0", "mimetypes.guess_extension(content_type) is not None: headers = {'Content-Type': content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT", "\"\"\" self.act = act self.app = app def normal(self, content_type, expires, folder_file, intranet,", "self.act = act self.app = app def normal(self, content_type, expires, folder_file, intranet, source_file):", "2018-11-07 import mimetypes from .auth import OSS from .util import Check from act", ".exception import CSCommonErr, CSDownloadErr class Download(object): def __init__(self, act=None, app=None): \"\"\" :param StoreData", "source_file is not None: source = Source(appid, suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source", "# Create: 2018-10-27 # Modify: 2018-11-07 import mimetypes from .auth import OSS from", "not None: source = Source(appid, suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source oss =", "Download(object): def __init__(self, act=None, app=None): \"\"\" :param StoreData act: :param StoreData app: \"\"\"", "if mimetypes.guess_extension(content_type) is not None: headers = {'Content-Type': content_type} if not Check.download_expires(expires): return", "from .exception import CSCommonErr, CSDownloadErr class Download(object): def __init__(self, 
act=None, app=None): \"\"\" :param", "int or None expires: Url expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool", "oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return { 'errcode': 0, 'url': url, 'headers': headers,", "# FileName: download.py # Version: 1.0.0 # Create: 2018-10-27 # Modify: 2018-11-07 import", "API - cs # FileName: download.py # Version: 1.0.0 # Create: 2018-10-27 #", "is not None: source = Source(appid, suffix=source_file) else: source = FolderFile(appid, suffix=folder_file).source oss", "act import StoreData from .upload import FolderFile, Source from .exception import CSCommonErr, CSDownloadErr", "= act self.app = app def normal(self, content_type, expires, folder_file, intranet, source_file): \"\"\"", "import FolderFile, Source from .exception import CSCommonErr, CSDownloadErr class Download(object): def __init__(self, act=None,", "folder_file, intranet, source_file): \"\"\" :param str or None content_type: Content type in headers", "else: source = FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET',", "return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file is", ".auth import OSS from .util import Check from act import StoreData from .upload", "oss = OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return", "# API - cs # FileName: download.py # Version: 1.0.0 # Create: 2018-10-27", "import CSCommonErr, CSDownloadErr class Download(object): def __init__(self, act=None, app=None): \"\"\" :param StoreData act:", "str or None content_type: Content type in headers :param int or None expires:", "not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return", "OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return { 'errcode':", "not None: headers = {'Content-Type': content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not", "import Check from act import StoreData from .upload import FolderFile, Source from .exception", "1.0.0 # Create: 2018-10-27 # Modify: 2018-11-07 import mimetypes from .auth import OSS", "def __init__(self, act=None, app=None): \"\"\" :param StoreData act: :param StoreData app: \"\"\" self.act", "self.act.dict['PassiveParty'] if source_file is not None: source = Source(appid, suffix=source_file) else: source =", ":return: \"\"\" headers = None if content_type is not None: if mimetypes.guess_extension(content_type) is", "-*- coding: utf-8 -*- # API - cs # FileName: download.py # Version:", "__init__(self, act=None, app=None): \"\"\" :param StoreData act: :param StoreData app: \"\"\" self.act =", "StoreData app: \"\"\" self.act = act self.app = app def normal(self, content_type, expires,", "None: headers = {'Content-Type': content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'):", "return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid", "def normal(self, content_type, 
expires, folder_file, intranet, source_file): \"\"\" :param str or None content_type:", "expires, folder_file, intranet, source_file): \"\"\" :param str or None content_type: Content type in", ":param int or None expires: Url expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param", "bool intranet: Return intranet url :param str source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers", "# Version: 1.0.0 # Create: 2018-10-27 # Modify: 2018-11-07 import mimetypes from .auth", "import OSS from .util import Check from act import StoreData from .upload import", "headers = None if content_type is not None: if mimetypes.guess_extension(content_type) is not None:", "source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file is not None: source =", "Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE", "expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return intranet url :param", "if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not source_file.startswith('source/'): return CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty']", "= app def normal(self, content_type, expires, folder_file, intranet, source_file): \"\"\" :param str or", "expires, headers, intranet=intranet) return { 'errcode': 0, 'url': url, 'headers': headers, 'source': source.suffix,", "or None content_type: Content type in headers :param int or None expires: Url", "source.key, expires, headers, intranet=intranet) return { 'errcode': 0, 'url': url, 'headers': headers, 'source':", "utf-8 -*- # API - cs # FileName: download.py # Version: 1.0.0 #", "OSS from .util import Check from act import StoreData from .upload import FolderFile,", "extranet=(not intranet)) url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return { 'errcode': 0,", "= {'Content-Type': content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER", "act=None, app=None): \"\"\" :param StoreData act: :param StoreData app: \"\"\" self.act = act", "= self.act.dict['PassiveParty'] if source_file is not None: source = Source(appid, suffix=source_file) else: source", "Source from .exception import CSCommonErr, CSDownloadErr class Download(object): def __init__(self, act=None, app=None): \"\"\"", ".util import Check from act import StoreData from .upload import FolderFile, Source from", "FolderFile(appid, suffix=folder_file).source oss = OSS(intranet=intranet, extranet=(not intranet)) url = oss.sign_url('GET', source.key, expires, headers,", "in headers :param int or None expires: Url expires :param str folder_file: Eg:", "Version: 1.0.0 # Create: 2018-10-27 # Modify: 2018-11-07 import mimetypes from .auth import", "# Modify: 2018-11-07 import mimetypes from .auth import OSS from .util import Check", "class Download(object): def __init__(self, act=None, app=None): \"\"\" :param StoreData act: :param StoreData app:", "str source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers = None if content_type is not", "app=None): \"\"\" :param StoreData act: :param StoreData app: \"\"\" self.act = act self.app", "headers = {'Content-Type': 
content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return", "intranet)) url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return { 'errcode': 0, 'url':", "FileName: download.py # Version: 1.0.0 # Create: 2018-10-27 # Modify: 2018-11-07 import mimetypes", ":param bool intranet: Return intranet url :param str source_file: Eg: source/${FileId}.source.cs :return: \"\"\"", "url :param str source_file: Eg: source/${FileId}.source.cs :return: \"\"\" headers = None if content_type", "is not None: if mimetypes.guess_extension(content_type) is not None: headers = {'Content-Type': content_type} if", "from .util import Check from act import StoreData from .upload import FolderFile, Source", "CSCommonErr.INVALID_SOURCE appid = self.act.dict['PassiveParty'] if source_file is not None: source = Source(appid, suffix=source_file)", "headers, intranet=intranet) return { 'errcode': 0, 'url': url, 'headers': headers, 'source': source.suffix, }", "cs # FileName: download.py # Version: 1.0.0 # Create: 2018-10-27 # Modify: 2018-11-07", "source/${FileId}.source.cs :return: \"\"\" headers = None if content_type is not None: if mimetypes.guess_extension(content_type)", "intranet, source_file): \"\"\" :param str or None content_type: Content type in headers :param", "if source_file is not None: source = Source(appid, suffix=source_file) else: source = FolderFile(appid,", "url = oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return { 'errcode': 0, 'url': url,", "StoreData from .upload import FolderFile, Source from .exception import CSCommonErr, CSDownloadErr class Download(object):", "= oss.sign_url('GET', source.key, expires, headers, intranet=intranet) return { 'errcode': 0, 'url': url, 'headers':", "content_type: Content type in headers :param int or None expires: Url expires :param", "content_type} if not Check.download_expires(expires): return CSDownloadErr.EXPIRES_LIMIT if not folder_file.startswith('folder/'): return CSCommonErr.INVALID_FOLDER if not", "content_type, expires, folder_file, intranet, source_file): \"\"\" :param str or None content_type: Content type", "None expires: Url expires :param str folder_file: Eg: folder/${FolderId}/.../t=${CreateTime}&n=${FileName}&i=${FileId} :param bool intranet: Return" ]
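A hedged sketch of calling Download.normal. The StoreData setup below is an assumption (its real constructor is not shown in this file; normal() only requires that self.act.dict['PassiveParty'] resolves to an appid), and the ids in the example paths are placeholders:

# Assumption: StoreData exposes a `dict` attribute readable as shown.
act = StoreData()
act.dict['PassiveParty'] = 'example-appid'

dl = Download(act=act)
resp = dl.normal(
    content_type='image/png',   # must map to a known extension, else no header is set
    expires=600,                # validated by Check.download_expires
    folder_file='folder/1/t=0&n=a.png&i=1',
    intranet=False,
    source_file='source/1.source.cs',
)
print(resp['url'])              # signed GET url produced by OSS.sign_url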
[ "port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name == None: name = \"AWSpider", "uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s", "def _getErrback(self, error, uuid): LOGGER.error(\"Could not get %s from S3.\\n%s\" % (uuid, error))", "name == None: name = \"AWSpider Data Server UUID: %s\" % self.uuid resource", "error)) return error def shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping on main HTTP", "stopping on main HTTP interface.\" % self.name) d = self.site_port.stopListening() if isinstance(d, Deferred):", "twisted.web import server from twisted.internet import reactor from .base import BaseServer, LOGGER from", "name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s from", "deferreds = [] LOGGER.debug(\"%s stopping on main HTTP interface.\" % self.name) d =", "self.name) d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0: d", "aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name == None:", "len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return self._shutdownCallback(None) def", "LOGGER.debug(\"%s stopping on main HTTP interface.\" % self.name) d = self.site_port.stopListening() if isinstance(d,", "__init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name", "aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def", "d.addErrback(self._getErrback, uuid) return d def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s from S3.\" %", "% self.name) d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0:", "d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s from", "aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name == None: name", "0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return self._shutdownCallback(None) def _shutdownCallback(self, data):", "self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket,", "(uuid, error)) return error def shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping on main", "from .base import BaseServer, LOGGER from ..resources import DataResource class DataServer(BaseServer): def __init__(self,", "if name == None: name = \"AWSpider Data Server UUID: %s\" % self.uuid", "= [] LOGGER.debug(\"%s stopping on main HTTP 
interface.\" % self.name) d = self.site_port.stopListening()", "def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if", "port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\" %", "= \"AWSpider Data Server UUID: %s\" % self.uuid resource = DataResource(self) self.site_port =", "%s from S3.\" % (uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could not", "on main HTTP interface.\" % self.name) d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d)", "d def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s from S3.\" % (uuid)) return cPickle.loads(data[\"response\"])", "error, uuid): LOGGER.error(\"Could not get %s from S3.\\n%s\" % (uuid, error)) return error", "LOGGER.debug(\"Got %s from S3.\" % (uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could", "Server UUID: %s\" % self.uuid resource = DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__(", "import DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None,", "= DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return self._shutdownCallback(None) def _shutdownCallback(self, data): return BaseServer.shutdown(self)", "import reactor from .base import BaseServer, LOGGER from ..resources import DataResource class DataServer(BaseServer):", "S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d", "LOGGER.error(\"Could not get %s from S3.\\n%s\" % (uuid, error)) return error def shutdown(self):", "getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback,", "server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port)", "uuid): LOGGER.debug(\"Got %s from S3.\" % (uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid):", "% (uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could not get %s from", "d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d def _getCallback(self, data,", "aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name == None: name =", "uuid) d.addErrback(self._getErrback, uuid) return d def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s from S3.\"", "% (uuid, error)) return error def shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping on", "max_simultaneous_requests=50): if name == None: name = \"AWSpider Data Server UUID: %s\" %", "S3.\" % (uuid)) 
return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could not get %s", "Deferred, DeferredList from twisted.web import server from twisted.internet import reactor from .base import", "BaseServer, LOGGER from ..resources import DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket,", "log_level=\"debug\", name=None, max_simultaneous_requests=50): if name == None: name = \"AWSpider Data Server UUID:", "twisted.internet import reactor from .base import BaseServer, LOGGER from ..resources import DataResource class", "log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting", "_getErrback(self, error, uuid): LOGGER.error(\"Could not get %s from S3.\\n%s\" % (uuid, error)) return", "deferreds.append(d) if len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return", "twisted.internet.defer import Deferred, DeferredList from twisted.web import server from twisted.internet import reactor from", "aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket)", "data, uuid): LOGGER.debug(\"Got %s from S3.\" % (uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self, error,", "Data Server UUID: %s\" % self.uuid resource = DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource))", "(uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could not get %s from S3.\\n%s\"", "interface.\" % self.name) d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) >", "log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name == None: name = \"AWSpider Data", "\"AWSpider Data Server UUID: %s\" % self.uuid resource = DataResource(self) self.site_port = reactor.listenTCP(port,", "%s\" % self.uuid resource = DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id,", "cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could not get %s from S3.\\n%s\" % (uuid,", "%s from S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid)", "== None: name = \"AWSpider Data Server UUID: %s\" % self.uuid resource =", "DeferredList from twisted.web import server from twisted.internet import reactor from .base import BaseServer,", "if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return", "shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping on main HTTP interface.\" % self.name) d", "LOGGER.debug(\"Getting %s from S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback,", "from S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) 
d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return", "_getCallback(self, data, uuid): LOGGER.debug(\"Got %s from S3.\" % (uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self,", "DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50):", "server from twisted.internet import reactor from .base import BaseServer, LOGGER from ..resources import", "clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\" % uuid) d", "from twisted.web import server from twisted.internet import reactor from .base import BaseServer, LOGGER", "aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return", "= self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0: d = DeferredList(deferreds)", "..resources import DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log',", "import server from twisted.internet import reactor from .base import BaseServer, LOGGER from ..resources", "= self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d def _getCallback(self, data, uuid):", "def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s from S3.\" % (uuid)) return cPickle.loads(data[\"response\"]) def", "uuid) return d def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s from S3.\" % (uuid))", "d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return self._shutdownCallback(None) def _shutdownCallback(self, data): return", "isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d", "def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\" % uuid)", "log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid):", "DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\",", "import BaseServer, LOGGER from ..resources import DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key,", "aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name ==", "return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could not get %s from S3.\\n%s\" %", "[] LOGGER.debug(\"%s stopping on main HTTP interface.\" % 
self.name) d = self.site_port.stopListening() if", "import Deferred, DeferredList from twisted.web import server from twisted.internet import reactor from .base", "class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002, log_file='dataserver.log', log_directory=None, log_level=\"debug\", name=None,", "uuid): LOGGER.error(\"Could not get %s from S3.\\n%s\" % (uuid, error)) return error def", "self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self):", "LOGGER from ..resources import DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain,", "reactor from .base import BaseServer, LOGGER from ..resources import DataResource class DataServer(BaseServer): def", "from ..resources import DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket, aws_sdb_reservation_domain, port=5002,", "get %s from S3.\\n%s\" % (uuid, error)) return error def shutdown(self): deferreds =", "S3.\\n%s\" % (uuid, error)) return error def shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping", "self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d def _getCallback(self, data, uuid): LOGGER.debug(\"Got", "HTTP interface.\" % self.name) d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds)", "return error def shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping on main HTTP interface.\"", "name=None, max_simultaneous_requests=50): if name == None: name = \"AWSpider Data Server UUID: %s\"", "def getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid)", "from S3.\\n%s\" % (uuid, error)) return error def shutdown(self): deferreds = [] LOGGER.debug(\"%s", "aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self,", "DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory,", "uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d def _getCallback(self,", "from S3.\" % (uuid)) return cPickle.loads(data[\"response\"]) def _getErrback(self, error, uuid): LOGGER.error(\"Could not get", "= reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name,", "return d def _getCallback(self, data, uuid): LOGGER.debug(\"Got %s from 
S3.\" % (uuid)) return", "from twisted.internet.defer import Deferred, DeferredList from twisted.web import server from twisted.internet import reactor", "from twisted.internet import reactor from .base import BaseServer, LOGGER from ..resources import DataResource", "BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def", "Deferred): deferreds.append(d) if len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else:", "% self.uuid resource = DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key,", "return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\" % uuid) d =", "main HTTP interface.\" % self.name) d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if", "max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s from S3.\"", "self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback)", "None: name = \"AWSpider Data Server UUID: %s\" % self.uuid resource = DataResource(self)", "> 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return self._shutdownCallback(None) def _shutdownCallback(self,", "self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level,", "= DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file,", "log_directory=None, log_level=\"debug\", name=None, max_simultaneous_requests=50): if name == None: name = \"AWSpider Data Server", "UUID: %s\" % self.uuid resource = DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self,", "reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain, log_file=log_file, log_directory=log_directory, log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests,", "uuid): LOGGER.debug(\"Getting %s from S3.\" % uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid)", "name = \"AWSpider Data Server UUID: %s\" % self.uuid resource = DataResource(self) self.site_port", "self.uuid resource = DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket,", "def shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping on main HTTP 
interface.\" % self.name)", "resource = DataResource(self) self.site_port = reactor.listenTCP(port, server.Site(resource)) BaseServer.__init__( self, aws_access_key_id, aws_secret_access_key, aws_s3_storage_bucket=aws_s3_storage_bucket, aws_sdb_reservation_domain=aws_sdb_reservation_domain,", "%s from S3.\\n%s\" % (uuid, error)) return error def shutdown(self): deferreds = []", "error def shutdown(self): deferreds = [] LOGGER.debug(\"%s stopping on main HTTP interface.\" %", "not get %s from S3.\\n%s\" % (uuid, error)) return error def shutdown(self): deferreds", "d = self.site_port.stopListening() if isinstance(d, Deferred): deferreds.append(d) if len(deferreds) > 0: d =", "log_level=log_level, name=name, max_simultaneous_requests=max_simultaneous_requests, port=port) def clearStorage(self): return self.s3.emptyBucket(self.aws_s3_storage_bucket) def getData(self, uuid): LOGGER.debug(\"Getting %s", "% uuid) d = self.s3.getObject(self.aws_s3_storage_bucket, uuid) d.addCallback(self._getCallback, uuid) d.addErrback(self._getErrback, uuid) return d def", "if len(deferreds) > 0: d = DeferredList(deferreds) d.addCallback(self._shutdownCallback) return d else: return self._shutdownCallback(None)", ".base import BaseServer, LOGGER from ..resources import DataResource class DataServer(BaseServer): def __init__(self, aws_access_key_id," ]
[ "\"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec)", "\"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this", "builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World", "'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template", "= ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node =", "= NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities':", "init_template(): db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability =", "{'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template", "\"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\":", "False, \"description\": \"component depends on configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\":", "\"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message", "{\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore in vm\"}], \"auxilary\": {", "'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName':", "{\"component_type\": \"docker\", \"description\": \"Hello World mS for subscribing the data from local DMaaP,", "\"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects for vnf type monitorng\", \"type\": \"list\", \"entry_schema\":", "{'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node =", "\"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\",", "spec = {\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\":", "sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': 
{'type': 'string'}}, 'node_templates': {'nodeName': {'type':", "{'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db)", "receiving VES events through restful interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\":", "\"value\": 8080, \"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port", "{'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs':", "VES events through restful interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\": [{", "configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\",", "events through restful interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\": [{ \"format\":", "sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName':", "for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\":", "'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName',", "{\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this used as self serve component.\", \"sourced_at_deployment\":", "or MR, processing them and publishing them as PM files to local DMaaP", "name entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\":", "'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}},", "'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template(): db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties':", "= ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return", "[], \"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\":", "{ \"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\":", "{'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName':", "{'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) 
db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates':", "\"value\": \"\", \"description\": \"the name entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\":", "of keystore in vm\"}], \"auxilary\": { \"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\", \"timeout\":", "sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability':", "through restful interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\",", "{'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes':", "port \"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore in vm\"}],", "\"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\":", "[{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}},", "files to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\":", "{'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement':", "\"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects for vnf type monitorng\", \"type\":", "db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type':", "'substituteNodeType'}}}) template._parse_content(db) return template def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\": \"1.1.0\", \"name\":", "\"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]} builder = ToscaBuilder()", "{'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability':", "db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute',", "{'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template def init_tosca_builder_with_schema_and_spec():", "from toscalib.templates.database import ToscaDB 
from toscalib.types.node import NodeType from toscalib.types.capability import CapabilityType from", "{\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\":", "\"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder", "and publishing them as PM files to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\":", "[], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the name entered for", "ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model =", "{'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db)", "monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\",", "World mS for subscribing the data from local DMaaP, DR or MR, processing", "from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\":", "\"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\":", "\"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\": \"component depends on configuration from dti.\",", "return builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\":", "\"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\":", "\"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\":", "\"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\":", "]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\":", "\"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the name entered for specific person\",\"sourced_at_deployment\": True,", "\"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects for vnf", "'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None,", "NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': 
{'type': 'string'}}, 'capabilities': {'capabilityName':", "def init_sub_template(): db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db)", "template def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector", "\"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"},", "{\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]} builder =", "\"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects for vnf type monitorng\",", "toscalib.templates.database import ToscaDB from toscalib.types.node import NodeType from toscalib.types.capability import CapabilityType from toscalib.tosca_builder", "\"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\",", "\"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the name entered for specific", "\"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\":", "CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties':", "{'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName':", "None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements':", "{'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability':", "db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type':", "spec = {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World mS for subscribing the data", "from local DMaaP, DR or MR, processing them and publishing them as PM", "[{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template(): db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute',", "{\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\":", "\"list\", \"entry_schema\": [{ 
\"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\",", "{'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None,", "{'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\":", "'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]})", "\"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\":", "import ToscaTopology from toscalib.templates.database import ToscaDB from toscalib.types.node import NodeType from toscalib.types.capability import", "\"docker\", \"description\": \"Hello World mS for subscribing the data from local DMaaP, DR", "\"collector.service.port\", \"value\": 8080, \"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure", "'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id':", "init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World mS for subscribing the", "= NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements':", "{ \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard", "[{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": {", "{\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\":", "policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy',", "'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node", "\"boolean\", \"value\": \"false\"}] } ] }]} ]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return", "DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\":", "import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, 
'../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR,", "interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\",", "\"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\",", "[{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs': {'inputName':", "\"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\":", "{\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\":", "builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"},", "ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty':", "[{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\":", "\"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\",", "\"POST\", \"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]},", "\"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\":", "capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}},", "\"false\", \"description\": \"Is this used as self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False,", "\"services\": { \"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\", \"version\":", "PM files to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [],", "\"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"},", "{\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\":", "\"name\": 
\"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] }", "\"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore in vm\"}], \"auxilary\": { \"healthcheck\": {", "\"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker", "\"useDtiConfig\", \"value\": False, \"description\": \"component depends on configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\":", "\"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return", "\"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore in vm\"}], \"auxilary\": { \"healthcheck\": { \"type\":", "import ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model", "{'type': 'substituteNodeType'}}}) template._parse_content(db) return template def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\": \"1.1.0\",", "builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\":", "ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\", \"description\":", "\"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\":", "\"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\":", "True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\": \"component depends on configuration from", "\"description\": \"component depends on configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False,", "True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this used as self serve component.\",", "\"Is this used as self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False,", "used as self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}],", "'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId',", "\"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\",", "'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': 
[{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node)", "\"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector", "\"false\"}] } ] }]} ]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def", "False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\": \"component depends on configuration from dti.\", \"sourced_at_deployment\":", "[{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443,", "{\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\":", "\"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List", "{\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\":", "{\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\":", "'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template(): db = ToscaDB() sub_capability =", "objects for vnf type monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"},", "[{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\",", "{'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability)", "\"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID set\", \"value\":", "person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\": \"component depends", "\"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\":", "\"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\",", "ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\": \"0.1.6\",", "CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) 
sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id':", "return template def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\":", "= os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db =", "{'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type':", "\"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID", "[]}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the name entered for specific person\",\"sourced_at_deployment\":", "\"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\":", "component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\",", "toscalib.types.node import NodeType from toscalib.types.capability import CapabilityType from toscalib.tosca_builder import ToscaBuilder import os", "{'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template():", "local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []}, \"streams\":", "= CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}})", "sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}},", "\"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID set\", \"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\",", "{'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}},", "this used as self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False, \"required\":", "CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml')", "sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties':", "\"name\": \"test_spec_ss\", \"description\": \"Collector for receiving VES events through restful interface\", \"component_type\": \"docker\"},", "= 
os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def", "DMaaP, DR or MR, processing them and publishing them as PM files to", "= CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName',", "builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World mS for", "\"streams\": { \"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\",", "'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type':", "subscribing the data from local DMaaP, DR or MR, processing them and publishing", "os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir,", "template._parse_content(db) return template def init_sub_template(): db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty':", "\"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] } ] }]} ]}", "type monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\":", "location of keystore in vm\"}], \"auxilary\": { \"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\",", "\"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port \"}, {\"name\": \"collector.keystore.file.location\",", "\"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] } ] }]} ]} builder", "'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template(): db =", "\"fs location of keystore in vm\"}], \"auxilary\": { \"healthcheck\": { \"type\": \"http\", \"interval\":", "{\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec)", "init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{", "db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}})", "\"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location", "def init_template(): 
db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability", "vnf type monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\",", "\"description\": \"SAM Collector clli=location ID set\", \"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\":", "= ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\",", "'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities':", "\"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] } ] }]} ]} builder = ToscaBuilder() builder.import_schema(policy_model)", "import CapabilityType from toscalib.tosca_builder import ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model =", "\"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this used as", "template def init_sub_template(): db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}})", "\"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for receiving VES events through restful interface\",", "them as PM files to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\":", "to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []},", "'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs':", "\"value\": 8443, \"description\": \"secure port \"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location", "def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World mS for subscribing", "'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template(): db", "\"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\":", "\"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"},", "\"description\": \"Is this used as self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\":", "ID set\", \"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": 
\"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\":", "set\", \"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\",", "{ \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{", "\"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec =", "None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template def", "\"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects for", "[{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the name entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\":", "sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id':", "\"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080,", "\"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}},", "\"description\": \"fs location of keystore in vm\"}], \"auxilary\": { \"healthcheck\": { \"type\": \"http\",", "\"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for receiving VES events through restful interface\", \"component_type\":", "from toscalib.types.capability import CapabilityType from toscalib.tosca_builder import ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__))", "keystore in vm\"}], \"auxilary\": { \"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\",", "NodeType from toscalib.types.capability import CapabilityType from toscalib.tosca_builder import ToscaBuilder import os CURR_DIR =", "= ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\":", "from toscalib.tosca_builder import ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir,", "builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello", "{'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type':", "'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": {", "{\"name\": \"useDtiConfig\", \"value\": False, 
\"description\": \"component depends on configuration from dti.\", \"sourced_at_deployment\": \"false\",", "NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement':", "'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName':", "dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\",", "{'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName':", "\"value\": \"false\", \"description\": \"Is this used as self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\":", "= NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements':", "'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}},", "port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port \"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\",", "\"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the name", "\"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\":", "= os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties':", "{'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability':", "on configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True}, {\"name\":", "\"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\":", "{'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}})", "publishing them as PM files to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"},", "'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template(): db = ToscaDB() sub_capability", "{'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) 
db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId',", "\"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port \"}, {\"name\":", "\"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder", "depends on configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True},", "db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName':", "ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName',", "\"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\":", "\"false\", \"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this used", "return builder def init_tosca_builder_with_hello_world_spec_k8(): spec = {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World mS", "\"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\":", "toscalib.tosca_builder import ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml')", "{ \"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"},", "\"the name entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\",", "restful interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\", \"version\":", "\"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\":", "'../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}})", "builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\": \"0.1.6\", \"name\":", "\"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID set\",", "'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type':", "{'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template def 
init_tosca_builder_with_schema_and_spec(): spec =", "import NodeType from toscalib.types.capability import CapabilityType from toscalib.tosca_builder import ToscaBuilder import os CURR_DIR", "\"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\":", "[{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID set\", \"value\": \"\", \"type\": \"string\"},", "\"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{", "\"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard http port\"}, {\"name\":", "\"auxilary\": { \"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder", "{'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type)", "\"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] } ] }]}", "\"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\":", "'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type':", "in vm\"}], \"auxilary\": { \"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\":", "False, \"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\",", "os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB() capability_type", "\"description\": \"the name entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\":", "[{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects for vnf type monitorng\", \"type\": \"list\",", "\"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{", "\"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\",", "{ \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM", "[{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}]", "\"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port \"}, 
{\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs", "\"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"},", "os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB()", "\"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\":", "\"List of objects for vnf type monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\",", "the data from local DMaaP, DR or MR, processing them and publishing them", "\"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this used as self serve", "{'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node)", "\"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{", "ToscaTopology from toscalib.templates.database import ToscaDB from toscalib.types.node import NodeType from toscalib.types.capability import CapabilityType", "\"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects for vnf type", "{'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template def init_tosca_builder_with_schema_and_spec(): spec", "{'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType',", "\"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] } ]", "} ] }]} ]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8():", "<filename>app/tests/utils/test_utils.py from toscalib.templates.topology import ToscaTopology from toscalib.templates.database import ToscaDB from toscalib.types.node import NodeType", "\"test_spec_ss\", \"description\": \"Collector for receiving VES events through restful interface\", \"component_type\": \"docker\"}, \"streams\":", "{'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName',", "\"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard http port\"},", "\"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\": \"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\":", "template._parse_content(db) return template 
def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\",", "{'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability':", "from toscalib.types.node import NodeType from toscalib.types.capability import CapabilityType from toscalib.tosca_builder import ToscaBuilder import", "'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}},", "as self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\":", "sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type =", "False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\",", "[]}, \"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the", "{\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\":", "processing them and publishing them as PM files to local DMaaP DR\", \"name\":", "them and publishing them as PM files to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\",", "{\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\":", "\"description\": \"secure port \"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore", "= {\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\",", "'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template def init_tosca_builder_with_schema_and_spec(): spec = {\"self\":", "\"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": {", "{\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\":", "\"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\": \"component depends on configuration", "\"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": 
\"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\":", "{'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db)", "Collector clli=location ID set\", \"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\":", "for subscribing the data from local DMaaP, DR or MR, processing them and", "{\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\":", "\"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\":", "{'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type)", "init_sub_template(): db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability)", "\"docker\"}, \"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID set\", \"value\": \"\",", "\"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]} builder = ToscaBuilder() builder.import_schema(meta_model)", "\"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]},", "'../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB() capability_type =", "{\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for receiving VES events through", "CapabilityType from toscalib.tosca_builder import ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR,", "def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\", \"component_type\": \"docker\"}, \"parameters\":", "{\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\", \"description\": \"the name entered", "{'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template =", "'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template", "\"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\": 
\"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\":", "template = ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db)", "= {\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for receiving VES events", "builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": {", "\"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\":", "\"secure port \"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore in", "\"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\":", "ToscaTopology('subTemplateName', None, {'inputs': {'propertyName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'substituteNodeType'}}}) template._parse_content(db) return template", "\"parameters\": [{ \"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID set\", \"value\": \"\", \"type\":", "MR, processing them and publishing them as PM files to local DMaaP DR\",", "from toscalib.templates.topology import ToscaTopology from toscalib.templates.database import ToscaDB from toscalib.types.node import NodeType from", "'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements':", "def init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for", "init_tosca_builder_with_schema_and_spec(): spec = {\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for receiving", "{\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\":", "{ \"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model)", "\"\", \"description\": \"the name entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False},", "\"name\": \"clliLocationMappingClliFutureUse3\", \"description\": \"SAM Collector clli=location ID set\", \"value\": \"\", \"type\": \"string\"}, {\"name\":", "\"name\": \"elementType\", \"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\":", "\"false\"}, {\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] } ] }]} ]} builder =", "vm\"}], \"auxilary\": { \"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}}", "= ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': 
{'type':", "\"description\": \"Collector for receiving VES events through restful interface\", \"component_type\": \"docker\"}, \"streams\": {", "\"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\": \"/eventListener/v5\",", "'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) node_type._parse_content(db)", "NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement':", "{'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def init_sub_template(): db = ToscaDB()", "mS for subscribing the data from local DMaaP, DR or MR, processing them", "'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type':", "toscalib.types.capability import CapabilityType from toscalib.tosca_builder import ToscaBuilder import os CURR_DIR = os.path.dirname(os.path.abspath(__file__)) meta_model", "db._import_capability_type(sub_capability) node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type':", "\"description\": \"Hello World mS for subscribing the data from local DMaaP, DR or", "serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\":", "\"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [], \"publishes\": [{ \"format\": \"VES_specification\", \"version\": \"5.28.4\", \"type\":", "\"response\": { \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\":", "8443, \"description\": \"secure port \"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of", "\"vnfTypeSpecificData\", \"description\": \"List of objects for vnf type monitorng\", \"type\": \"list\", \"entry_schema\": [{", "\"vcc_hello_name\", \"value\": \"\", \"description\": \"the name entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True,", "\"SAM Collector clli=location ID set\", \"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True,", "] }]} ]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec", "\"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"},", "\"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": 
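
# A minimal smoke test for the two fixtures above, assuming the toy schema
# they build parses cleanly; the test name is illustrative and not part of
# the fixture API.
def test_init_template_fixtures():
    # Both helpers construct and parse a ToscaTopology before returning it.
    assert isinstance(init_template(), ToscaTopology)
    assert isinstance(init_sub_template(), ToscaTopology)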
\"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}},", "\"value\": \"\"}, {\"name\": \"monitoringTasks\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"HostGroupSetCommonLinuxSNMP\", \"type\": \"boolean\", \"value\":", "{\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World mS for subscribing the data from local", "data from local DMaaP, DR or MR, processing them and publishing them as", "builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec = {\"self\": { \"version\": \"0.1.6\", \"name\": \"DcaeSamCollector\",", "True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\": \"component depends on", "as PM files to local DMaaP DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\":", "{'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type':", "\"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of", "\"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is", "CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db)", "{'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties':", "'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return", "clli=location ID set\", \"value\": \"\", \"type\": \"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\",", "ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType',", "'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db) db._import_node_type(sub_node) template =", "template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'}, 'node2':", "return template def init_sub_template(): db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type':", "\"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\":", "\"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def", 
"\"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\": []},", "\"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard http", "\"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", \"version\":", "\"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore in vm\"}], \"auxilary\": { \"healthcheck\":", "}]} ]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_hello_world_spec_k8(): spec =", "{'nodeName': {'type': 'nodeTypeName'}, 'node2': {'type': 'nodeTypeName', 'requirements': [{'dummyRequirement': 'nodeName'}]}}}) template._parse_content(db) return template def", "spec = {\"self\": { \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for receiving VES", "toscalib.templates.topology import ToscaTopology from toscalib.templates.database import ToscaDB from toscalib.types.node import NodeType from toscalib.types.capability", "{\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\":", "meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db", "\"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\": \"fs location of keystore in vm\"}], \"auxilary\":", "\"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec(): spec", "\"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\":", "\"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return builder def init_tosca_builder_with_policy_schema_and_spec():", "for receiving VES events through restful interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\": [],", "= CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId',", "self serve component.\", \"sourced_at_deployment\": False, \"designer_editable\": False, \"policy_editable\": False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\":", "{ \"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder =", "db = ToscaDB() sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability._parse_content(db) db._import_capability_type(sub_capability) 
sub_node", "os.path.dirname(os.path.abspath(__file__)) meta_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_tosca_schema.yaml') policy_model = os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template():", "\"description\": \"List of objects for vnf type monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\":", "\"type\": \"boolean\", \"value\": \"false\"}] } ] }]} ]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec)", "\"component depends on configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\", \"policy_editable\": False, \"required\":", "DR or MR, processing them and publishing them as PM files to local", "\"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\":", "os.path.join(CURR_DIR, os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty':", "\"string\"}, {\"name\": \"vnfFaultMonProvisionPolicy\", \"policy_editable\": True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\",", "{\"name\": \"HostGroupSetNagent_Common_Linux\", \"type\": \"boolean\", \"value\": \"false\"}] } ] }]} ]} builder = ToscaBuilder()", "\"healthcheck\": { \"type\": \"http\", \"interval\": \"15s\", \"timeout\": \"1s\", \"endpoint\": \"/healthcheck\"}}} builder = ToscaBuilder()", "import ToscaDB from toscalib.types.node import NodeType from toscalib.types.capability import CapabilityType from toscalib.tosca_builder import", "node_type._parse_content(db) sub_node._parse_content(db) db._import_node_type(node_type) db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates':", "True, \"policy_group\": \"DcaeSamCollector_vnfFaultMonProvisionPolicy\", \"type\": \"string\", \"policy_schema\": [{ \"name\": \"vnfTypeSpecificData\", \"description\": \"List of objects", "node_type = NodeType('nodeTypeName', {'id': 'nodeId', 'attributes': {'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}},", "of objects for vnf type monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\":", "\"value\": False, \"description\": \"component depends on configuration from dti.\", \"sourced_at_deployment\": \"false\", \"designer_editable\": \"false\",", "specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False, \"description\": \"component", "\"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}},", "capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type':", "{ \"version\": \"1.1.0\", \"name\": \"test_spec_ss\", \"description\": \"Collector for receiving VES events through 
restful", "\"Collector for receiving VES events through restful interface\", \"component_type\": \"docker\"}, \"streams\": { \"subscribes\":", "\"parameters\": [{ \"name\": \"collector.service.port\", \"value\": 8080, \"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\":", "db._import_node_type(sub_node) template = ToscaTopology('templateName', None, {'inputs': {'inputName': {'type': 'string'}}, 'node_templates': {'nodeName': {'type': 'nodeTypeName'},", "False, \"required\": \"true\"}], \"auxilary\": {\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"},", "DR\", \"name\": \"dcae.collectors.vcc.helloworld.pm\", \"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\": [],", "[{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"}, \"host\": {\"path\": \"/opt/app/dcae-certificate\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\":", "http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port \"}, {\"name\": \"collector.keystore.file.location\", \"value\":", "\"VES_specification\", \"version\": \"5.28.4\", \"type\": \"message router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\":", "router\", \"config_key\": \"ves_sipsignaling\"}]}, \"services\": { \"provides\": [{ \"route\": \"/eventListener/v5\", \"verb\": \"POST\", \"request\": {", "\"version\": \"1.0.1\"}, \"services\": {\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\":", "8080, \"description\": \"standard http port\"}, {\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port \"},", "\"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}},", "{\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/AGENT\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/AGENT\"}}, {\"container\": {\"bind\": \"/opt/logs/DCAE/dmd/WATCHER\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\":", "= {\"self\": {\"component_type\": \"docker\", \"description\": \"Hello World mS for subscribing the data from", "{\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]} builder", "'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]}) sub_node._parse_content(db)", "\"/eventListener/v5\", \"verb\": \"POST\", \"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\",", "\"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]} 
builder = ToscaBuilder() builder.import_schema(meta_model) builder.import_spec_str(spec) return", "local DMaaP, DR or MR, processing them and publishing them as PM files", "= ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type': 'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties':", "{\"path\": \"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\":", "'string'}}}) sub_capability = CapabilityType('tosca.capabilities.substitute', {'properties': {'capabilityProperty': {'type': 'string'}}}) capability_type._parse_content(db) sub_capability._parse_content(db) db._import_capability_type(capability_type) db._import_capability_type(sub_capability) node_type", "False, \"required\": True}, {\"name\": \"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this used as self", "entered for specific person\",\"sourced_at_deployment\": True, \"designer_editable\": True, \"policy_editable\": False}, {\"name\": \"useDtiConfig\", \"value\": False,", "[], \"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\", \"value\": \"\",", "ToscaDB from toscalib.types.node import NodeType from toscalib.types.capability import CapabilityType from toscalib.tosca_builder import ToscaBuilder", "\"/opt/logs/DCAE/helloworldpm/dmd/WATCHER\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/logs/DCAE\"}, \"host\": {\"path\": \"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]},", "{'attributeName': {'type': 'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement':", "\"request\": { \"format\": \"VES_specification\", \"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", \"version\": \"1.0.0\"}}]}, \"parameters\":", "\"services\": {\"calls\": [], \"provides\": []}, \"streams\": {\"publishes\": [], \"subscribes\": []}, \"parameters\": [{\"name\": \"vcc_hello_name\",", "os.pardir, '../data/meta_model/meta_policy_schema.yaml') def init_template(): db = ToscaDB() capability_type = CapabilityType('tosca.capabilities.dummy', {'properties': {'capabilityProperty': {'type':", "{\"name\": \"collector.service.secure.port\", \"value\": 8443, \"description\": \"secure port \"}, {\"name\": \"collector.keystore.file.location\", \"value\": \"/opt/app/dcae-certificate/keystore.jks\", \"description\":", "{\"healthcheck\": {\"interval\": \"60s\", \"timeout\": \"20s\", \"script\": \"/opt/app/vcc/bin/common/HealthCheck_HelloWorld.sh\", \"type\": \"docker\"}, \"volumes\": [{\"container\": {\"bind\": \"/opt/app/dcae-certificate\"},", "\"isSelfServeComponent\", \"value\": \"false\", \"description\": \"Is this used as self serve component.\", \"sourced_at_deployment\": False,", "\"Hello World mS for subscribing the data from local DMaaP, DR or MR,", "sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}},", "\"version\": \"5.28.4\"}, \"response\": { \"format\": \"ves.coll.response\", 
\"version\": \"1.0.0\"}}]}, \"parameters\": [{ \"name\": \"collector.service.port\", \"value\":", "\"value\": \"false\"}] } ] }]} ]} builder = ToscaBuilder() builder.import_schema(policy_model) builder.import_spec_str(spec) return builder", "\"/opt/logs/DCAE/helloworldpm/vcc-logs\"}}, {\"container\": {\"bind\": \"/opt/app/vcc/archive/data\"}, \"host\": {\"path\": \"/opt/data/DCAE/helloworldpm/vcc-archive\"}}]}, \"artifacts\": [{\"type\": \"docker image\", \"uri\": \"dockercentral.it.att.com:5100/com.att.dcae.controller/dcae-controller-vcc-helloworld-pm:18.02-001\"}]}", "'string'}}, 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'capabilityName': {'type': 'tosca.capabilities.dummy'}}, 'requirements': [{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]})", "[{'dummyRequirement': {'capability': 'tosca.capabilities.dummy'}}]}) sub_node = NodeType('substituteNodeType', {'id': 'subNodeId', 'properties': {'inputName': {'type': 'string'}}, 'capabilities':", "'subNodeId', 'properties': {'propertyName': {'type': 'string'}}, 'capabilities': {'substituteCapability': {'type': 'tosca.capabilities.substitute'}}, 'requirements': [{'substituteRequirement': {'capability': 'tosca.capabilities.substitute'}}]})", "for vnf type monitorng\", \"type\": \"list\", \"entry_schema\": [{ \"name\": \"elementType\", \"value\": \"\"}, {\"name\":" ]
[ "Mainly used to create error message. val_type: Type Expected target type. Raises ------", "Any, val_name: str, val_range: List) -> None: \"\"\"Raise if ``val`` is not in", "------ ValueError When ``val`` is an empty :py:class:`str`. \"\"\" if not val: raise", "\"\"\" if os.path.exists(path) and os.path.isdir(path): raise FileExistsError(f'{path} is a directory.') def raise_if_is_file(*, path:", "directory. Parameters ---------- path: str Test path. Raises ------ FileExistsError When ``path`` exists", "type. Raises ------ TypeError When ``val`` is not an instance of ``val_type``. \"\"\"", "os.path.isdir(path): raise FileExistsError(f'{path} is a directory.') def raise_if_is_file(*, path: str) -> None: \"\"\"Raise", "not in ``val_range``. Parameters ---------- val: Any Test target. val_name: str Test target", "i in range(len(vals) - 1): if vals[i] > vals[i + 1]: raise ValueError(f'Must", "val_type: Type Expected target type. Raises ------ TypeError When ``val`` is not an", "such that ``vals[i] > vals[j]``. Parameters ---------- vals: list[Union[float, int]] Test targets. val_names:", "that ``vals[i] > vals[j]``. Parameters ---------- vals: list[Union[float, int]] Test targets. val_names: list[str]", "---------- val: str Test target. val_name: str Test target name. Mainly used to", "targets' names. Mainly used to create error message. Raises ------ ValueError When there", "name. Mainly used to create error message. Raises ------ ValueError When ``val`` is", "f'\\n- {v}', val_range)) ) def raise_if_not_instance(*, val: Any, val_name: str, val_type: Type) ->", "int]], val_names: List[str]) -> None: \"\"\"Raise if there exist some ``i < j``", "Parameters ---------- val: Any Test target. val_name: str Test target name. Mainly used", "------ ValueError When ``val`` is not in ``val_range``. \"\"\" if val not in", "raise TypeError(f'`{val_name}` must be an instance of `{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float, int]],", "error message. val_range: list Expected value range. Raises ------ ValueError When ``val`` is", "val not in val_range: raise ValueError( f'`{val_name}` must be one of the following", "be an instance of `{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) ->", "\"\"\"Raise if ``val`` is not an instance of ``val_type``. Parameters ---------- val: Any", "raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None: \"\"\"Raise if there exist some", "None: \"\"\"Raise if there exist some ``i < j`` such that ``vals[i] >", "val_range)) ) def raise_if_not_instance(*, val: Any, val_name: str, val_type: Type) -> None: \"\"\"Raise", "Test targets. val_names: list[str] Test targets' names. Mainly used to create error message.", "if val not in val_range: raise ValueError( f'`{val_name}` must be one of the", "to create error message. Raises ------ ValueError When ``val`` is an empty :py:class:`str`.", "of `{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None: \"\"\"Raise if", "-> None: \"\"\"Raise if ``val`` is not in ``val_range``. Parameters ---------- val: Any", "\"\"\"Raise if there exist some ``i < j`` such that ``vals[i] > vals[j]``.", "message. 
Raises ------ ValueError When there exist some ``i < j`` such that", "os.path.exists(path) and os.path.isdir(path): raise FileExistsError(f'{path} is a directory.') def raise_if_is_file(*, path: str) ->", "val: raise ValueError(f'`{val_name}` must be non-empty `str`.') def raise_if_is_directory(*, path: str) -> None:", "is not an instance of ``val_type``. \"\"\" if not isinstance(val, val_type): raise TypeError(f'`{val_name}`", "------ FileExistsError When ``path`` exists and is a file. \"\"\" if os.path.exists(path) and", "if ``path`` exists and is a directory. Parameters ---------- path: str Test path.", "an empty :py:class:`str`. \"\"\" if not val: raise ValueError(f'`{val_name}` must be non-empty `str`.')", "List[str]) -> None: \"\"\"Raise if there exist some ``i < j`` such that", "\"\"\"Raise if ``val`` is an empty :py:class:`str`. Parameters ---------- val: str Test target.", "str Test path. Raises ------ FileExistsError When ``path`` exists and is a directory.", "one of the following values:' + ''.join(map(lambda v: f'\\n- {v}', val_range)) ) def", "raise FileExistsError(f'{path} is a directory.') def raise_if_is_file(*, path: str) -> None: \"\"\"Raise if", "a file. \"\"\" if os.path.exists(path) and os.path.isfile(path): raise FileExistsError(f'{path} is a file.') def", "``val`` is not an instance of ``val_type``. \"\"\" if not isinstance(val, val_type): raise", "targets. val_names: list[str] Test targets' names. Mainly used to create error message. Raises", "error message. Raises ------ ValueError When there exist some ``i < j`` such", "if not val: raise ValueError(f'`{val_name}` must be non-empty `str`.') def raise_if_is_directory(*, path: str)", "vals: List[Union[float, int]], val_names: List[str]) -> None: \"\"\"Raise if there exist some ``i", "an instance of `{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None:", "FileExistsError When ``path`` exists and is a directory. \"\"\" if os.path.exists(path) and os.path.isdir(path):", "raise_if_is_directory(*, path: str) -> None: \"\"\"Raise if ``path`` exists and is a directory.", "FileExistsError When ``path`` exists and is a file. \"\"\" if os.path.exists(path) and os.path.isfile(path):", "Test path. Raises ------ FileExistsError When ``path`` exists and is a directory. \"\"\"", "When ``path`` exists and is a directory. \"\"\" if os.path.exists(path) and os.path.isdir(path): raise", "val_type): raise TypeError(f'`{val_name}` must be an instance of `{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float,", "def raise_if_is_file(*, path: str) -> None: \"\"\"Raise if ``path`` exists and is a", "Test path. Raises ------ FileExistsError When ``path`` exists and is a file. \"\"\"", "int]] Test targets. val_names: list[str] Test targets' names. Mainly used to create error", "a directory.') def raise_if_is_file(*, path: str) -> None: \"\"\"Raise if ``path`` exists and", "raise_if_empty_str(*, val: str, val_name: str) -> None: \"\"\"Raise if ``val`` is an empty", "str, val_name: str) -> None: \"\"\"Raise if ``val`` is an empty :py:class:`str`. Parameters", "f'`{val_name}` must be one of the following values:' + ''.join(map(lambda v: f'\\n- {v}',", "for i in range(len(vals) - 1): if vals[i] > vals[i + 1]: raise", "------ ValueError When there exist some ``i < j`` such that ``vals[i] >", "Union def raise_if_empty_str(*, val: str, val_name: str) -> None: \"\"\"Raise if ``val`` is", "exist some ``i < j`` such that ``vals[i] > vals[j]``. 
Parameters ---------- vals:", "if os.path.exists(path) and os.path.isdir(path): raise FileExistsError(f'{path} is a directory.') def raise_if_is_file(*, path: str)", "used to create error message. Raises ------ ValueError When there exist some ``i", "some ``i < j`` such that ``vals[i] > vals[j]``. Parameters ---------- vals: list[Union[float,", "Type, Union def raise_if_empty_str(*, val: str, val_name: str) -> None: \"\"\"Raise if ``val``", "file. \"\"\" if os.path.exists(path) and os.path.isfile(path): raise FileExistsError(f'{path} is a file.') def raise_if_not_in(*,", "target name. Mainly used to create error message. val_range: list Expected value range.", "``val_type``. \"\"\" if not isinstance(val, val_type): raise TypeError(f'`{val_name}` must be an instance of", "Type Expected target type. Raises ------ TypeError When ``val`` is not an instance", "non-empty `str`.') def raise_if_is_directory(*, path: str) -> None: \"\"\"Raise if ``path`` exists and", "None: \"\"\"Raise if ``path`` exists and is a directory. Parameters ---------- path: str", "of the following values:' + ''.join(map(lambda v: f'\\n- {v}', val_range)) ) def raise_if_not_instance(*,", "List) -> None: \"\"\"Raise if ``val`` is not in ``val_range``. Parameters ---------- val:", "there exist some ``i < j`` such that ``vals[i] > vals[j]``. \"\"\" for", "``val`` is an empty :py:class:`str`. Parameters ---------- val: str Test target. val_name: str", "is an empty :py:class:`str`. Parameters ---------- val: str Test target. val_name: str Test", "name. Mainly used to create error message. val_range: list Expected value range. Raises", "from typing import Any, List, Type, Union def raise_if_empty_str(*, val: str, val_name: str)", "-> None: \"\"\"Raise if ``path`` exists and is a directory. Parameters ---------- path:", "is a file. Parameters ---------- path: str Test path. Raises ------ FileExistsError When", "create error message. val_range: list Expected value range. Raises ------ ValueError When ``val``", "``path`` exists and is a file. \"\"\" if os.path.exists(path) and os.path.isfile(path): raise FileExistsError(f'{path}", "list[str] Test targets' names. Mainly used to create error message. Raises ------ ValueError", "raise ValueError(f'`{val_name}` must be non-empty `str`.') def raise_if_is_directory(*, path: str) -> None: \"\"\"Raise", "if os.path.exists(path) and os.path.isfile(path): raise FileExistsError(f'{path} is a file.') def raise_if_not_in(*, val: Any,", "Expected target type. Raises ------ TypeError When ``val`` is not an instance of", "`{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None: \"\"\"Raise if there", "error message. val_type: Type Expected target type. Raises ------ TypeError When ``val`` is", "FileExistsError(f'{path} is a directory.') def raise_if_is_file(*, path: str) -> None: \"\"\"Raise if ``path``", "vals[j]``. Parameters ---------- vals: list[Union[float, int]] Test targets. val_names: list[str] Test targets' names.", "str Test target. val_name: str Test target name. Mainly used to create error", "-> None: \"\"\"Raise if there exist some ``i < j`` such that ``vals[i]", "When ``val`` is not an instance of ``val_type``. \"\"\" if not isinstance(val, val_type):", "and is a directory. Parameters ---------- path: str Test path. Raises ------ FileExistsError", "< j`` such that ``vals[i] > vals[j]``. Parameters ---------- vals: list[Union[float, int]] Test", "exist some ``i < j`` such that ``vals[i] > vals[j]``. 
\"\"\" for i", "import os from typing import Any, List, Type, Union def raise_if_empty_str(*, val: str,", "------ TypeError When ``val`` is not an instance of ``val_type``. \"\"\" if not", "v: f'\\n- {v}', val_range)) ) def raise_if_not_instance(*, val: Any, val_name: str, val_type: Type)", "---------- vals: list[Union[float, int]] Test targets. val_names: list[str] Test targets' names. Mainly used", "raise FileExistsError(f'{path} is a file.') def raise_if_not_in(*, val: Any, val_name: str, val_range: List)", "> vals[j]``. \"\"\" for i in range(len(vals) - 1): if vals[i] > vals[i", "val_name: str Test target name. Mainly used to create error message. val_type: Type", "ValueError(f'`{val_name}` must be non-empty `str`.') def raise_if_is_directory(*, path: str) -> None: \"\"\"Raise if", "exists and is a file. Parameters ---------- path: str Test path. Raises ------", "str Test target name. Mainly used to create error message. val_range: list Expected", "Parameters ---------- val: str Test target. val_name: str Test target name. Mainly used", "is a directory. Parameters ---------- path: str Test path. Raises ------ FileExistsError When", "must be one of the following values:' + ''.join(map(lambda v: f'\\n- {v}', val_range))", "is a directory. \"\"\" if os.path.exists(path) and os.path.isdir(path): raise FileExistsError(f'{path} is a directory.')", "str) -> None: \"\"\"Raise if ``path`` exists and is a directory. Parameters ----------", "When there exist some ``i < j`` such that ``vals[i] > vals[j]``. \"\"\"", "When ``val`` is not in ``val_range``. \"\"\" if val not in val_range: raise", "in val_range: raise ValueError( f'`{val_name}` must be one of the following values:' +", "FileExistsError(f'{path} is a file.') def raise_if_not_in(*, val: Any, val_name: str, val_range: List) ->", "def raise_if_empty_str(*, val: str, val_name: str) -> None: \"\"\"Raise if ``val`` is an", "following values:' + ''.join(map(lambda v: f'\\n- {v}', val_range)) ) def raise_if_not_instance(*, val: Any,", "raise_if_not_instance(*, val: Any, val_name: str, val_type: Type) -> None: \"\"\"Raise if ``val`` is", "raise_if_is_file(*, path: str) -> None: \"\"\"Raise if ``path`` exists and is a file.", "val: Any, val_name: str, val_range: List) -> None: \"\"\"Raise if ``val`` is not", "Test target. val_name: str Test target name. Mainly used to create error message.", "path. Raises ------ FileExistsError When ``path`` exists and is a file. \"\"\" if", "and is a file. \"\"\" if os.path.exists(path) and os.path.isfile(path): raise FileExistsError(f'{path} is a", "\"\"\" if not isinstance(val, val_type): raise TypeError(f'`{val_name}` must be an instance of `{val_type.__name__}`.')", "path: str) -> None: \"\"\"Raise if ``path`` exists and is a file. Parameters", "is a directory.') def raise_if_is_file(*, path: str) -> None: \"\"\"Raise if ``path`` exists", "vals: list[Union[float, int]] Test targets. val_names: list[str] Test targets' names. Mainly used to", "raise_if_not_in(*, val: Any, val_name: str, val_range: List) -> None: \"\"\"Raise if ``val`` is", "str Test path. Raises ------ FileExistsError When ``path`` exists and is a file.", "empty :py:class:`str`. Parameters ---------- val: str Test target. val_name: str Test target name.", "in ``val_range``. \"\"\" if val not in val_range: raise ValueError( f'`{val_name}` must be", "TypeError When ``val`` is not an instance of ``val_type``. 
\"\"\" if not isinstance(val,", "`str`.') def raise_if_is_directory(*, path: str) -> None: \"\"\"Raise if ``path`` exists and is", "target name. Mainly used to create error message. Raises ------ ValueError When ``val``", "``i < j`` such that ``vals[i] > vals[j]``. Parameters ---------- vals: list[Union[float, int]]", "be non-empty `str`.') def raise_if_is_directory(*, path: str) -> None: \"\"\"Raise if ``path`` exists", "list Expected value range. Raises ------ ValueError When ``val`` is not in ``val_range``.", "val_range: raise ValueError( f'`{val_name}` must be one of the following values:' + ''.join(map(lambda", "TypeError(f'`{val_name}` must be an instance of `{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names:", "val: str Test target. val_name: str Test target name. Mainly used to create", "\"\"\"Raise if ``path`` exists and is a file. Parameters ---------- path: str Test", "str) -> None: \"\"\"Raise if ``val`` is an empty :py:class:`str`. Parameters ---------- val:", "a file. Parameters ---------- path: str Test path. Raises ------ FileExistsError When ``path``", "used to create error message. val_range: list Expected value range. Raises ------ ValueError", "j`` such that ``vals[i] > vals[j]``. \"\"\" for i in range(len(vals) - 1):", "Raises ------ FileExistsError When ``path`` exists and is a directory. \"\"\" if os.path.exists(path)", ") def raise_if_not_instance(*, val: Any, val_name: str, val_type: Type) -> None: \"\"\"Raise if", "used to create error message. val_type: Type Expected target type. Raises ------ TypeError", "in ``val_range``. Parameters ---------- val: Any Test target. val_name: str Test target name.", "instance of ``val_type``. Parameters ---------- val: Any Test target. val_name: str Test target", "the following values:' + ''.join(map(lambda v: f'\\n- {v}', val_range)) ) def raise_if_not_instance(*, val:", "exists and is a directory. \"\"\" if os.path.exists(path) and os.path.isdir(path): raise FileExistsError(f'{path} is", "if ``val`` is not an instance of ``val_type``. Parameters ---------- val: Any Test", "-> None: \"\"\"Raise if ``val`` is not an instance of ``val_type``. Parameters ----------", "values:' + ''.join(map(lambda v: f'\\n- {v}', val_range)) ) def raise_if_not_instance(*, val: Any, val_name:", "such that ``vals[i] > vals[j]``. \"\"\" for i in range(len(vals) - 1): if", "``i < j`` such that ``vals[i] > vals[j]``. \"\"\" for i in range(len(vals)", "message. val_range: list Expected value range. Raises ------ ValueError When ``val`` is not", "a file.') def raise_if_not_in(*, val: Any, val_name: str, val_range: List) -> None: \"\"\"Raise", "None: \"\"\"Raise if ``val`` is not an instance of ``val_type``. Parameters ---------- val:", "target type. Raises ------ TypeError When ``val`` is not an instance of ``val_type``.", "None: \"\"\"Raise if ``val`` is an empty :py:class:`str`. Parameters ---------- val: str Test", "path: str Test path. Raises ------ FileExistsError When ``path`` exists and is a", "Any, List, Type, Union def raise_if_empty_str(*, val: str, val_name: str) -> None: \"\"\"Raise", "import Any, List, Type, Union def raise_if_empty_str(*, val: str, val_name: str) -> None:", "Raises ------ FileExistsError When ``path`` exists and is a file. \"\"\" if os.path.exists(path)", "None: \"\"\"Raise if ``path`` exists and is a file. Parameters ---------- path: str", "not in val_range: raise ValueError( f'`{val_name}` must be one of the following values:'", "path. 
def raise_if_not_in(*, val: Any, val_name: str, val_range: List) -> None:
    """Raise if ``val`` is not in ``val_range``.

    Parameters
    ----------
    val: Any
        Test target.
    val_name: str
        Test target name. Mainly used to create error message.
    val_range: list
        Expected value range.

    Raises
    ------
    ValueError
        When ``val`` is not in ``val_range``.
    """
    if val not in val_range:
        raise ValueError(
            f'`{val_name}` must be one of the following values:' +
            ''.join(map(lambda v: f'\n- {v}', val_range))
        )


def raise_if_not_instance(*, val: Any, val_name: str, val_type: Type) -> None:
    """Raise if ``val`` is not an instance of ``val_type``.

    Parameters
    ----------
    val: Any
        Test target.
    val_name: str
        Test target name. Mainly used to create error message.
    val_type: Type
        Expected target type.

    Raises
    ------
    TypeError
        When ``val`` is not an instance of ``val_type``.
    """
    if not isinstance(val, val_type):
        raise TypeError(f'`{val_name}` must be an instance of `{val_type.__name__}`.')


def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None:
    """Raise if there exist some ``i < j`` such that ``vals[i] > vals[j]``.

    Parameters
    ----------
    vals: list[Union[float, int]]
        Test targets.
    val_names: list[str]
        Test targets' names. Mainly used to create error message.

    Raises
    ------
    ValueError
        When there exist some ``i < j`` such that ``vals[i] > vals[j]``.
    """
    for i in range(len(vals) - 1):
        if vals[i] > vals[i + 1]:
            raise ValueError(f'Must have `{" <= ".join(val_names)}`.')
Mainly used to create error", "val_type: Type) -> None: \"\"\"Raise if ``val`` is not an instance of ``val_type``.", "instance of `{val_type.__name__}`.') def raise_if_wrong_ordered(*, vals: List[Union[float, int]], val_names: List[str]) -> None: \"\"\"Raise", "str Test target name. Mainly used to create error message. Raises ------ ValueError", "\"\"\" if not val: raise ValueError(f'`{val_name}` must be non-empty `str`.') def raise_if_is_directory(*, path:", "value range. Raises ------ ValueError When ``val`` is not in ``val_range``. \"\"\" if", "os.path.exists(path) and os.path.isfile(path): raise FileExistsError(f'{path} is a file.') def raise_if_not_in(*, val: Any, val_name:", "there exist some ``i < j`` such that ``vals[i] > vals[j]``. Parameters ----------", "is not in ``val_range``. \"\"\" if val not in val_range: raise ValueError( f'`{val_name}`", "file. Parameters ---------- path: str Test path. Raises ------ FileExistsError When ``path`` exists", "ValueError When there exist some ``i < j`` such that ``vals[i] > vals[j]``.", "not an instance of ``val_type``. Parameters ---------- val: Any Test target. val_name: str", "if ``path`` exists and is a file. Parameters ---------- path: str Test path." ]
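
# A short usage sketch for the validators above (illustrative only; the file
# name, format list, and bounds below are hypothetical, not from the module):
if __name__ == '__main__':
    raise_if_not_instance(val='out.txt', val_name='path', val_type=str)
    raise_if_empty_str(val='out.txt', val_name='path')
    raise_if_is_directory(path='out.txt')
    raise_if_not_in(val='txt', val_name='fmt', val_range=['txt', 'csv'])
    raise_if_wrong_ordered(vals=[0.1, 0.5, 0.9], val_names=['lo', 'mid', 'hi'])
    print('all checks passed')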
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import tensorflow as tf
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
import time
import os
import copy

# plot the train and validation loss/accuracy plots and save it.
# takes array of validation losses, validation accuracy, train losses,
# train accuracy and number of epochs respectively.
def plot_graph(val_loss, val_acc, tr_loss, tr_acc, num_epochs):
    plt.subplot(211)
    plt.title("Loss plots vs. Number of Training Epochs")
    plt.plot(range(1, num_epochs + 1), val_loss, label="validation")
    plt.plot(range(1, num_epochs + 1), tr_loss, label="train")
    plt.xticks(np.arange(1, num_epochs + 1, 1.0))
    plt.legend()
    plt.subplot(212)
    plt.title("Accuracy plots vs. Number of Training Epochs")
    plt.plot(range(1, num_epochs + 1), val_acc, label="validation")
    plt.plot(range(1, num_epochs + 1), tr_acc, label="train")
    plt.xticks(np.arange(1, num_epochs + 1, 1.0))
    plt.legend()
    plt.tight_layout()
    plt.savefig("plot.png")
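
# Usage sketch for plot_graph (hypothetical two-epoch histories, illustrative
# only): the four lists must each have length num_epochs.
#   plot_graph([0.9, 0.7], [0.55, 0.64], [1.0, 0.8], [0.50, 0.61], 2)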

# train the model
# takes the model, dataloaders, criterion, optimizer, device(GPU or CPU)
# and number of epochs respectively as parameters
# returns model, array of validation accuracy, validation loss,
# train accuracy, train loss respectively
def train_model(model, dataloaders, criterion, optimizer, device, num_epochs=25):
    since = time.time()
    val_acc_history = []
    val_loss_history = []
    tr_acc_history = []
    tr_loss_history = []
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    n_epochs_stop = 5
    min_val_loss = np.Inf
    epochs_no_improve = 0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # scheduler.step(epoch)  # for lr_scheduler
        # Each epoch has a training and validation phase
        for phase in ['train', 'validation']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            loader = dataloaders[phase]
            # Iterate over data.
            for inputs, labels in loader:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward
                # track history if only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model
            if phase == 'validation' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'validation':
                val_acc_history.append(epoch_acc)
                val_loss_history.append(epoch_loss)
            if phase == 'train':
                tr_acc_history.append(epoch_acc)
                tr_loss_history.append(epoch_loss)
            # early stopping
            if phase == 'validation':
                if epoch_loss < min_val_loss:
                    epochs_no_improve = 0
                    min_val_loss = epoch_loss
                else:
                    epochs_no_improve += 1
                # Check early stopping condition
                if epochs_no_improve == n_epochs_stop:
                    print('Early stopping!')
                    return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history
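
# A minimal sketch of the early-stopping rule above, in isolation (the
# `losses` list and `patience` value are hypothetical): training stops once
# the validation loss fails to improve for `patience` consecutive epochs.
#   def stop_epoch(losses, patience=5):
#       best, bad = float('inf'), 0
#       for t, loss in enumerate(losses):
#           if loss < best:
#               best, bad = loss, 0
#           else:
#               bad += 1
#           if bad == patience:
#               return t
#       return None
#   stop_epoch([1.0, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85])  # -> 6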

# data augmentation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'validation': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

data_dir = "dataset"
num_classes = 10
batch_size = 32
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                  for x in ['train', 'validation', 'test']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,
                                              shuffle=True, num_workers=2)
               for x in ['train', 'validation', 'test']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'validation', 'test']}
class_names = image_datasets['train'].classes

model_ft = models.vgg16(pretrained=True)
# freeze layers before classifiers
for param in model_ft.features.parameters():
    param.requires_grad = False
# different number of layer freeze
# model_ft.features[-1].requires_grad = True
# model_ft.features[-2].requires_grad = True
# model_ft.features[-3].requires_grad = True
model_ft.classifier[6] = nn.Linear(4096, 10)  # modify the last layer
# specify loss function
criterion = nn.CrossEntropyLoss()
# specify optimizer
optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.001)
# lr_scheduler
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
# different optimizer
# optimizer = torch.optim.SGD(model_ft.parameters(), lr=0.001)
# weight_decay
# optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.1, weight_decay=0.001)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model_ft = model_ft.to(device)  # send the model to the gpu
model_ft, val_acc, val_loss, tr_acc, tr_loss = train_model(
    model_ft, dataloaders, criterion, optimizer, device, num_epochs=30)  # train model

# test the model
correct = 0
topk = 0
total = 0
testloader = dataloaders['test']
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)
        outputs = model_ft(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        probs, classes = outputs.topk(5, dim=1)
        labels_size = labels.size(0)
        for i in range(labels_size):
            if labels[i] in classes[i]:
                topk += 1
print('Accuracy of the model on the test images: %d %%' % (100 * correct / total))
print('Accuracy of the top 5 on the test images: %d %%' % (100 * topk / total))
# val/train loss and accuracy plots
# use the realized number of epochs, in case early stopping ended training early
plot_graph(val_loss, val_acc, tr_loss, tr_acc, len(val_loss))
param.requires_grad", "and number of epochs respectively as parameters # returns model, array of validation", "0 loader = dataloaders[phase] # Iterate over data. for inputs, labels in loader:", "epochs_no_improve += 1 # Check early stopping condition if epochs_no_improve == n_epochs_stop: print('Early", "0.0 running_corrects = 0 loader = dataloaders[phase] # Iterate over data. for inputs,", "} data_dir = \"dataset\" num_classes = 10 batch_size = 32 image_datasets = {x:", "# plot the train and validation loss/accuracy plots and save it. # takes", "array of validation accuracy, validation loss, train accuracy, train loss respectively def train_model(model,", "number of layer freeze #model_ft.features[-1].requires_grad = True #model_ft.features[-2].requires_grad = True #model_ft.features[-3].requires_grad = True", "as tf # plot the train and validation loss/accuracy plots and save it.", "if phase == 'train': tr_acc_history.append(epoch_acc) tr_loss_history.append(epoch_loss) #early stopping if phase == 'validation': if", "# data augmentation data_transforms = { 'train': transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456,", "images.to(device) labels = labels.to(device) outputs = model_ft(images) _, predicted = torch.max(outputs.data, 1) total", "import torch.optim as optim import tensorflow as tf import numpy as np import", "phase for phase in ['train', 'validation']: if phase == 'train': model.train() # Set", "if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) # print(\"x\")", "train losses, train accuracy and number of epochs respectively. def plot_graph(val_loss, val_acc, tr_loss,", "of epochs respectively as parameters # returns model, array of validation accuracy, validation", "_, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item()", "1.0)) plt.legend() plt.tight_layout() plt.savefig(\"plot.png\") # train the model # takes the model, dataloaders,", "labels in loader: inputs = inputs.to(device) labels = labels.to(device) # print(\"in dataloaders\", end=\"", "]), 'validation': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]),", "_, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize", "the test images: %d %%' % (100 * topk / total)) # val/train", "total)) print('Accuracy of the top 5 on the test images: %d %%' %", "= np.Inf epochs_no_improve = 0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs -", "epoch_acc)) # deep copy the model if phase == 'validation' and epoch_acc >", "# forward # track history if only in train with torch.set_grad_enabled(phase == 'train'):", "epoch_loss < min_val_loss: epochs_no_improve = 0 min_val_loss = val_loss else: epochs_no_improve += 1", "'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds", "+= labels.size(0) correct += (predicted == labels).sum().item() probs, classes = outputs.topk(5, dim=1) labels_size", "{:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best", "time.time() val_acc_history = [] val_loss_history = [] tr_acc_history = [] tr_loss_history = []", "transforms from torch.autograd import Variable import matplotlib.pyplot as plt import time 
import os", "transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = \"dataset\" num_classes", "transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224),", "tf # plot the train and validation loss/accuracy plots and save it. #", "print('Accuracy of the top 5 on the test images: %d %%' % (100", "test images: %d %%' % (100 * topk / total)) # val/train loss", "{x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'validation', 'test']} dataloaders = {x:", "a training and validation phase for phase in ['train', 'validation']: if phase ==", "of epochs respectively. def plot_graph(val_loss, val_acc, tr_loss, tr_acc, num_epochs): plt.subplot(211) plt.title(\"Loss plots vs.", "= [] best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 n_epochs_stop = 5 min_val_loss =", "in loader: inputs = inputs.to(device) labels = labels.to(device) # print(\"in dataloaders\", end=\" \")", "'validation': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test':", "'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), }", "predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() probs,", "== 'validation': if epoch_loss < min_val_loss: epochs_no_improve = 0 min_val_loss = val_loss else:", "32 image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'validation', 'test']}", "transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'validation': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224),", "= data images = images.to(device) labels = labels.to(device) outputs = model_ft(images) _, predicted", "optimizer, device(GPU or CPU) and number of epochs respectively as parameters # returns", "for x in ['train', 'validation', 'test']} dataset_sizes = {x: len(image_datasets[x]) for x in", "plt.plot(range(1,num_epochs+1),val_loss,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_loss,label=\"train\") plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.subplot(212) plt.title(\"Accuracy plots vs. 
Number of Training", "# zero the parameter gradients optimizer.zero_grad() # forward # track history if only", "<reponame>AybukeYALCINER/image_classification<filename>part2.py from __future__ import print_function from __future__ import division import torch import torch.nn", "#different optimizer #optimizer = torch.optim.SGD(model_ft.parameters(), lr=0.001) #weight_decay #optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.1,weight_decay= 0.001) device", "print('Accuracy of the model on the test images: %d %%' % (100 *", "= labels.to(device) outputs = model_ft(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0)", "print('Early stopping!') return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history time_elapsed = time.time() - since", "torchvision from torchvision import datasets, models, transforms from torch.autograd import Variable import matplotlib.pyplot", "model on the test images: %d %%' % (100 * correct / total))", "of Training Epochs\") plt.plot(range(1,num_epochs+1),val_acc,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_acc,label=\"train\") plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.tight_layout() plt.savefig(\"plot.png\") # train", "== 'train'): outputs = model(inputs) # print(\"x\") _, preds = torch.max(outputs, 1) loss", "num_epochs+1, 1.0)) plt.legend() plt.subplot(212) plt.title(\"Accuracy plots vs. Number of Training Epochs\") plt.plot(range(1,num_epochs+1),val_acc,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_acc,label=\"train\")", "forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs", "* topk / total)) # val/train loss and accuracy plots plot_graph(val_loss, val_acc, tr_loss,", "= outputs.topk(5, dim=1) labels_size = labels.size(0) for i in range(labels_size): if(labels[i] in classes[i]):", "[] tr_acc_history = [] tr_loss_history = [] best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0", "running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / len(dataloaders[phase].dataset) epoch_acc = running_corrects.double()", "< min_val_loss: epochs_no_improve = 0 min_val_loss = val_loss else: epochs_no_improve += 1 #", "epoch has a training and validation phase for phase in ['train', 'validation']: if", "it. # takes array of validation losses, validation accuracy, train losses, train accuracy", "parameter gradients optimizer.zero_grad() # forward # track history if only in train with", "val_acc_history, val_loss_history, tr_acc_history, tr_loss_history time_elapsed = time.time() - since print('Training complete in {:.0f}m", "transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir =", "on the test images: %d %%' % (100 * topk / total)) #", "= 0 loader = dataloaders[phase] # Iterate over data. 
for inputs, labels in", "# specify optimizer optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.001) #lr_scheduler #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)", "labels) # backward + optimize only if in training phase if phase ==", "model, dataloaders, criterion, optimizer, device(GPU or CPU) and number of epochs respectively as", "gamma=0.1) #different optimizer #optimizer = torch.optim.SGD(model_ft.parameters(), lr=0.001) #weight_decay #optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.1,weight_decay= 0.001)", "\"cpu\") model_ft = model_ft.to(device) #send the model to the gpu model_ft, val_acc, val_loss,", "topk / total)) # val/train loss and accuracy plots plot_graph(val_loss, val_acc, tr_loss, tr_acc,", "data_transforms = { 'train': transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,", "transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } data_dir = \"dataset\"", "images: %d %%' % (100 * correct / total)) print('Accuracy of the top", "topk += 1 print('Accuracy of the model on the test images: %d %%'", "# print(param) param.requires_grad = False #different number of layer freeze #model_ft.features[-1].requires_grad = True", "the train and validation loss/accuracy plots and save it. # takes array of", "if phase == 'validation': if epoch_loss < min_val_loss: epochs_no_improve = 0 min_val_loss =", "# specify loss function criterion = nn.CrossEntropyLoss() # specify optimizer optimizer = torch.optim.Adam(model_ft.parameters(),", "plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.tight_layout() plt.savefig(\"plot.png\") # train the model # takes the", "dataloaders[phase] # Iterate over data. 
for inputs, labels in loader: inputs = inputs.to(device)", "epoch_loss = running_loss / len(dataloaders[phase].dataset) epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset) print('{} Loss: {:.4f}", "only if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics", "/ total)) print('Accuracy of the top 5 on the test images: %d %%'", "model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0", "array of validation losses, validation accuracy, train losses, train accuracy and number of", "time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))", "the model to the gpu model_ft, val_acc, val_loss, tr_acc, tr_loss = train_model(model_ft, dataloaders,", "1) loss = criterion(outputs, labels) # backward + optimize only if in training", "%d %%' % (100 * topk / total)) # val/train loss and accuracy", "running_corrects.double() / len(dataloaders[phase].dataset) print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) # deep copy", "num_workers=2) for x in ['train', 'validation', 'test']} dataset_sizes = {x: len(image_datasets[x]) for x", "== 'train': model.train() # Set model to training mode else: model.eval() # Set", "the model if phase == 'validation' and epoch_acc > best_acc: best_acc = epoch_acc", "CPU) and number of epochs respectively as parameters # returns model, array of", "Set model to evaluate mode running_loss = 0.0 running_corrects = 0 loader =", "# Set model to evaluate mode running_loss = 0.0 running_corrects = 0 loader", "60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model weights model.load_state_dict(best_model_wts) return model,", "complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc))", "[0.229, 0.224, 0.225]) ]), } data_dir = \"dataset\" num_classes = 10 batch_size =", "epoch_loss, epoch_acc)) # deep copy the model if phase == 'validation' and epoch_acc", "{:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) # deep copy the model if phase ==", "'test']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=2) for x in ['train', 'validation',", "of the model on the test images: %d %%' % (100 * correct", "backward + optimize only if in training phase if phase == 'train': loss.backward()", "transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485,", "= 0.0 n_epochs_stop = 5 min_val_loss = np.Inf epochs_no_improve = 0 for epoch", "outputs = model(inputs) # print(\"x\") _, preds = torch.max(outputs, 1) loss = criterion(outputs,", "lr=0.001) #lr_scheduler #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1) #different optimizer #optimizer = torch.optim.SGD(model_ft.parameters(), lr=0.001)", "phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss += loss.item() *", "Number of Training Epochs\") plt.plot(range(1,num_epochs+1),val_loss,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_loss,label=\"train\") plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.subplot(212) plt.title(\"Accuracy plots", "image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'validation', 'test']} dataloaders", "print_function from __future__ import 
division import torch import torch.nn as nn import torch.optim", "parameters # returns model, array of validation accuracy, validation loss, train accuracy, train", "['train', 'validation']: if phase == 'train': model.train() # Set model to training mode", "transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(),", "data_dir = \"dataset\" num_classes = 10 batch_size = 32 image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir,", "optimizer, device,num_epochs=25): since = time.time() val_acc_history = [] val_loss_history = [] tr_acc_history =", "if in training phase if phase == 'train': loss.backward() optimizer.step() # statistics running_loss", "torch.nn as nn import torch.optim as optim import tensorflow as tf import numpy", "plt.legend() plt.tight_layout() plt.savefig(\"plot.png\") # train the model # takes the model, dataloaders, criterion,", "Training Epochs\") plt.plot(range(1,num_epochs+1),val_acc,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_acc,label=\"train\") plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.tight_layout() plt.savefig(\"plot.png\") # train the", "len(image_datasets[x]) for x in ['train', 'validation', 'test']} class_names = image_datasets['train'].classes model_ft = models.vgg16(pretrained=True)", "dataloaders, criterion, optimizer,device, num_epochs=30) #train model #test the model correct = 0 topk", "epochs_no_improve == n_epochs_stop: print('Early stopping!') return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history time_elapsed =", "in classes[i]): topk += 1 print('Accuracy of the model on the test images:", "# deep copy the model if phase == 'validation' and epoch_acc > best_acc:", "val_acc, val_loss, tr_acc, tr_loss = train_model(model_ft, dataloaders, criterion, optimizer,device, num_epochs=30) #train model #test", "== n_epochs_stop: print('Early stopping!') return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history time_elapsed = time.time()", "'train': tr_acc_history.append(epoch_acc) tr_loss_history.append(epoch_loss) #early stopping if phase == 'validation': if epoch_loss < min_val_loss:", "weights model.load_state_dict(best_model_wts) return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history # data augmentation data_transforms =", "plots and save it. # takes array of validation losses, validation accuracy, train", "#for lr_scheduler # Each epoch has a training and validation phase for phase", "before classifiers for param in model_ft.features.parameters(): # print(param) param.requires_grad = False #different number", "model_ft.classifier[6] = nn.Linear(4096,10) #modify the last layer # specify loss function criterion =", "]), 'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]),", "plt import time import os import copy import tensorflow as tf # plot", "plt.plot(range(1,num_epochs+1),val_acc,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_acc,label=\"train\") plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.tight_layout() plt.savefig(\"plot.png\") # train the model #", "0.406], [0.229, 0.224, 0.225]) ]), 'validation': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406],", "over data. 
for inputs, labels in loader: inputs = inputs.to(device) labels = labels.to(device)", "True #model_ft.features[-3].requires_grad = True model_ft.classifier[6] = nn.Linear(4096,10) #modify the last layer # specify", "the model, dataloaders, criterion, optimizer, device(GPU or CPU) and number of epochs respectively", "= inputs.to(device) labels = labels.to(device) # print(\"in dataloaders\", end=\" \") # zero the", "#scheduler.step(epoch) #for lr_scheduler # Each epoch has a training and validation phase for", "best model weights model.load_state_dict(best_model_wts) return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history # data augmentation", "image_datasets['train'].classes model_ft = models.vgg16(pretrained=True) # freeze layers before classifiers for param in model_ft.features.parameters():", "model to the gpu model_ft, val_acc, val_loss, tr_acc, tr_loss = train_model(model_ft, dataloaders, criterion,", "= 5 min_val_loss = np.Inf epochs_no_improve = 0 for epoch in range(num_epochs): print('Epoch", "0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10)", "10 batch_size = 32 image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in", "transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'validation': transforms.Compose([", "val_acc_history = [] val_loss_history = [] tr_acc_history = [] tr_loss_history = [] best_model_wts", "= dataloaders[phase] # Iterate over data. for inputs, labels in loader: inputs =", "{x: len(image_datasets[x]) for x in ['train', 'validation', 'test']} class_names = image_datasets['train'].classes model_ft =", "returns model, array of validation accuracy, validation loss, train accuracy, train loss respectively", "param in model_ft.features.parameters(): # print(param) param.requires_grad = False #different number of layer freeze", "Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'validation'", "data. for inputs, labels in loader: inputs = inputs.to(device) labels = labels.to(device) #", "import datasets, models, transforms from torch.autograd import Variable import matplotlib.pyplot as plt import", "loss function criterion = nn.CrossEntropyLoss() # specify optimizer optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.001) #lr_scheduler", "nn.CrossEntropyLoss() # specify optimizer optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.001) #lr_scheduler #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3,", "#send the model to the gpu model_ft, val_acc, val_loss, tr_acc, tr_loss = train_model(model_ft,", "tf import numpy as np import torchvision from torchvision import datasets, models, transforms", "accuracy, train losses, train accuracy and number of epochs respectively. 
def plot_graph(val_loss, val_acc,", "torch.optim.Adam(model_ft.parameters(), lr=0.1,weight_decay= 0.001) device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") model_ft = model_ft.to(device)", "== labels).sum().item() probs, classes = outputs.topk(5, dim=1) labels_size = labels.size(0) for i in", "\") # zero the parameter gradients optimizer.zero_grad() # forward # track history if", "torch.autograd import Variable import matplotlib.pyplot as plt import time import os import copy", "training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0", "range(labels_size): if(labels[i] in classes[i]): topk += 1 print('Accuracy of the model on the", "stopping if phase == 'validation': if epoch_loss < min_val_loss: epochs_no_improve = 0 min_val_loss", "import time import os import copy import tensorflow as tf # plot the", "best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'validation': val_acc_history.append(epoch_acc) val_loss_history.append(epoch_loss) if phase == 'train':", "of validation losses, validation accuracy, train losses, train accuracy and number of epochs", "in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) # print(\"x\") _, preds", "= time.time() val_acc_history = [] val_loss_history = [] tr_acc_history = [] tr_loss_history =", "and validation phase for phase in ['train', 'validation']: if phase == 'train': model.train()", "__future__ import division import torch import torch.nn as nn import torch.optim as optim", "for x in ['train', 'validation', 'test']} class_names = image_datasets['train'].classes model_ft = models.vgg16(pretrained=True) #", "plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.subplot(212) plt.title(\"Accuracy plots vs. Number of Training Epochs\") plt.plot(range(1,num_epochs+1),val_acc,label=\"validation\")", "phase == 'train': model.train() # Set model to training mode else: model.eval() #", "'train'): outputs = model(inputs) # print(\"x\") _, preds = torch.max(outputs, 1) loss =", "plt.legend() plt.subplot(212) plt.title(\"Accuracy plots vs. Number of Training Epochs\") plt.plot(range(1,num_epochs+1),val_acc,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_acc,label=\"train\") plt.xticks(np.arange(1, num_epochs+1,", "min_val_loss = np.Inf epochs_no_improve = 0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs", "in ['train', 'validation', 'test']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=2) for x", "min_val_loss = val_loss else: epochs_no_improve += 1 # Check early stopping condition if", "criterion, optimizer,device, num_epochs=30) #train model #test the model correct = 0 topk =", "n_epochs_stop = 5 min_val_loss = np.Inf epochs_no_improve = 0 for epoch in range(num_epochs):", "tr_loss = train_model(model_ft, dataloaders, criterion, optimizer,device, num_epochs=30) #train model #test the model correct", "running_corrects = 0 loader = dataloaders[phase] # Iterate over data. 
for inputs, labels", "0.225]) ]), 'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])", "% (100 * correct / total)) print('Accuracy of the top 5 on the", "the model correct = 0 topk = 0 total = 0 testloader =", "#optimizer = torch.optim.SGD(model_ft.parameters(), lr=0.001) #weight_decay #optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.1,weight_decay= 0.001) device = torch.device(\"cuda:0\"", "loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / len(dataloaders[phase].dataset)", "#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1) #different optimizer #optimizer = torch.optim.SGD(model_ft.parameters(), lr=0.001) #weight_decay #optimizer", "# train the model # takes the model, dataloaders, criterion, optimizer, device(GPU or", "1)) print('-' * 10) #scheduler.step(epoch) #for lr_scheduler # Each epoch has a training", "since = time.time() val_acc_history = [] val_loss_history = [] tr_acc_history = [] tr_loss_history", "# Set model to training mode else: model.eval() # Set model to evaluate", "True #model_ft.features[-2].requires_grad = True #model_ft.features[-3].requires_grad = True model_ft.classifier[6] = nn.Linear(4096,10) #modify the last", "\"dataset\" num_classes = 10 batch_size = 32 image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])", "= 0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' *", "i in range(labels_size): if(labels[i] in classes[i]): topk += 1 print('Accuracy of the model", "validation accuracy, train losses, train accuracy and number of epochs respectively. def plot_graph(val_loss,", "0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224,", "val_acc, tr_loss, tr_acc, num_epochs): plt.subplot(211) plt.title(\"Loss plots vs. Number of Training Epochs\") plt.plot(range(1,num_epochs+1),val_loss,label=\"validation\")", "= True model_ft.classifier[6] = nn.Linear(4096,10) #modify the last layer # specify loss function", "the gpu model_ft, val_acc, val_loss, tr_acc, tr_loss = train_model(model_ft, dataloaders, criterion, optimizer,device, num_epochs=30)", "model # takes the model, dataloaders, criterion, optimizer, device(GPU or CPU) and number", "loss/accuracy plots and save it. 
# takes array of validation losses, validation accuracy,", "'validation' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) if phase", "print(\"in dataloaders\", end=\" \") # zero the parameter gradients optimizer.zero_grad() # forward #", "torchvision import datasets, models, transforms from torch.autograd import Variable import matplotlib.pyplot as plt", "topk = 0 total = 0 testloader = dataloaders['test'] with torch.no_grad(): for data", "print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) # deep copy the model if", "epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'validation':", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") model_ft = model_ft.to(device) #send the model", "as plt import time import os import copy import tensorflow as tf #", "accuracy, validation loss, train accuracy, train loss respectively def train_model(model, dataloaders, criterion, optimizer,", "== 'validation' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) if", "import tensorflow as tf # plot the train and validation loss/accuracy plots and", "plt.plot(range(1,num_epochs+1),tr_acc,label=\"train\") plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.tight_layout() plt.savefig(\"plot.png\") # train the model # takes", "dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'validation', 'test']} class_names = image_datasets['train'].classes", "train accuracy, train loss respectively def train_model(model, dataloaders, criterion, optimizer, device,num_epochs=25): since =", "= [] val_loss_history = [] tr_acc_history = [] tr_loss_history = [] best_model_wts =", "= torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if", "optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase ==", "num_epochs=30) #train model #test the model correct = 0 topk = 0 total", "testloader: images, labels = data images = images.to(device) labels = labels.to(device) outputs =", "of Training Epochs\") plt.plot(range(1,num_epochs+1),val_loss,label=\"validation\") plt.plot(range(1,num_epochs+1),tr_loss,label=\"train\") plt.xticks(np.arange(1, num_epochs+1, 1.0)) plt.legend() plt.subplot(212) plt.title(\"Accuracy plots vs.", "for i in range(labels_size): if(labels[i] in classes[i]): topk += 1 print('Accuracy of the", "Set model to training mode else: model.eval() # Set model to evaluate mode", "return model, val_acc_history, val_loss_history, tr_acc_history, tr_loss_history time_elapsed = time.time() - since print('Training complete", "loss = criterion(outputs, labels) # backward + optimize only if in training phase", "x), data_transforms[x]) for x in ['train', 'validation', 'test']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size,", "model_ft.to(device) #send the model to the gpu model_ft, val_acc, val_loss, tr_acc, tr_loss =", "model_ft(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted ==", "transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([", "phase == 'validation' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict())", "/ len(dataloaders[phase].dataset) epoch_acc = 
running_corrects.double() / len(dataloaders[phase].dataset) print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss,", "import print_function from __future__ import division import torch import torch.nn as nn import", "dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=2) for x in ['train', 'validation', 'test']}", "mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects", "// 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model", "the model on the test images: %d %%' % (100 * correct /", "optimizer = torch.optim.Adam(model_ft.parameters(), lr=0.001) #lr_scheduler #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1) #different optimizer #optimizer", "epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) if phase == 'validation': val_acc_history.append(epoch_acc) val_loss_history.append(epoch_loss) if phase ==" ]
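
As a usage illustration only (not part of the original repository): once the script above has run, the fine-tuned model can classify a single image by reusing the validation preprocessing. The names model_ft, device, data_transforms and class_names are assumed to come from the script; "example.jpg" is a placeholder path.

# Hypothetical usage sketch: classify one image with the fine-tuned model.
# Assumes model_ft, device, data_transforms and class_names from the script
# above are in scope; "example.jpg" is a placeholder path.
from PIL import Image

def predict_image(path):
    image = Image.open(path).convert('RGB')
    # reuse the validation preprocessing and add a batch dimension
    tensor = data_transforms['validation'](image).unsqueeze(0).to(device)
    model_ft.eval()
    with torch.no_grad():
        outputs = model_ft(tensor)
        _, pred = torch.max(outputs, 1)
    return class_names[pred.item()]

print(predict_image("example.jpg"))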
[ "Set[str]] = {} for entity_type in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type): for", "import Enum from typing import List, Set, KeysView, Dict, ValuesView from submission_broker.submission.entity import", "str, index: str) -> Entity: return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) -> Dict[str,", "get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] = {}", "HandleCollision(Enum): UPDATE = 1 OVERWRITE = 2 ERROR = 3 class Submission: def", "{} def has_data(self) -> bool: return len(self.__map) > 0 def map(self, entity_type: str,", "= {} for entity_type, indexed_entities in self.__map.items(): for index, entity in indexed_entities.items(): view.setdefault(entity_type,", "entities in self.get_all_entities().values(): for entity in entities: for service, accession in entity.get_accessions(): all_accessions.setdefault(service,", "entity.get_errors() return type_errors def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str,", "class HandleCollision(Enum): UPDATE = 1 OVERWRITE = 2 ERROR = 3 class Submission:", "in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes) else: entity = Entity(entity_type, index, attributes)", "Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] = {} for index, entity in", "return len(self.__map) > 0 def map(self, entity_type: str, index: str, attributes: dict) ->", "Dict[str, Set[str]] = {} for entities in self.get_all_entities().values(): for entity in entities: for", "Dict[str, Set[str]]: accessions: Dict[str, Set[str]] = {} for entity_type in self.get_entity_types(): for linked_entity", "def __init__(self, collider: HandleCollision = None): self.__collider = collider if collider else HandleCollision.UPDATE", "entity_type in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity: Entity, entity_type:", "None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities = {} for entity_type in self.get_entity_types():", "self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.') existing_entity: Entity = self.__map[entity_type][index] if", "True return False def get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str,", "self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str, index: str) -> Entity: return self.__map.get(entity_type, {}).get(index,", "errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {} for entity_type in self.get_entity_types(): type_errors =", "get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {}", "ERROR = 3 class Submission: def __init__(self, collider: HandleCollision = None): self.__collider =", "for service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self) -> Dict[str,", "entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]: accessions:", "Dict[str, Dict[str, dict]]: view = {} for entity_type, indexed_entities in self.__map.items(): for index,", "if entity.has_errors(): type_errors[index] = entity.get_errors() return 
type_errors def get_all_errors(self) -> Dict[str, Dict[str, Dict[str,", "= set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities def get_linked_accessions(self, entity:", "entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors() return type_errors def get_all_errors(self) ->", "entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self) -> bool: for entities in self.get_all_entities().values():", "Dict[str, Entity]] = {} def has_data(self) -> bool: return len(self.__map) > 0 def", "HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] = {} def has_data(self) -> bool: return len(self.__map)", "HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.') existing_entity: Entity = self.__map[entity_type][index] if self.__collider ==", "self.__map.items(): for index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod", "index, entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors() return type_errors def get_all_errors(self)", "self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity: Entity, entity_type: str) ->", "return errors def as_dict(self, string_lists: bool = False) -> Dict[str, Dict[str, dict]]: view", "all_entities[entity_type] = self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]:", "bool: for entities in self.get_all_entities().values(): for entity in entities: if entity.has_errors(): return True", "entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index: str, attributes: dict) ->", "-> Entity: if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.') existing_entity: Entity", "type_errors: errors[entity_type] = type_errors return errors def as_dict(self, string_lists: bool = False) ->", "return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities = {} for", "return view @staticmethod def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type:", "str) -> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] = {} for index,", "= self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]: entities", "in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type): for service, accession in linked_entity.get_accessions(): accessions.setdefault(service,", "= entity return entity def get_entity_types(self) -> KeysView[str]: return self.__map.keys() def get_entities(self, entity_type:", "dict) -> Entity: if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.') existing_entity:", "self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors() return type_errors def get_all_errors(self) -> Dict[str, Dict[str,", "all_accessions: Dict[str, Set[str]] = {} for 
entities in self.get_all_entities().values(): for entity in entities:", "= None): self.__collider = collider if collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]]", "-> Entity: return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities =", "index, attributes) else: entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] = entity return", "KeysView[str]: return self.__map.keys() def get_entities(self, entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def", "for index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod def", "all_entities def get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]: entities = set() for", "-> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str, index: str) -> Entity:", "Dict[str, ValuesView[Entity]]: all_entities = {} for entity_type in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return", "service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self) -> bool: for", "type_errors: Dict[str, Dict[str, List[str]]] = {} for index, entity in self.__map[entity_type].items(): if entity.has_errors():", "return True return False def get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]: type_errors:", "Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {} for entity_type", "index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a:", "in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a: Entity, entity_b:", "self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities = {} for entity_type", "errors[entity_type] = type_errors return errors def as_dict(self, string_lists: bool = False) -> Dict[str,", "{} for entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors", "if type_errors: errors[entity_type] = type_errors return errors def as_dict(self, string_lists: bool = False)", "entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors return errors", "= {} def has_data(self) -> bool: return len(self.__map) > 0 def map(self, entity_type:", "in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self) -> bool: for entities in", "None): self.__collider = collider if collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] =", "class Submission: def __init__(self, collider: HandleCollision = None): self.__collider = collider if collider", "Dict[str, Set[str]] = {} for entity_type in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type):", "len(self.__map) > 0 def map(self, entity_type: str, index: str, attributes: dict) -> Entity:", "raise IndexError(f'Index {index} already exists.') 
existing_entity: Entity = self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE:", "entities def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]: accessions: Dict[str, Set[str]] = {}", "string_lists: bool = False) -> Dict[str, Dict[str, dict]]: view = {} for entity_type,", "index)) return entities def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]: accessions: Dict[str, Set[str]]", "entity_type: str, index: str, attributes: dict) -> Entity: if entity_type in self.__map and", "False def get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]]", "ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str, index: str) -> Entity: return", "collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] = {} def has_data(self) -> bool:", "1 OVERWRITE = 2 ERROR = 3 class Submission: def __init__(self, collider: HandleCollision", "False) -> Dict[str, Dict[str, dict]]: view = {} for entity_type, indexed_entities in self.__map.items():", "return all_accessions def has_errors(self) -> bool: for entities in self.get_all_entities().values(): for entity in", "from enum import Enum from typing import List, Set, KeysView, Dict, ValuesView from", "-> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] = {} for index, entity", "as_dict(self, string_lists: bool = False) -> Dict[str, Dict[str, dict]]: view = {} for", "def get_entities(self, entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str,", "-> Set[Entity]: entities = set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities", "entities.add(self.get_entity(entity_type, index)) return entities def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]: accessions: Dict[str,", "List, Set, KeysView, Dict, ValuesView from submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE =", "Entity) -> Dict[str, Set[str]]: accessions: Dict[str, Set[str]] = {} for entity_type in self.get_entity_types():", "Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {} for entity_type in", "entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str, index: str)", "ValuesView[Entity]]: all_entities = {} for entity_type in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return all_entities", "{} for entity_type, indexed_entities in self.__map.items(): for index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index]", "enum import Enum from typing import List, Set, KeysView, Dict, ValuesView from submission_broker.submission.entity", "self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes) else: entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type,", "-> Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {} for", "entity.has_errors(): type_errors[index] = entity.get_errors() return type_errors def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]:", "if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes else: # Default is UPDATE existing_entity.attributes.update(attributes)", 
"index, attributes) self.__map.setdefault(entity_type, {})[index] = entity return entity def get_entity_types(self) -> KeysView[str]: return", "entity_type in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type): for service, accession in linked_entity.get_accessions():", "self.get_all_entities().values(): for entity in entities: for service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return", "-> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] = {} for entities in self.get_all_entities().values(): for", "{} for entities in self.get_all_entities().values(): for entity in entities: for service, accession in", "set()).add(accession) return accessions def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] = {}", "-> bool: return len(self.__map) > 0 def map(self, entity_type: str, index: str, attributes:", "collider: HandleCollision = None): self.__collider = collider if collider else HandleCollision.UPDATE self.__map: Dict[str,", "Set[str]]: all_accessions: Dict[str, Set[str]] = {} for entities in self.get_all_entities().values(): for entity in", "entity_type: str, index: str) -> Entity: return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) ->", "entity in entities: if entity.has_errors(): return True return False def get_errors(self, entity_type: str)", "Entity: if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.') existing_entity: Entity =", "== HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.') existing_entity: Entity = self.__map[entity_type][index] if self.__collider", "== HandleCollision.OVERWRITE: existing_entity.attributes = attributes else: # Default is UPDATE existing_entity.attributes.update(attributes) return existing_entity", "entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a: Entity,", "entities: for service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self) ->", "self.get_linked_entities(entity, entity_type): for service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self)", "map(self, entity_type: str, index: str, attributes: dict) -> Entity: if entity_type in self.__map", "linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]]", "all_entities = {} for entity_type in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return all_entities def", "dict) -> Entity: if entity_type in self.__map and index in self.__map[entity_type]: entity =", "entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index: str, attributes: dict) -> Entity: if", "return accessions def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] = {} for", "in entities: for service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self)", "Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {} for entity_type 
in self.get_entity_types():", "view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier)", "def get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] =", "get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]: accessions: Dict[str, Set[str]] = {} for entity_type", "Entity]] = {} def has_data(self) -> bool: return len(self.__map) > 0 def map(self,", "self.__map.setdefault(entity_type, {})[index] = entity return entity def get_entity_types(self) -> KeysView[str]: return self.__map.keys() def", "Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index: str, attributes: dict)", "Entity, entity_type: str) -> Set[Entity]: entities = set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type,", "Set[str]]: accessions: Dict[str, Set[str]] = {} for entity_type in self.get_entity_types(): for linked_entity in", "self.__collider = collider if collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] = {}", "= 2 ERROR = 3 class Submission: def __init__(self, collider: HandleCollision = None):", "Submission: def __init__(self, collider: HandleCollision = None): self.__collider = collider if collider else", "for entities in self.get_all_entities().values(): for entity in entities: for service, accession in entity.get_accessions():", "self.__handle_collision(entity_type, index, attributes) else: entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] = entity", "in self.get_all_entities().values(): for entity in entities: if entity.has_errors(): return True return False def", "str, attributes: dict) -> Entity: if entity_type in self.__map and index in self.__map[entity_type]:", "accessions def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] = {} for entities", "index: str, attributes: dict) -> Entity: if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index}", "import List, Set, KeysView, Dict, ValuesView from submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE", "str) -> Set[Entity]: entities = set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return", "Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] = {} for entities in self.get_all_entities().values(): for entity", "from submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE = 1 OVERWRITE = 2 ERROR", "KeysView, Dict, ValuesView from submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE = 1 OVERWRITE", "= collider if collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] = {} def", "self.__map.keys() def get_entities(self, entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type:", "for entity_type in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity: Entity,", "set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities def get_linked_accessions(self, entity: Entity)", 
"entities: if entity.has_errors(): return True return False def get_errors(self, entity_type: str) -> Dict[str,", "in self.__map.items(): for index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return view", "all_accessions def has_errors(self) -> bool: for entities in self.get_all_entities().values(): for entity in entities:", "= self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes else: # Default is", "Dict[str, Dict[str, List[str]]]] = {} for entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type) if", "entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] = entity return entity def get_entity_types(self)", "Dict[str, Dict[str, List[str]]] = {} for index, entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index]", "exists.') existing_entity: Entity = self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes else:", "indexed_entities in self.__map.items(): for index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists) return", "in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors() return type_errors def get_all_errors(self) -> Dict[str,", "self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes else: # Default is UPDATE", "def get_entity_types(self) -> KeysView[str]: return self.__map.keys() def get_entities(self, entity_type: str) -> ValuesView[Entity]: return", "Dict[str, Dict[str, Dict[str, List[str]]]] = {} for entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type)", "linked_entity in self.get_linked_entities(entity, entity_type): for service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions", "Set[str]] = {} for entities in self.get_all_entities().values(): for entity in entities: for service,", "= {} for entity_type in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type): for service,", "has_errors(self) -> bool: for entities in self.get_all_entities().values(): for entity in entities: if entity.has_errors():", "self.get_all_entities().values(): for entity in entities: if entity.has_errors(): return True return False def get_errors(self,", "{} for entity_type in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type): for service, accession", "entity_type, indexed_entities in self.__map.items(): for index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] = entity.as_dict(string_lists=string_lists)", "get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities = {} for entity_type in self.get_entity_types(): all_entities[entity_type] =", "= {} for entity_type in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return all_entities def get_linked_entities(self,", "Dict, ValuesView from submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE = 1 OVERWRITE =", "= self.__handle_collision(entity_type, index, attributes) else: entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] =", "entity in entities: for 
service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def", "Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] = {} for index, entity in self.__map[entity_type].items():", "return all_entities def get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]: entities = set()", "<reponame>ebi-ait/submission-broker from enum import Enum from typing import List, Set, KeysView, Dict, ValuesView", "accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions:", "attributes: dict) -> Entity: if entity_type in self.__map and index in self.__map[entity_type]: entity", "from typing import List, Set, KeysView, Dict, ValuesView from submission_broker.submission.entity import Entity class", "else: entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] = entity return entity def", "entity_type: str, index: str, attributes: dict) -> Entity: if self.__collider == HandleCollision.ERROR: raise", "existing_entity: Entity = self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes else: #", "List[str]]]] = {} for entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type]", "List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]] = {} for entity_type in self.get_entity_types(): type_errors", "entity.has_errors(): return True return False def get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]:", "import Entity class HandleCollision(Enum): UPDATE = 1 OVERWRITE = 2 ERROR = 3", "__init__(self, collider: HandleCollision = None): self.__collider = collider if collider else HandleCollision.UPDATE self.__map:", "= {} for index, entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors() return", "else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] = {} def has_data(self) -> bool: return", "type_errors return errors def as_dict(self, string_lists: bool = False) -> Dict[str, Dict[str, dict]]:", "def has_data(self) -> bool: return len(self.__map) > 0 def map(self, entity_type: str, index:", "self.__map: Dict[str, Dict[str, Entity]] = {} def has_data(self) -> bool: return len(self.__map) >", "entity_type): for service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self) ->", "for entity in entities: if entity.has_errors(): return True return False def get_errors(self, entity_type:", "= entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def", "{}).get(index, None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities = {} for entity_type in", "def has_errors(self) -> bool: for entities in self.get_all_entities().values(): for entity in entities: if", "= {} for entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type] =", "3 class Submission: def __init__(self, collider: HandleCollision = None): self.__collider = collider if", "{} for entity_type in self.get_entity_types(): all_entities[entity_type] = 
self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity:", "entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self,", "def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities = {} for entity_type in self.get_entity_types(): all_entities[entity_type]", "def get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]: entities = set() for index", "if entity.has_errors(): return True return False def get_errors(self, entity_type: str) -> Dict[str, Dict[str,", "Dict[str, dict]]: view = {} for entity_type, indexed_entities in self.__map.items(): for index, entity", "__handle_collision(self, entity_type: str, index: str, attributes: dict) -> Entity: if self.__collider == HandleCollision.ERROR:", "in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity: Entity, entity_type: str)", "Dict[str, List[str]]]] = {} for entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors:", "entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index: str, attributes: dict) -> Entity: if self.__collider", "index in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes) else: entity = Entity(entity_type, index,", "HandleCollision = None): self.__collider = collider if collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str,", "type_errors def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]]", "attributes: dict) -> Entity: if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.')", "def __handle_collision(self, entity_type: str, index: str, attributes: dict) -> Entity: if self.__collider ==", "get_entity_types(self) -> KeysView[str]: return self.__map.keys() def get_entities(self, entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type,", "for index, entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors() return type_errors def", "submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE = 1 OVERWRITE = 2 ERROR =", "if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index} already exists.') existing_entity: Entity = self.__map[entity_type][index]", "accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self) -> bool: for entities", "for entity_type, indexed_entities in self.__map.items(): for index, entity in indexed_entities.items(): view.setdefault(entity_type, {})[index] =", "def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str, List[str]]]] =", "{} for index, entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors() return type_errors", "Set[Entity]: entities = set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities def", "self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors return errors def as_dict(self,", "= type_errors return 
errors def as_dict(self, string_lists: bool = False) -> Dict[str, Dict[str,", "attributes) else: entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] = entity return entity", "errors def as_dict(self, string_lists: bool = False) -> Dict[str, Dict[str, dict]]: view =", "Entity: return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities = {}", "in entities: if entity.has_errors(): return True return False def get_errors(self, entity_type: str) ->", "for entity_type in self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors return", "and index in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes) else: entity = Entity(entity_type,", "get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]: entities = set() for index in", "-> Dict[str, Set[str]]: accessions: Dict[str, Set[str]] = {} for entity_type in self.get_entity_types(): for", "-> Dict[str, ValuesView[Entity]]: all_entities = {} for entity_type in self.get_entity_types(): all_entities[entity_type] = self.get_entities(entity_type)", "self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type): for service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession)", "self.get_entities(entity_type) return all_entities def get_linked_entities(self, entity: Entity, entity_type: str) -> Set[Entity]: entities =", "has_data(self) -> bool: return len(self.__map) > 0 def map(self, entity_type: str, index: str,", "type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors return errors def as_dict(self, string_lists:", "Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] = entity return entity def get_entity_types(self) -> KeysView[str]:", "index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities def get_linked_accessions(self, entity: Entity) -> Dict[str,", "def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]: accessions: Dict[str, Set[str]] = {} for", "def map(self, entity_type: str, index: str, attributes: dict) -> Entity: if entity_type in", "Dict[str, List[str]]] = {} for index, entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] =", "return entity def get_entity_types(self) -> KeysView[str]: return self.__map.keys() def get_entities(self, entity_type: str) ->", "str, index: str, attributes: dict) -> Entity: if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index", "in self.__map and index in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes) else: entity", "attributes) self.__map.setdefault(entity_type, {})[index] = entity return entity def get_entity_types(self) -> KeysView[str]: return self.__map.keys()", "self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes else: # Default is UPDATE existing_entity.attributes.update(attributes) return", "return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str, index: str) -> Entity: return self.__map.get(entity_type,", "Dict[str, Dict[str, Entity]] = {} def has_data(self) -> bool: return len(self.__map) > 0", "{index} already exists.') existing_entity: Entity = self.__map[entity_type][index] if self.__collider == 
HandleCollision.OVERWRITE: existing_entity.attributes =", "entities in self.get_all_entities().values(): for entity in entities: if entity.has_errors(): return True return False", "if entity_type in self.__map and index in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes)", "link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index: str, attributes:", "in self.get_all_entities().values(): for entity in entities: for service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession)", "= Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index] = entity return entity def get_entity_types(self) ->", "UPDATE = 1 OVERWRITE = 2 ERROR = 3 class Submission: def __init__(self,", "for entities in self.get_all_entities().values(): for entity in entities: if entity.has_errors(): return True return", "List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] = {} for index, entity in self.__map[entity_type].items(): if", "get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] = {} for entities in self.get_all_entities().values():", "for entity_type in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity, entity_type): for service, accession in", "0 def map(self, entity_type: str, index: str, attributes: dict) -> Entity: if entity_type", "Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index: str, attributes: dict) -> Entity:", "str) -> Entity: return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]: all_entities", "for entity in entities: for service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions", "entity return entity def get_entity_types(self) -> KeysView[str]: return self.__map.keys() def get_entities(self, entity_type: str)", "return False def get_errors(self, entity_type: str) -> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str,", "entity: Entity) -> Dict[str, Set[str]]: accessions: Dict[str, Set[str]] = {} for entity_type in", "accessions: Dict[str, Set[str]] = {} for entity_type in self.get_entity_types(): for linked_entity in self.get_linked_entities(entity,", "= False) -> Dict[str, Dict[str, dict]]: view = {} for entity_type, indexed_entities in", "dict]]: view = {} for entity_type, indexed_entities in self.__map.items(): for index, entity in", "= self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors return errors def as_dict(self, string_lists: bool", "= 3 class Submission: def __init__(self, collider: HandleCollision = None): self.__collider = collider", "{}).values() def get_entity(self, entity_type: str, index: str) -> Entity: return self.__map.get(entity_type, {}).get(index, None)", "entity_type: str) -> Set[Entity]: entities = set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index))", "type_errors[index] = entity.get_errors() return type_errors def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]: errors:", "= entity.get_errors() return type_errors def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str,", "-> bool: for entities in 
self.get_all_entities().values(): for entity in entities: if entity.has_errors(): return", "index: str) -> Entity: return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self) -> Dict[str, ValuesView[Entity]]:", "@staticmethod def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index:", "Entity class HandleCollision(Enum): UPDATE = 1 OVERWRITE = 2 ERROR = 3 class", "-> Dict[str, Dict[str, dict]]: view = {} for entity_type, indexed_entities in self.__map.items(): for", "2 ERROR = 3 class Submission: def __init__(self, collider: HandleCollision = None): self.__collider", "if collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] = {} def has_data(self) ->", "entity def get_entity_types(self) -> KeysView[str]: return self.__map.keys() def get_entities(self, entity_type: str) -> ValuesView[Entity]:", "in self.get_linked_entities(entity, entity_type): for service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def", "Entity: if entity_type in self.__map and index in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index,", "List[str]]] = {} for index, entity in self.__map[entity_type].items(): if entity.has_errors(): type_errors[index] = entity.get_errors()", "def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] = {} for entities in", "{})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier)", "already exists.') existing_entity: Entity = self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes", "str, index: str, attributes: dict) -> Entity: if entity_type in self.__map and index", "typing import List, Set, KeysView, Dict, ValuesView from submission_broker.submission.entity import Entity class HandleCollision(Enum):", "str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str, index: str) ->", "bool: return len(self.__map) > 0 def map(self, entity_type: str, index: str, attributes: dict)", "for linked_entity in self.get_linked_entities(entity, entity_type): for service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return", "IndexError(f'Index {index} already exists.') existing_entity: Entity = self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes", "view @staticmethod def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str,", "= {} for entities in self.get_all_entities().values(): for entity in entities: for service, accession", "get_entities(self, entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self, entity_type: str, index:", "index: str, attributes: dict) -> Entity: if entity_type in self.__map and index in", "all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self) -> bool: for entities in self.get_all_entities().values(): for", "indexed_entities.items(): view.setdefault(entity_type, 
{})[index] = entity.as_dict(string_lists=string_lists) return view @staticmethod def link_entities(entity_a: Entity, entity_b: Entity):", "OVERWRITE = 2 ERROR = 3 class Submission: def __init__(self, collider: HandleCollision =", "in self.get_entity_types(): type_errors = self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors return errors def", "-> KeysView[str]: return self.__map.keys() def get_entities(self, entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values()", "collider if collider else HandleCollision.UPDATE self.__map: Dict[str, Dict[str, Entity]] = {} def has_data(self)", "self.get_errors(entity_type) if type_errors: errors[entity_type] = type_errors return errors def as_dict(self, string_lists: bool =", "= 1 OVERWRITE = 2 ERROR = 3 class Submission: def __init__(self, collider:", "def get_entity(self, entity_type: str, index: str) -> Entity: return self.__map.get(entity_type, {}).get(index, None) def", "Entity = self.__map[entity_type][index] if self.__collider == HandleCollision.OVERWRITE: existing_entity.attributes = attributes else: # Default", "bool = False) -> Dict[str, Dict[str, dict]]: view = {} for entity_type, indexed_entities", "entity_type: str) -> Dict[str, Dict[str, List[str]]]: type_errors: Dict[str, Dict[str, List[str]]] = {} for", "self.__map and index in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes) else: entity =", "accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str, Set[str]] =", "get_entity(self, entity_type: str, index: str) -> Entity: return self.__map.get(entity_type, {}).get(index, None) def get_all_entities(self)", "Set, KeysView, Dict, ValuesView from submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE = 1", "> 0 def map(self, entity_type: str, index: str, attributes: dict) -> Entity: if", "entity = self.__handle_collision(entity_type, index, attributes) else: entity = Entity(entity_type, index, attributes) self.__map.setdefault(entity_type, {})[index]", "view = {} for entity_type, indexed_entities in self.__map.items(): for index, entity in indexed_entities.items():", "{})[index] = entity return entity def get_entity_types(self) -> KeysView[str]: return self.__map.keys() def get_entities(self,", "entities = set() for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities def get_linked_accessions(self,", "def link_entities(entity_a: Entity, entity_b: Entity): entity_a.add_link_id(entity_b.identifier) entity_b.add_link_id(entity_a.identifier) def __handle_collision(self, entity_type: str, index: str,", "service, accession in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self) -> Dict[str, Set[str]]:", "in linked_entity.get_accessions(): accessions.setdefault(service, set()).add(accession) return accessions def get_all_accessions(self) -> Dict[str, Set[str]]: all_accessions: Dict[str,", "return self.__map.keys() def get_entities(self, entity_type: str) -> ValuesView[Entity]: return self.__map.get(entity_type, {}).values() def get_entity(self,", "set()).add(accession) return all_accessions def has_errors(self) -> bool: for entities in self.get_all_entities().values(): for entity", "for index in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) 
return entities def get_linked_accessions(self, entity: Entity) ->", "ValuesView from submission_broker.submission.entity import Entity class HandleCollision(Enum): UPDATE = 1 OVERWRITE = 2", "entity: Entity, entity_type: str) -> Set[Entity]: entities = set() for index in entity.get_linked_indexes(entity_type):", "entity_type in self.__map and index in self.__map[entity_type]: entity = self.__handle_collision(entity_type, index, attributes) else:", "str, attributes: dict) -> Entity: if self.__collider == HandleCollision.ERROR: raise IndexError(f'Index {index} already", "def as_dict(self, string_lists: bool = False) -> Dict[str, Dict[str, dict]]: view = {}", "in entity.get_linked_indexes(entity_type): entities.add(self.get_entity(entity_type, index)) return entities def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]:", "Enum from typing import List, Set, KeysView, Dict, ValuesView from submission_broker.submission.entity import Entity", "for service, accession in entity.get_accessions(): all_accessions.setdefault(service, set()).add(accession) return all_accessions def has_errors(self) -> bool:", "return type_errors def get_all_errors(self) -> Dict[str, Dict[str, Dict[str, List[str]]]]: errors: Dict[str, Dict[str, Dict[str,", "return entities def get_linked_accessions(self, entity: Entity) -> Dict[str, Set[str]]: accessions: Dict[str, Set[str]] =", "-> Entity: if entity_type in self.__map and index in self.__map[entity_type]: entity = self.__handle_collision(entity_type," ]
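A minimal usage sketch for the Submission class above; the entity types, indexes, and attribute dicts are invented for illustration, and Entity is assumed to expose the identifier, add_link_id, and error/accession members the methods above rely on:

# Hypothetical usage; 'study'/'sample' and all attribute values are placeholders.
submission = Submission()  # default collider is HandleCollision.UPDATE
study = submission.map('study', 'study-1', {'title': 'Demo study'})
sample = submission.map('sample', 'sample-1', {'organism': 'human'})

# Re-mapping an existing index goes through the collision policy:
# with UPDATE, the new attributes are merged into the existing entity.
submission.map('sample', 'sample-1', {'tissue': 'liver'})

Submission.link_entities(study, sample)

if submission.has_errors():
    print(submission.get_all_errors())
print(submission.as_dict())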
[ "np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate, end_date=enddate, time_interval=timeinterval) df =", "import YahooFinancials import pandas as pd import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials", "import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate, end_date=enddate,", "as pd import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data =", "pd import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate,", "numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate, end_date=enddate, time_interval=timeinterval)", "<gh_stars>0 from yahoofinancials import YahooFinancials import pandas as pd import numpy as np", "def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate, end_date=enddate, time_interval=timeinterval) df = pd.DataFrame(data[ticker]['prices'])", "yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate, end_date=enddate, time_interval=timeinterval) df = pd.DataFrame(data[ticker]['prices']) return df[datatype].to_numpy()", "YahooFinancials import pandas as pd import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials =", "GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate, end_date=enddate, time_interval=timeinterval) df = pd.DataFrame(data[ticker]['prices']) return", "pandas as pd import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data", "import pandas as pd import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker)", "from yahoofinancials import YahooFinancials import pandas as pd import numpy as np def", "as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray: yahoo_financials = YahooFinancials(ticker) data = yahoo_financials.get_historical_price_data(start_date=startdate, end_date=enddate, time_interval=timeinterval) df", "yahoofinancials import YahooFinancials import pandas as pd import numpy as np def GetYahooFinanceData(ticker:str,startdate:str,enddate:str,timeinterval:str,datatype:str)->np.ndarray:" ]
[ "#Alternate solution # counter = collections.Counter(magazine) # ransomNote = list(ransomNote) # for alpha", "<filename>leetcode/RansomNote.py # Leetcode # 383. Ransom Note class Solution: def canConstruct(self, ransomNote: str,", "ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return False return True #################################################################### # #Alternate solution", "Leetcode # 383. Ransom Note class Solution: def canConstruct(self, ransomNote: str, magazine: str)", "mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha in ransom_counter: if ransom_counter.get(alpha, -1)", "collections.Counter(ransomNote) for alpha in ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return False", "# if alpha_count > 1: # counter[alpha] = counter.get(alpha) - 1 # elif", "= counter.get(alpha) - 1 # elif alpha_count == 1: # counter.pop(alpha) # else:", "counter.get(alpha) - 1 # elif alpha_count == 1: # counter.pop(alpha) # else: #", "= counter.get(alpha, -1) # if alpha_count > 1: # counter[alpha] = counter.get(alpha) -", "return False # if not ransomNote: # return True # else: # return", "alpha in list(ransomNote): # ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1) # if alpha_count", "383. Ransom Note class Solution: def canConstruct(self, ransomNote: str, magazine: str) -> bool:", "True #################################################################### # #Alternate solution # counter = collections.Counter(magazine) # ransomNote = list(ransomNote)", "= collections.Counter(ransomNote) for alpha in ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return", "0): return False return True #################################################################### # #Alternate solution # counter = collections.Counter(magazine)", "alpha_count == 1: # counter.pop(alpha) # else: # return False # if not", "Note class Solution: def canConstruct(self, ransomNote: str, magazine: str) -> bool: if len(magazine)", "for alpha in list(ransomNote): # ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1) # if", "len(magazine) < len(ransomNote): return False mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha", "# counter = collections.Counter(magazine) # ransomNote = list(ransomNote) # for alpha in list(ransomNote):", "1: # counter.pop(alpha) # else: # return False # if not ransomNote: #", "> mag_counter.get(alpha, 0): return False return True #################################################################### # #Alternate solution # counter", "Ransom Note class Solution: def canConstruct(self, ransomNote: str, magazine: str) -> bool: if", "len(ransomNote): return False mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha in ransom_counter:", "collections.Counter(magazine) # ransomNote = list(ransomNote) # for alpha in list(ransomNote): # ransomNote.remove(alpha) #", "ransomNote: str, magazine: str) -> bool: if len(magazine) < len(ransomNote): return False mag_counter", "-1) # if alpha_count > 1: # counter[alpha] = counter.get(alpha) - 1 #", "collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha in ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha,", "if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return False return True #################################################################### # 
#Alternate", "= list(ransomNote) # for alpha in list(ransomNote): # ransomNote.remove(alpha) # alpha_count = counter.get(alpha,", "if alpha_count > 1: # counter[alpha] = counter.get(alpha) - 1 # elif alpha_count", "-1) > mag_counter.get(alpha, 0): return False return True #################################################################### # #Alternate solution #", "Solution: def canConstruct(self, ransomNote: str, magazine: str) -> bool: if len(magazine) < len(ransomNote):", "- 1 # elif alpha_count == 1: # counter.pop(alpha) # else: # return", "> 1: # counter[alpha] = counter.get(alpha) - 1 # elif alpha_count == 1:", "class Solution: def canConstruct(self, ransomNote: str, magazine: str) -> bool: if len(magazine) <", "alpha in ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return False return True", "# elif alpha_count == 1: # counter.pop(alpha) # else: # return False #", "def canConstruct(self, ransomNote: str, magazine: str) -> bool: if len(magazine) < len(ransomNote): return", "in list(ransomNote): # ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1) # if alpha_count >", "-> bool: if len(magazine) < len(ransomNote): return False mag_counter = collections.Counter(magazine) ransom_counter =", "= collections.Counter(magazine) # ransomNote = list(ransomNote) # for alpha in list(ransomNote): # ransomNote.remove(alpha)", "False # if not ransomNote: # return True # else: # return False", "= collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha in ransom_counter: if ransom_counter.get(alpha, -1) >", "counter = collections.Counter(magazine) # ransomNote = list(ransomNote) # for alpha in list(ransomNote): #", "# return False # if not ransomNote: # return True # else: #", "1 # elif alpha_count == 1: # counter.pop(alpha) # else: # return False", "list(ransomNote) # for alpha in list(ransomNote): # ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1)", "1: # counter[alpha] = counter.get(alpha) - 1 # elif alpha_count == 1: #", "alpha_count = counter.get(alpha, -1) # if alpha_count > 1: # counter[alpha] = counter.get(alpha)", "# #Alternate solution # counter = collections.Counter(magazine) # ransomNote = list(ransomNote) # for", "else: # return False # if not ransomNote: # return True # else:", "list(ransomNote): # ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1) # if alpha_count > 1:", "# alpha_count = counter.get(alpha, -1) # if alpha_count > 1: # counter[alpha] =", "return False mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha in ransom_counter: if", "# ransomNote = list(ransomNote) # for alpha in list(ransomNote): # ransomNote.remove(alpha) # alpha_count", "== 1: # counter.pop(alpha) # else: # return False # if not ransomNote:", "alpha_count > 1: # counter[alpha] = counter.get(alpha) - 1 # elif alpha_count ==", "< len(ransomNote): return False mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha in", "in ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return False return True ####################################################################", "return False return True #################################################################### # #Alternate solution # counter = collections.Counter(magazine) #", "for alpha in ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return False return", "# 383. 
Ransom Note class Solution: def canConstruct(self, ransomNote: str, magazine: str) ->", "# for alpha in list(ransomNote): # ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1) #", "ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0): return False return True #################################################################### #", "False mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for alpha in ransom_counter: if ransom_counter.get(alpha,", "return True #################################################################### # #Alternate solution # counter = collections.Counter(magazine) # ransomNote =", "mag_counter.get(alpha, 0): return False return True #################################################################### # #Alternate solution # counter =", "ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1) # if alpha_count > 1: # counter[alpha]", "ransom_counter = collections.Counter(ransomNote) for alpha in ransom_counter: if ransom_counter.get(alpha, -1) > mag_counter.get(alpha, 0):", "# else: # return False # if not ransomNote: # return True #", "counter.get(alpha, -1) # if alpha_count > 1: # counter[alpha] = counter.get(alpha) - 1", "magazine: str) -> bool: if len(magazine) < len(ransomNote): return False mag_counter = collections.Counter(magazine)", "if len(magazine) < len(ransomNote): return False mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote) for", "str) -> bool: if len(magazine) < len(ransomNote): return False mag_counter = collections.Counter(magazine) ransom_counter", "bool: if len(magazine) < len(ransomNote): return False mag_counter = collections.Counter(magazine) ransom_counter = collections.Counter(ransomNote)", "counter.pop(alpha) # else: # return False # if not ransomNote: # return True", "ransomNote = list(ransomNote) # for alpha in list(ransomNote): # ransomNote.remove(alpha) # alpha_count =", "#################################################################### # #Alternate solution # counter = collections.Counter(magazine) # ransomNote = list(ransomNote) #", "canConstruct(self, ransomNote: str, magazine: str) -> bool: if len(magazine) < len(ransomNote): return False", "# Leetcode # 383. Ransom Note class Solution: def canConstruct(self, ransomNote: str, magazine:", "elif alpha_count == 1: # counter.pop(alpha) # else: # return False # if", "counter[alpha] = counter.get(alpha) - 1 # elif alpha_count == 1: # counter.pop(alpha) #", "# counter[alpha] = counter.get(alpha) - 1 # elif alpha_count == 1: # counter.pop(alpha)", "False return True #################################################################### # #Alternate solution # counter = collections.Counter(magazine) # ransomNote", "# counter.pop(alpha) # else: # return False # if not ransomNote: # return", "solution # counter = collections.Counter(magazine) # ransomNote = list(ransomNote) # for alpha in", "str, magazine: str) -> bool: if len(magazine) < len(ransomNote): return False mag_counter =", "# ransomNote.remove(alpha) # alpha_count = counter.get(alpha, -1) # if alpha_count > 1: #" ]
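A quick sanity check of the counting solution, using the examples from the problem statement:

# canConstruct is True only when the magazine covers every letter count.
s = Solution()
assert not s.canConstruct('a', 'b')
assert not s.canConstruct('aa', 'ab')
assert s.canConstruct('aa', 'aab')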
[ "'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json' process =", "<reponame>kelvingakuo/INCITEFUL-DATA import subprocess def runCrawler(what): if(what == 'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ &&", "'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json' process = subprocess.Popen(command.split(), stdout =", "== 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json' process", "runCrawler(what): if(what == 'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o", "== 'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json' elif(what", "crawl lyricsfreak -o lyrics.json' elif(what == 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy", "elif(what == 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json'", "'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json' elif(what ==", "artist_data.json' process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell = True) output, error =", "&& scrapy crawl artistInfo -o artist_data.json' process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell", "subprocess def runCrawler(what): if(what == 'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl", "-o artist_data.json' process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell = True) output, error", "'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json' elif(what == 'artistData'): command =", "= 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json' elif(what == 'artistData'): command", "lyricsfreak -o lyrics.json' elif(what == 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl", "import subprocess def runCrawler(what): if(what == 'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy", "-o lyrics.json' elif(what == 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo", "&& scrapy crawl lyricsfreak -o lyrics.json' elif(what == 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/", "lyrics.json' elif(what == 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o", "WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json' process = subprocess.Popen(command.split(), stdout = subprocess.PIPE,", "crawl artistInfo -o artist_data.json' process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell = True)", "def runCrawler(what): if(what == 'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak", "command = 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json' process = subprocess.Popen(command.split(),", "LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json' elif(what == 'artistData'): command = 'cd", "if(what == 'lyrics'): command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json'", "= 'cd WikipediaCrawler/WikipediaCrawler/ && scrapy crawl artistInfo -o artist_data.json' process = subprocess.Popen(command.split(), stdout", "scrapy crawl artistInfo -o artist_data.json' 
process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell =", "process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell = True) output, error = process.communicate()", "scrapy crawl lyricsfreak -o lyrics.json' elif(what == 'artistData'): command = 'cd WikipediaCrawler/WikipediaCrawler/ &&", "artistInfo -o artist_data.json' process = subprocess.Popen(command.split(), stdout = subprocess.PIPE, shell = True) output,", "command = 'cd LyricsFreakCrawler/LyricsFreakCrawler/ && scrapy crawl lyricsfreak -o lyrics.json' elif(what == 'artistData'):" ]
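A sketch of how the runner would be driven, assuming the two Scrapy project directories exist at the relative paths hard-coded above:

# Hypothetical entry point; each call blocks until the corresponding spider finishes.
if __name__ == '__main__':
    runCrawler('lyrics')      # writes lyrics.json inside LyricsFreakCrawler/LyricsFreakCrawler/
    runCrawler('artistData')  # writes artist_data.json inside WikipediaCrawler/WikipediaCrawler/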
[ "def index(): global count count += int(request.body.read()) return b'' def show(): prev =", "prev) / dur), 'ops') start = now prev = count threading.Thread(target=show).start() run(host='localhost', port=7000,", "post, run, request import threading import time count = 0 @post('/') def index():", "<filename>example/benchmark/counter.py from bottle import post, run, request import threading import time count =", "/ dur), 'ops') start = now prev = count threading.Thread(target=show).start() run(host='localhost', port=7000, quiet=True)", "= 0 while True: start = time.time() time.sleep(1) now = time.time() dur =", "start print(int((count - prev) / dur), 'ops') start = now prev = count", "0 @post('/') def index(): global count count += int(request.body.read()) return b'' def show():", "count += int(request.body.read()) return b'' def show(): prev = 0 while True: start", "= now - start print(int((count - prev) / dur), 'ops') start = now", "import time count = 0 @post('/') def index(): global count count += int(request.body.read())", "+= int(request.body.read()) return b'' def show(): prev = 0 while True: start =", "return b'' def show(): prev = 0 while True: start = time.time() time.sleep(1)", "time.sleep(1) now = time.time() dur = now - start print(int((count - prev) /", "count count += int(request.body.read()) return b'' def show(): prev = 0 while True:", "run, request import threading import time count = 0 @post('/') def index(): global", "= 0 @post('/') def index(): global count count += int(request.body.read()) return b'' def", "count = 0 @post('/') def index(): global count count += int(request.body.read()) return b''", "print(int((count - prev) / dur), 'ops') start = now prev = count threading.Thread(target=show).start()", "index(): global count count += int(request.body.read()) return b'' def show(): prev = 0", "now - start print(int((count - prev) / dur), 'ops') start = now prev", "request import threading import time count = 0 @post('/') def index(): global count", "time.time() dur = now - start print(int((count - prev) / dur), 'ops') start", "def show(): prev = 0 while True: start = time.time() time.sleep(1) now =", "now = time.time() dur = now - start print(int((count - prev) / dur),", "import post, run, request import threading import time count = 0 @post('/') def", "0 while True: start = time.time() time.sleep(1) now = time.time() dur = now", "- prev) / dur), 'ops') start = now prev = count threading.Thread(target=show).start() run(host='localhost',", "import threading import time count = 0 @post('/') def index(): global count count", "threading import time count = 0 @post('/') def index(): global count count +=", "time count = 0 @post('/') def index(): global count count += int(request.body.read()) return", "bottle import post, run, request import threading import time count = 0 @post('/')", "True: start = time.time() time.sleep(1) now = time.time() dur = now - start", "time.time() time.sleep(1) now = time.time() dur = now - start print(int((count - prev)", "dur = now - start print(int((count - prev) / dur), 'ops') start =", "from bottle import post, run, request import threading import time count = 0", "= time.time() dur = now - start print(int((count - prev) / dur), 'ops')", "= time.time() time.sleep(1) now = time.time() dur = now - start print(int((count -", "prev = 0 while True: start = time.time() time.sleep(1) now = time.time() dur", "global count count += int(request.body.read()) return b'' def show(): prev = 0 while", "b'' def show(): prev = 0 while True: start = 
time.time() time.sleep(1) now", "start = time.time() time.sleep(1) now = time.time() dur = now - start print(int((count", "- start print(int((count - prev) / dur), 'ops') start = now prev =", "@post('/') def index(): global count count += int(request.body.read()) return b'' def show(): prev", "show(): prev = 0 while True: start = time.time() time.sleep(1) now = time.time()", "int(request.body.read()) return b'' def show(): prev = 0 while True: start = time.time()", "while True: start = time.time() time.sleep(1) now = time.time() dur = now -" ]
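The benchmark only prints non-zero rates once something POSTs increments to it; a minimal standard-library client sketch (the host, port, and payload follow the handler above, but this client is not part of the original file):

# Hypothetical load generator: each request body is an increment for index().
import urllib.request

while True:
    req = urllib.request.Request('http://localhost:7000/', data=b'1', method='POST')
    urllib.request.urlopen(req).read()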
[ "A QuerySet of Node objects. ''' return self.filter( parent_node=node ).order_by('level') def leaves(self, node):", "root of the tree being requested. This is be the root kStructuredNode. RETURNS", "''' SELECT * FROM django_ancestry_relation_testnode n1 WHERE (SELECT count(*) FROM django_ancestry_relation_testnode n2 WHERE", "'level' not in kwargs: if node.parent_node_id: node.level = node.parent_node.level + 1 else: node.level", "create_tree(self, nodes=[]): ''' Save a list of nodes to the database. ARGS ----", "FROM django_ancestry_relation_testnode n1 WHERE (SELECT count(*) FROM django_ancestry_relation_testnode n2 WHERE n2.parent_node_id = n1.id)", "level if 'level' not in kwargs: if node.parent_node_id: node.level = node.parent_node.level + 1", "[Node,] A QuerySet of Node objects. ''' nodes = self.descendants(node).order_by('path') return nodes def", "for child in children: tree.children.append( self.hierarchical_structured_tree( child ) ) return tree def descendants_ordered(self,", "RETURNS ------- nodes: [Node,] A QuerySet of Node objects. ''' nodes = self.descendants(node).order_by('path')", "False def ancestral_nodes(self, node): ''' Get a list of all nodes that are", "ARGS ---- node: Node The node. RETURNS ------- QuerySet: A QuerySet of Node", "nodes to the database. ARGS ---- nodes: [Node,] A list of Node objects", "of Node Objects, ordered by level. ''' return self.filter( root_node=node.root_node, path__contains=str(node.id) ).order_by('level') def", "A list of Node objects RETURNS ------- Bool False if nodes is empty", "can be used in any way. ''' from django_ancestry_relation.classes import StructuredNode children_count =", "self.children(node).count() tree = StructuredNode.StructuredNode( data=node ) if children_count > 0: children = self.children(node)", "leaves = self.raw( ''' SELECT * FROM django_ancestry_relation_testnode n1 WHERE (SELECT count(*) FROM", "Node The root Node of this tree/subtree. RETURNS ------- nodes: [Node,] A QuerySet", "node_ids = node.path.split(',') return self.filter(id__in=node_ids).order_by('level') def descendants(self, node): ''' Get a complete list", "import models class NodeManager(models.Manager): def create_node(self, *args, **kwargs): ''' Create a Node object.", "nodes that are ascendants or descendants of the given node. ARGS ---- node:", "n1.level ASC '''.format(str(node.id)) ) return leaves def hierarchical_ordered(self, node): ''' Get a structured", "n2.parent_node_id = n1.id) = 0 AND n1.root_node_id = '{}' ORDER BY n1.level ASC", "class found at classes.StructuredNode. ARGS ---- node: Node The root of the tree", "node.') else: leaves = self.raw( ''' SELECT * FROM django_ancestry_relation_testnode n1 WHERE (SELECT", "if node.parent_node: node.path = '{},{}'.format( node.parent_node.path, node.id ) else: node.path = '{}'.format( node.id", "node.path = '{},{}'.format( node.parent_node.path, node.id ) else: node.path = '{}'.format( node.id ) if", "*args, **kwargs): ''' Create a Node object. Generates level and path automatically if", "Node The Node object, unsaved. ''' node = self.model(**kwargs) # generate level if", "this tree/subtree. RETURNS ------- nodes: [Node,] A QuerySet of Node objects. ''' nodes", "if ones are not supplied in kwargs. RETURNS ------- node: Node The Node", "node: Node The root Node of this tree/subtree. 
<reponame>aaronmarkey/django-ancestry-relation
from django.db import models


class NodeManager(models.Manager):
    def create_node(self, *args, **kwargs):
        '''
        Create a Node object. Generates level and path automatically if they
        are not supplied in kwargs.

        RETURNS
        -------
        node: Node
            The Node object, unsaved.
        '''
        node = self.model(**kwargs)
        # generate level
        if 'level' not in kwargs:
            if node.parent_node_id:
                node.level = node.parent_node.level + 1
            else:
                node.level = 1
        # generate path
        if 'path' not in kwargs:
            if node.parent_node:
                node.path = '{},{}'.format(node.parent_node.path, node.id)
            else:
                node.path = '{}'.format(node.id)
        if node.level == 1:
            node.root_node = node
        return node
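    # Illustrative sketch of the bookkeeping above (ids shortened for
    # readability; the real model presumably uses UUID primary keys):
    #   root  -> level 1, path 'a1',     root_node = root
    #   child -> level 2, path 'a1,b2'   (parent path + ',' + own id)
    # A node's path therefore lists every ancestor id followed by its own,
    # which is what ancestral_nodes() and descendants() below rely on.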
    def create_tree(self, nodes=None):
        '''
        Save a list of nodes to the database.

        ARGS
        ----
        nodes: [Node,]
            A list of Node objects.

        RETURNS
        -------
        Bool
            False if nodes is empty or the save to the DB failed, True if
            saved to the DB successfully.
        '''
        # nodes=None replaces the original mutable [] default argument; the
        # truthiness check below treats None and [] identically.
        if nodes:
            try:
                self.bulk_create(nodes)
                return True
            except Exception:
                return False
        return False

    def ancestral_nodes(self, node):
        '''
        Get a list of all nodes on the path from the root to the given node,
        i.e. its ancestors plus the node itself.

        ARGS
        ----
        node: Node
            The node.

        RETURNS
        -------
        QuerySet: A QuerySet of Node objects, ordered by level.
        '''
        node_ids = node.path.split(',')
        return self.filter(id__in=node_ids).order_by('level')

    def descendants(self, node):
        '''
        Get a complete list of all nodes that descend from the given node.

        ARGS
        ----
        node: Node
            The node.

        RETURNS
        -------
        QuerySet: A QuerySet of Node objects, ordered by level.
        '''
        # Matching the id as a path substring assumes ids (UUIDs) never
        # collide as substrings of one another.
        return self.filter(
            root_node=node.root_node,
            path__contains=str(node.id)
        ).order_by('level')

    def children(self, node):
        '''
        Get the immediate children of the given node.

        ARGS
        ----
        node: Node
            The node.

        RETURNS
        -------
        QuerySet: A QuerySet of Node objects.
        '''
        return self.filter(parent_node=node).order_by('level')
    def leaves(self, node):
        # Return all leaf nodes of the tree rooted at node, via raw SQL.
        if node.id != node.root_node_id:
            raise Exception('node must be a root level node.')
        else:
            # NOTE: the table name is hard-coded and node.id is interpolated
            # straight into the SQL, which is only safe for trusted ids.
            leaves = self.raw(
                '''
                SELECT *
                FROM django_ancestry_relation_testnode n1
                WHERE (SELECT count(*)
                       FROM django_ancestry_relation_testnode n2
                       WHERE n2.parent_node_id = n1.id) = 0
                AND n1.root_node_id = '{}'
                ORDER BY n1.level ASC
                '''.format(str(node.id))
            )
            return leaves

    def hierarchical_ordered(self, node):
        '''
        Get a structured representation of Nodes. Uses the StructuredNode
        class found at classes.StructuredNode.

        ARGS
        ----
        node: Node
            The root of the tree being requested. This must be the root
            Node.

        RETURNS
        -------
        tree: StructuredNode
            A single StructuredNode object.

        NOTE
        ----
        This is slow. Do not use if descendants_ordered() can be used in
        any way.
        '''
        from django_ancestry_relation.classes import StructuredNode
        children_count = self.children(node).count()
        tree = StructuredNode.StructuredNode(data=node)
        if children_count > 0:
            children = self.children(node)
            for child in children:
                # recurse with this method; the original called a
                # non-existent hierarchical_structured_tree() here
                tree.children.append(self.hierarchical_ordered(child))
        return tree

    def descendants_ordered(self, node):
        '''
        Retrieve a flat list of node descendants, ordered according to their
        placement in the hierarchy.

        ARGS
        ----
        node: Node
            The root Node of this tree/subtree.

        RETURNS
        -------
        nodes: [Node,]
            A QuerySet of Node objects.
        '''
        nodes = self.descendants(node).order_by('path')
        return nodes

    def delete_tree(self, node):
        '''
        Just a wrapper for the Django Model .delete method. Will delete a
        node and all of its descendants.
        '''
        node.delete()
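A minimal usage sketch for the manager above. The Node model itself is not
part of this file; the UUID primary key and the parent_node / root_node /
level / path fields are assumptions inferred from what the manager touches.

import uuid

root = Node.objects.create_node(id=uuid.uuid4())
child = Node.objects.create_node(id=uuid.uuid4(), parent_node=root,
                                 root_node=root)
Node.objects.create_tree([root, child])         # bulk insert, returns True
Node.objects.descendants_ordered(root)          # flat list in hierarchy order
tree = Node.objects.hierarchical_ordered(root)  # nested StructuredNode tree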
[ "def require_resource(name): downloader.require(name) return lambda x: x def download_resource_task(name): def thunk(context): return download_resource(name,", "except Exception as e: if os.path.exists(resource_path): os.remove(resource_path) raise e def register_resource(task): if isinstance(task,", "require_resource(name): downloader.require(name) return lambda x: x def download_resource_task(name): def thunk(context): return download_resource(name, context)", "name[0] == '_': acc += '-' else: acc += name[0] return _convert_name(name[1:], acc)", "return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name)) def add_resource(self, name, fn): self.resources[name] = fn def", "download_resource(name, context): resource_path = os.path.join(context.resources_path, name) if os.path.exists(resource_path): return resource_path url = context.base_url", "os.path.exists(resource_path): os.remove(resource_path) raise e def register_resource(task): if isinstance(task, str): def _thunk(res): downloader.add_resource(task, res)", "= _convert_name(task.__name__) downloader.add_resource(name, task) return task def require_resource(name): downloader.require(name) return lambda x: x", "else '') + name[0].lower() elif name[0] == '_': acc += '-' else: acc", "DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name)) def add_resource(self, name, fn): self.resources[name] = fn def require(self,", "def download_resource(name, context): resource_path = os.path.join(context.resources_path, name) if os.path.exists(resource_path): return resource_path url =", "task def require_resource(name): downloader.require(name) return lambda x: x def download_resource_task(name): def thunk(context): return", "downloader.require(name) return lambda x: x def download_resource_task(name): def thunk(context): return download_resource(name, context) return", "acc += ('-' if len(acc) > 0 else '') + name[0].lower() elif name[0]", "if len(acc) > 0 else '') + name[0].lower() elif name[0] == '_': acc", "return resource_path except Exception as e: if os.path.exists(resource_path): os.remove(resource_path) raise e def register_resource(task):", "name)) def add_resource(self, name, fn): self.resources[name] = fn def require(self, name): self._all_requirements.append(name) def", "download_all(self): for r in self._all_requirements: self.get(r) downloader = Downloader() def download_resource(name, context): resource_path", "name = _convert_name(task.__name__) downloader.add_resource(name, task) return task def require_resource(name): downloader.require(name) return lambda x:", "None: self._base_path = os.path.expanduser('~/.visual_navigation') return self._base_path @property def resources_path(self): return os.path.join(self.base_path, 'resources') def", "0: return acc if name[0].isupper(): acc += ('-' if len(acc) > 0 else", "isinstance(task, str): def _thunk(res): downloader.add_resource(task, res) return res return _thunk name = _convert_name(task.__name__)", "len(name) == 0: return acc if name[0].isupper(): acc += ('-' if len(acc) >", "return res return _thunk name = _convert_name(task.__name__) downloader.add_resource(name, task) return task def require_resource(name):", "resources_path(self): return os.path.join(self.base_path, 'resources') def create_context(self, name): return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name)) def", "fn): self.resources[name] = fn def require(self, name): 
self._all_requirements.append(name) def get(self, name): return self.resources[name](self.create_context(name))", "try: print('Downloading resource %s.' % name) response = requests.get(url) with zipfile.ZipFile(io.BytesIO(response.content)) as z:", "_convert_name(task.__name__) downloader.add_resource(name, task) return task def require_resource(name): downloader.require(name) return lambda x: x def", "import zipfile from collections import namedtuple import os def _convert_name(name, acc = ''):", "== '_': acc += '-' else: acc += name[0] return _convert_name(name[1:], acc) DownloaderContext", "%s.' % name) response = requests.get(url) with zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path) print('Resource %s", "url = context.base_url + '%s.zip' % name try: print('Downloading resource %s.' % name)", "if os.path.exists(resource_path): return resource_path url = context.base_url + '%s.zip' % name try: print('Downloading", "return task def require_resource(name): downloader.require(name) return lambda x: x def download_resource_task(name): def thunk(context):", "name, fn): self.resources[name] = fn def require(self, name): self._all_requirements.append(name) def get(self, name): return", "context) return thunk def add_resources(downloader_instance): # Add test resource downloader_instance.add_resource('test', download_resource_task('test')) add_resources(downloader) def", "requests import io import zipfile from collections import namedtuple import os def _convert_name(name,", "def resources_path(self): return os.path.join(self.base_path, 'resources') def create_context(self, name): return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name))", "= namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path']) class Downloader: def __init__(self): self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources", "zipfile from collections import namedtuple import os def _convert_name(name, acc = ''): if", "% name) response = requests.get(url) with zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path) print('Resource %s downloaded.'", "z.extractall(resource_path) print('Resource %s downloaded.' %name) return resource_path except Exception as e: if os.path.exists(resource_path):", "_convert_name(name[1:], acc) DownloaderContext = namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path']) class Downloader: def __init__(self): self.base_url", "with zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path) print('Resource %s downloaded.' 
%name) return resource_path except Exception", "res) return res return _thunk name = _convert_name(task.__name__) downloader.add_resource(name, task) return task def", "= dict() self._base_path = None self._all_requirements = [] @property def base_path(self): if self._base_path", "for r in self._all_requirements: self.get(r) downloader = Downloader() def download_resource(name, context): resource_path =", "def base_path(self): if self._base_path is None: self._base_path = os.path.expanduser('~/.visual_navigation') return self._base_path @property def", "register_resource(task): if isinstance(task, str): def _thunk(res): downloader.add_resource(task, res) return res return _thunk name", "_thunk name = _convert_name(task.__name__) downloader.add_resource(name, task) return task def require_resource(name): downloader.require(name) return lambda", "def _thunk(res): downloader.add_resource(task, res) return res return _thunk name = _convert_name(task.__name__) downloader.add_resource(name, task)", "acc) DownloaderContext = namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path']) class Downloader: def __init__(self): self.base_url =", "import requests import io import zipfile from collections import namedtuple import os def", "= Downloader() def download_resource(name, context): resource_path = os.path.join(context.resources_path, name) if os.path.exists(resource_path): return resource_path", "context): resource_path = os.path.join(context.resources_path, name) if os.path.exists(resource_path): return resource_path url = context.base_url +", "= requests.get(url) with zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path) print('Resource %s downloaded.' %name) return resource_path", "downloaded.' %name) return resource_path except Exception as e: if os.path.exists(resource_path): os.remove(resource_path) raise e", "acc += name[0] return _convert_name(name[1:], acc) DownloaderContext = namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path']) class", "resource_path except Exception as e: if os.path.exists(resource_path): os.remove(resource_path) raise e def register_resource(task): if", "base_path(self): if self._base_path is None: self._base_path = os.path.expanduser('~/.visual_navigation') return self._base_path @property def resources_path(self):", "downloader.add_resource(name, task) return task def require_resource(name): downloader.require(name) return lambda x: x def download_resource_task(name):", "= [] @property def base_path(self): if self._base_path is None: self._base_path = os.path.expanduser('~/.visual_navigation') return", "+= ('-' if len(acc) > 0 else '') + name[0].lower() elif name[0] ==", "download_resource(name, context) return thunk def add_resources(downloader_instance): # Add test resource downloader_instance.add_resource('test', download_resource_task('test')) add_resources(downloader)", "def get(self, name): return self.resources[name](self.create_context(name)) def download_all(self): for r in self._all_requirements: self.get(r) downloader", "'resources_path', 'store_path']) class Downloader: def __init__(self): self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources = dict() self._base_path", "self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources = dict() self._base_path = None self._all_requirements = [] @property", "def add_resource(self, name, fn): self.resources[name] = fn def require(self, name): self._all_requirements.append(name) def get(self,", "acc = ''): if 
len(name) == 0: return acc if name[0].isupper(): acc +=", "self._base_path is None: self._base_path = os.path.expanduser('~/.visual_navigation') return self._base_path @property def resources_path(self): return os.path.join(self.base_path,", "is None: self._base_path = os.path.expanduser('~/.visual_navigation') return self._base_path @property def resources_path(self): return os.path.join(self.base_path, 'resources')", "res return _thunk name = _convert_name(task.__name__) downloader.add_resource(name, task) return task def require_resource(name): downloader.require(name)", "+ name[0].lower() elif name[0] == '_': acc += '-' else: acc += name[0]", "_convert_name(name, acc = ''): if len(name) == 0: return acc if name[0].isupper(): acc", "import io import zipfile from collections import namedtuple import os def _convert_name(name, acc", "= fn def require(self, name): self._all_requirements.append(name) def get(self, name): return self.resources[name](self.create_context(name)) def download_all(self):", "= os.path.join(context.resources_path, name) if os.path.exists(resource_path): return resource_path url = context.base_url + '%s.zip' %", "as z: z.extractall(resource_path) print('Resource %s downloaded.' %name) return resource_path except Exception as e:", "len(acc) > 0 else '') + name[0].lower() elif name[0] == '_': acc +=", "downloader.add_resource(task, res) return res return _thunk name = _convert_name(task.__name__) downloader.add_resource(name, task) return task", "'store_path']) class Downloader: def __init__(self): self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources = dict() self._base_path =", "download_resource_task(name): def thunk(context): return download_resource(name, context) return thunk def add_resources(downloader_instance): # Add test", "def add_resources(downloader_instance): # Add test resource downloader_instance.add_resource('test', download_resource_task('test')) add_resources(downloader) def resource(name): return downloader.get(name)", "acc if name[0].isupper(): acc += ('-' if len(acc) > 0 else '') +", "Exception as e: if os.path.exists(resource_path): os.remove(resource_path) raise e def register_resource(task): if isinstance(task, str):", "'resources') def create_context(self, name): return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name)) def add_resource(self, name, fn):", "thunk def add_resources(downloader_instance): # Add test resource downloader_instance.add_resource('test', download_resource_task('test')) add_resources(downloader) def resource(name): return", "self._all_requirements = [] @property def base_path(self): if self._base_path is None: self._base_path = os.path.expanduser('~/.visual_navigation')", "def thunk(context): return download_resource(name, context) return thunk def add_resources(downloader_instance): # Add test resource", "context.base_url + '%s.zip' % name try: print('Downloading resource %s.' % name) response =", "name) response = requests.get(url) with zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path) print('Resource %s downloaded.' %name)", "requests.get(url) with zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path) print('Resource %s downloaded.' 
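# For example (illustrative values, not from the original source):
#   _convert_name('MyTestResource')  -> 'my-test-resource'
#   _convert_name('my_test')         -> 'my-test'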
%name) return resource_path except", "def create_context(self, name): return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name)) def add_resource(self, name, fn): self.resources[name]", "dict() self._base_path = None self._all_requirements = [] @property def base_path(self): if self._base_path is", "os.path.join(self.resources_path, name)) def add_resource(self, name, fn): self.resources[name] = fn def require(self, name): self._all_requirements.append(name)", "return _thunk name = _convert_name(task.__name__) downloader.add_resource(name, task) return task def require_resource(name): downloader.require(name) return", "raise e def register_resource(task): if isinstance(task, str): def _thunk(res): downloader.add_resource(task, res) return res", "name): return self.resources[name](self.create_context(name)) def download_all(self): for r in self._all_requirements: self.get(r) downloader = Downloader()", "downloader = Downloader() def download_resource(name, context): resource_path = os.path.join(context.resources_path, name) if os.path.exists(resource_path): return", "@property def resources_path(self): return os.path.join(self.base_path, 'resources') def create_context(self, name): return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path,", "os.path.join(self.base_path, 'resources') def create_context(self, name): return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name)) def add_resource(self, name,", "e: if os.path.exists(resource_path): os.remove(resource_path) raise e def register_resource(task): if isinstance(task, str): def _thunk(res):", "print('Downloading resource %s.' % name) response = requests.get(url) with zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path)", "class Downloader: def __init__(self): self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources = dict() self._base_path = None", "namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path']) class Downloader: def __init__(self): self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources =", "('-' if len(acc) > 0 else '') + name[0].lower() elif name[0] == '_':", "require(self, name): self._all_requirements.append(name) def get(self, name): return self.resources[name](self.create_context(name)) def download_all(self): for r in", "as e: if os.path.exists(resource_path): os.remove(resource_path) raise e def register_resource(task): if isinstance(task, str): def", "def __init__(self): self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources = dict() self._base_path = None self._all_requirements =", "in self._all_requirements: self.get(r) downloader = Downloader() def download_resource(name, context): resource_path = os.path.join(context.resources_path, name)", "name): return DownloaderContext(self.base_url, self.resources_path, os.path.join(self.resources_path, name)) def add_resource(self, name, fn): self.resources[name] = fn", "zipfile.ZipFile(io.BytesIO(response.content)) as z: z.extractall(resource_path) print('Resource %s downloaded.' 
%name) return resource_path except Exception as", "get(self, name): return self.resources[name](self.create_context(name)) def download_all(self): for r in self._all_requirements: self.get(r) downloader =", "os.path.expanduser('~/.visual_navigation') return self._base_path @property def resources_path(self): return os.path.join(self.base_path, 'resources') def create_context(self, name): return", "'_': acc += '-' else: acc += name[0] return _convert_name(name[1:], acc) DownloaderContext =", "name[0] return _convert_name(name[1:], acc) DownloaderContext = namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path']) class Downloader: def", "return lambda x: x def download_resource_task(name): def thunk(context): return download_resource(name, context) return thunk", "add_resource(self, name, fn): self.resources[name] = fn def require(self, name): self._all_requirements.append(name) def get(self, name):", "else: acc += name[0] return _convert_name(name[1:], acc) DownloaderContext = namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path'])", "if len(name) == 0: return acc if name[0].isupper(): acc += ('-' if len(acc)", "acc += '-' else: acc += name[0] return _convert_name(name[1:], acc) DownloaderContext = namedtuple('DownloaderContext',", "import os def _convert_name(name, acc = ''): if len(name) == 0: return acc", "<filename>download.py import requests import io import zipfile from collections import namedtuple import os", "%s downloaded.' %name) return resource_path except Exception as e: if os.path.exists(resource_path): os.remove(resource_path) raise", "+= name[0] return _convert_name(name[1:], acc) DownloaderContext = namedtuple('DownloaderContext', ['base_url', 'resources_path', 'store_path']) class Downloader:", "return acc if name[0].isupper(): acc += ('-' if len(acc) > 0 else '')", "resource_path url = context.base_url + '%s.zip' % name try: print('Downloading resource %s.' %", "= ''): if len(name) == 0: return acc if name[0].isupper(): acc += ('-'", "'') + name[0].lower() elif name[0] == '_': acc += '-' else: acc +=", "%name) return resource_path except Exception as e: if os.path.exists(resource_path): os.remove(resource_path) raise e def", "['base_url', 'resources_path', 'store_path']) class Downloader: def __init__(self): self.base_url = 'https://deep-rl.herokuapp.com/resources/' self.resources = dict()", "def download_resource_task(name): def thunk(context): return download_resource(name, context) return thunk def add_resources(downloader_instance): # Add", "= os.path.expanduser('~/.visual_navigation') return self._base_path @property def resources_path(self): return os.path.join(self.base_path, 'resources') def create_context(self, name):", "''): if len(name) == 0: return acc if name[0].isupper(): acc += ('-' if", "resource_path = os.path.join(context.resources_path, name) if os.path.exists(resource_path): return resource_path url = context.base_url + '%s.zip'", "= None self._all_requirements = [] @property def base_path(self): if self._base_path is None: self._base_path", "self.resources[name] = fn def require(self, name): self._all_requirements.append(name) def get(self, name): return self.resources[name](self.create_context(name)) def", "0 else '') + name[0].lower() elif name[0] == '_': acc += '-' else:", "= context.base_url + '%s.zip' % name try: print('Downloading resource %s.' % name) response", "'%s.zip' % name try: print('Downloading resource %s.' 
import requests
import io
import zipfile
from collections import namedtuple
import os


def _convert_name(name, acc=''):
    # Convert a CamelCase or snake_case task name into a dashed resource key.
    if len(name) == 0:
        return acc
    if name[0].isupper():
        acc += ('-' if len(acc) > 0 else '') + name[0].lower()
    elif name[0] == '_':
        acc += '-'
    else:
        acc += name[0]
    return _convert_name(name[1:], acc)


DownloaderContext = namedtuple('DownloaderContext',
                               ['base_url', 'resources_path', 'store_path'])


class Downloader:
    def __init__(self):
        self.base_url = 'https://deep-rl.herokuapp.com/resources/'
        self.resources = dict()
        self._base_path = None
        self._all_requirements = []

    @property
    def base_path(self):
        if self._base_path is None:
            self._base_path = os.path.expanduser('~/.visual_navigation')
        return self._base_path

    @property
    def resources_path(self):
        return os.path.join(self.base_path, 'resources')

    def create_context(self, name):
        return DownloaderContext(self.base_url, self.resources_path,
                                 os.path.join(self.resources_path, name))

    def add_resource(self, name, fn):
        self.resources[name] = fn

    def require(self, name):
        self._all_requirements.append(name)

    def get(self, name):
        return self.resources[name](self.create_context(name))

    def download_all(self):
        for r in self._all_requirements:
            self.get(r)


downloader = Downloader()


def download_resource(name, context):
    resource_path = os.path.join(context.resources_path, name)
    if os.path.exists(resource_path):
        return resource_path

    url = context.base_url + '%s.zip' % name
    try:
        print('Downloading resource %s.' % name)
        response = requests.get(url)
        with zipfile.ZipFile(io.BytesIO(response.content)) as z:
            z.extractall(resource_path)
        print('Resource %s downloaded.' % name)
        return resource_path
    except Exception as e:
        # Clean up a partially written resource before re-raising.
        if os.path.exists(resource_path):
            os.remove(resource_path)
        raise e


def register_resource(task):
    if isinstance(task, str):
        def _thunk(res):
            downloader.add_resource(task, res)
            return res
        return _thunk
    name = _convert_name(task.__name__)
    downloader.add_resource(name, task)
    return task


def require_resource(name):
    downloader.require(name)
    return lambda x: x


def download_resource_task(name):
    def thunk(context):
        return download_resource(name, context)
    return thunk


def add_resources(downloader_instance):
    # Add test resource
    downloader_instance.add_resource('test', download_resource_task('test'))


add_resources(downloader)


def resource(name):
    ...  # body truncated in the recovered fragment
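# A hypothetical usage sketch for the downloader module above. The resource
# name 'my-dataset' and the functions my_dataset/train are illustrative only
# (not part of the original file), and the actual download assumes base_url
# can serve 'my-dataset.zip'.

@register_resource
def my_dataset(context):
    # _convert_name('my_dataset') yields the resource key 'my-dataset'
    return download_resource('my-dataset', context)


@require_resource('my-dataset')
def train():
    path = downloader.get('my-dataset')  # downloads on first access, then cached on disk
    print('Resources stored in %s' % path)


downloader.download_all()  # fetches everything passed to require_resource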
# encoding: utf-8
from leonardo.module.web.models import Widget
from leonardo.module.media.fields.image import ImageField

from django.db import models
from django.conf import settings
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import datetime
from django.utils.encoding import python_2_unicode_compatible
from leonardo.module.media.fields.multistorage_file import MultiStorageFileField


class RoudnyreslOrders(models.Model):
    # Order placed through the roudnyresl form (field labels are Czech:
    # name, surname, e-mail, phone, delivery address, company, tax IDs,
    # shipping, payment, note, order date).
    jmeno = models.CharField(
        max_length=255, verbose_name=u"Jméno", default='')
    slug = models.CharField(
        verbose_name=u"URL ID", max_length=150, blank=True, null=True)
    prijmeni = models.CharField(
        max_length=255, verbose_name=u"Příjmení", default='')
    email = models.EmailField(
        verbose_name=u"E-mail", default='')
    telefon = models.CharField(
        verbose_name=u"Telefon (ve tvaru: +420 123 456 789)", max_length=100)
    dorucovaci_adresa = models.CharField(
        verbose_name=u"Doručovací adresa",
        help_text="Př.: Pardubice, Benedettiho 709, 530 03", max_length=255)
    firma = models.CharField(
        max_length=255, verbose_name=u"Název firmy", default='')
    ico = models.CharField(
        verbose_name=u"IČO", max_length=255, default='')
    dic = models.CharField(
        verbose_name=u"DIČ", max_length=255,
        help_text="Vyplňte, jste-li plátce DPH", blank=True, null=True)
    doprava = models.CharField(
        verbose_name=u"Doprava", max_length=255)
    platba = models.CharField(
        verbose_name=u"Platba", max_length=255)
    zprava = models.TextField(
        verbose_name=u"Poznámka", default='', blank=True)
    pub_date = models.DateTimeField(u'Datum objednávky', auto_now_add=True)

    def get_absolute_url(self):
        from leonardo.module.web.widget.application.reverse import app_reverse
        return app_reverse(
            'created_order', 'leonardo_form_roudnyresl.apps.roudnyresl',
            kwargs={'slug': self.slug})

    def get_full_name(self):
        return str(self.jmeno.encode("utf-8") + " " + self.prijmeni.encode("utf-8"))

    def __unicode__(self):
        return self.jmeno

    class Meta:
        ordering = ['jmeno', ]
        verbose_name = u'Objednávka'
        verbose_name_plural = u'Objednávky'


class RoudnyreslProduct(models.Model):
    # One product line belonging to an order (product choice, base height,
    # relief height, embossed-motif size, uploaded data file).
    objednavka = models.ForeignKey(
        RoudnyreslOrders, verbose_name=u"Objednávka",
        related_name="orderproduct_set")
    produkt = models.CharField(
        verbose_name=u"Vyberte produkt", max_length=255)
    tloustka = models.CharField(
        verbose_name=u"Výška podstavy", max_length=255)
    vyska = models.CharField(
        verbose_name=u"Výška reliéfu", max_length=255)
    rozmer_motivu = models.CharField(
        verbose_name=u"Rozměr raženého motivu", max_length=255)
    soubor = models.FileField(
        u'Nahrání dat', upload_to='documents/%Y/%m/%d/')

    def __unicode__(self):
        return self.produkt

    class Meta:
        ordering = ['produkt', ]
        verbose_name = u'Produkt'
        verbose_name_plural = u'Produkty'
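# A hypothetical usage sketch for the two models above. All field values are
# made up for illustration; it assumes migrations are applied and runs in a
# Django shell context.

order = RoudnyreslOrders.objects.create(
    jmeno=u'Jan', prijmeni=u'Novák', slug=u'jan-novak',
    email=u'jan@example.com', telefon=u'+420 123 456 789',
    dorucovaci_adresa=u'Pardubice, Benedettiho 709, 530 03',
    doprava=u'PPL', platba=u'Dobírka')

RoudnyreslProduct.objects.create(
    objednavka=order, produkt=u'Reliéf', tloustka=u'10 mm', vyska=u'2 mm',
    rozmer_motivu=u'50x50 mm', soubor=u'documents/2016/01/01/data.zip')

# related_name="orderproduct_set" exposes the reverse relation on the order:
for product in order.orderproduct_set.all():
    print(u'%s: %s' % (order.get_full_name(), product.produkt))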
"""
test_parser_cache.py

Copyright 2012 <NAME>

This file is part of w3af, http://w3af.org/ .

w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.

w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest

from mock import patch, PropertyMock

from w3af.core.data.parsers.doc.html import HTMLParser
from w3af.core.data.parsers.tests.test_document_parser import _build_http_response
from w3af.core.data.parsers.parser_cache import ParserCache
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.url.HTTPResponse import HTTPResponse
from w3af.core.data.dc.headers import Headers
from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser
from w3af.core.data.parsers.utils.response_uniq_id import get_response_unique_id
from w3af.core.controllers.exceptions import BaseFrameworkException


class TestParserCache(unittest.TestCase):

    def setUp(self):
        self.url = URL('http://w3af.com')
        self.headers = Headers([(u'content-type', u'text/html')])
        self.dpc = ParserCache()

    def tearDown(self):
        self.dpc.clear()

    def test_basic(self):
        resp1 = HTTPResponse(200, 'abc', self.headers, self.url, self.url)
        resp2 = HTTPResponse(200, 'abc', self.headers, self.url, self.url)

        parser1 = self.dpc.get_document_parser_for(resp1)
        parser2 = self.dpc.get_document_parser_for(resp2)

        # Identical responses share one cached parser instance
        self.assertEqual(id(parser1), id(parser2))

    def test_bug_13_Dec_2012(self):
        url1 = URL('http://w3af.com/foo/')
        url2 = URL('http://w3af.com/bar/')

        body = '<a href="?id=1">1</a>'
        resp1 = HTTPResponse(200, body, self.headers, url1, url1)
        resp2 = HTTPResponse(200, body, self.headers, url2, url2)

        parser1 = self.dpc.get_document_parser_for(resp1)
        parser2 = self.dpc.get_document_parser_for(resp2)

        # Same body at different URLs must NOT share a parser ...
        self.assertNotEqual(id(parser1), id(parser2))

        # ... but both must resolve the relative reference the same way
        _, parsed_refs_1 = parser1.get_references()
        _, parsed_refs_2 = parser2.get_references()
        self.assertEqual(parsed_refs_1, parsed_refs_2)

    def test_issue_188_invalid_url(self):
        # https://github.com/andresriancho/w3af/issues/188
        all_chars = ''.join([chr(i) for i in xrange(0, 255)])
        response = HTTPResponse(200, all_chars, self.headers, self.url, self.url)
        self.dpc.get_document_parser_for(response)

    def test_cache_blacklist_after_timeout(self):
        #
        # If the cache tries to parse an HTTP response and that process fails,
        # then we blacklist the HTTP response so it never gets parsed again.
        #
        mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s'
        kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s'
        modp = 'w3af.core.data.parsers.document_parser.%s'

        with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock, \
             patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock, \
             patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock:

            #
            # Trigger the timeout
            #
            html = '<html>DelayedParser!</html>'
            http_resp = _build_http_response(html, u'text/html')

            timeout_mock.return_value = 1
            max_workers_mock.return_value = 1
            parsers_mock.return_value = [DelayedParser, HTMLParser]

            try:
                self.dpc.get_document_parser_for(http_resp)
            except BaseFrameworkException, bfe:
                self._is_timeout_exception_message(bfe, http_resp)
            else:
                self.assertTrue(False)

            #
            # Make sure it is in the blacklist
            #
            hash_string = get_response_unique_id(http_resp)
            self.assertIn(hash_string, self.dpc._parser_blacklist)

            #
            # Make sure the blacklist is used
            #
            try:
                self.dpc.get_document_parser_for(http_resp)
            except BaseFrameworkException, bfe:
                self.assertIn('Exceeded timeout while parsing', str(bfe))

    def _is_timeout_exception_message(self, toe, http_resp):
        msg = 'Reached timeout parsing "http://w3af.com/".'
        self.assertEquals(str(toe), msg)

    def test_get_tags_by_filter_simple(self):
        html = '<a href="/def">abc</a>'
        resp1 = HTTPResponse(200, html, self.headers, self.url, self.url)
        resp2 = HTTPResponse(200, html, self.headers, self.url, self.url)

        parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',))
        parser2 = self.dpc.get_tags_by_filter(resp2, tags=('a',))

        self.assertEqual(id(parser1), id(parser2))

    def test_get_tags_by_filter_different_tags(self):
        html = '<a href="/def">abc</a><b>hello</b>'
        resp1 = HTTPResponse(200, html, self.headers, self.url, self.url)
        resp2 = HTTPResponse(200, html, self.headers, self.url, self.url)

        parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',))
        parser2 = self.dpc.get_tags_by_filter(resp2, tags=('b',))
        # (remainder of this test truncated in the recovered fragment)
See the GNU General Public", "the terms of the GNU General Public License as published by the Free", "from w3af.core.data.parsers.utils.response_uniq_id import get_response_unique_id from w3af.core.controllers.exceptions import BaseFrameworkException class TestParserCache(unittest.TestCase): def setUp(self): self.url", "= '<a href=\"/def\">abc</a>' resp1 = HTTPResponse(200, html, self.headers, self.url, self.url) resp2 = HTTPResponse(200,", "mock import patch, PropertyMock from w3af.core.data.parsers.doc.html import HTMLParser from w3af.core.data.parsers.tests.test_document_parser import _build_http_response from", "tags=('a',)) self.assertEqual(id(parser1), id(parser2)) def test_get_tags_by_filter_different_tags(self): html = '<a href=\"/def\">abc</a><b>hello</b>' resp1 = HTTPResponse(200, html,", "parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertEqual(id(parser1), id(parser2)) def test_bug_13_Dec_2012(self): url1 = URL('http://w3af.com/foo/')", "new_callable=PropertyMock) as parsers_mock: # # Trigger the timeout # html = '<html>DelayedParser!</html>' http_resp", "is part of w3af, http://w3af.org/ . w3af is free software; you can redistribute", "patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock: # # Trigger the timeout # html", "from w3af.core.data.parsers.tests.test_document_parser import _build_http_response from w3af.core.data.parsers.parser_cache import ParserCache from w3af.core.data.parsers.doc.url import URL from", "1 parsers_mock.return_value = [DelayedParser, HTMLParser] try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp) else:", "body = '<a href=\"?id=1\">1</a>' resp1 = HTTPResponse(200, body, self.headers, url1, url1) resp2 =", "is in the blacklist # hash_string = get_response_unique_id(http_resp) self.assertIn(hash_string, self.dpc._parser_blacklist) # # Make", "resp1 = HTTPResponse(200, html, self.headers, self.url, self.url) resp2 = HTTPResponse(200, html, self.headers, self.url,", "you can redistribute it and/or modify it under the terms of the GNU", "test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for i in xrange(0, 255)]) response =", "% 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock, \\ patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock: #", "HTTPResponse(200, body, self.headers, url1, url1) resp2 = HTTPResponse(200, body, self.headers, url2, url2) parser1", "URL('http://w3af.com/foo/') url2 = URL('http://w3af.com/bar/') body = '<a href=\"?id=1\">1</a>' resp1 = HTTPResponse(200, body, self.headers,", "be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY", "parsed_refs_2 = parser2.get_references() self.assertEqual(parsed_refs_1, parsed_refs_2) def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for", "HTMLParser] try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # # Make", "hash_string = get_response_unique_id(http_resp) self.assertIn(hash_string, self.dpc._parser_blacklist) # # Make sure the blacklist is used", "_build_http_response from w3af.core.data.parsers.parser_cache import ParserCache from w3af.core.data.parsers.doc.url import URL from w3af.core.data.url.HTTPResponse 
import HTTPResponse", "process fails, then we blacklist # the HTTP response so it never gets", "bfe: self.assertIn('Exceeded timeout while parsing', str(bfe)) def _is_timeout_exception_message(self, toe, http_resp): msg = 'Reached", "get_response_unique_id(http_resp) self.assertIn(hash_string, self.dpc._parser_blacklist) # # Make sure the blacklist is used # try:", "the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the", "URL('http://w3af.com') self.headers = Headers([(u'content-type', u'text/html')]) self.dpc = ParserCache() def tearDown(self): self.dpc.clear() def test_basic(self):", "in xrange(0, 255)]) response = HTTPResponse(200, all_chars, self.headers, self.url, self.url) self.dpc.get_document_parser_for(response) def test_cache_blacklist_after_timeout(self):", "Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA \"\"\" import unittest", "self.dpc.get_document_parser_for(resp2) self.assertEqual(id(parser1), id(parser2)) def test_bug_13_Dec_2012(self): url1 = URL('http://w3af.com/foo/') url2 = URL('http://w3af.com/bar/') body =", "# html = '<html>DelayedParser!</html>' http_resp = _build_http_response(html, u'text/html') timeout_mock.return_value = 1 max_workers_mock.return_value =", "Boston, MA 02110-1301 USA \"\"\" import unittest from mock import patch, PropertyMock from", "parsing \"http://w3af.com/\".' self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self): html = '<a href=\"/def\">abc</a>' resp1 = HTTPResponse(200,", "self.url) parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',)) parser2 = self.dpc.get_tags_by_filter(resp2, tags=('a',)) self.assertEqual(id(parser1), id(parser2)) def test_get_tags_by_filter_different_tags(self):", "import get_response_unique_id from w3af.core.controllers.exceptions import BaseFrameworkException class TestParserCache(unittest.TestCase): def setUp(self): self.url = URL('http://w3af.com')", "u'text/html') timeout_mock.return_value = 1 max_workers_mock.return_value = 1 parsers_mock.return_value = [DelayedParser, HTMLParser] try: self.dpc.get_document_parser_for(http_resp)", "self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # # Make sure it", "= self.dpc.get_document_parser_for(resp2) self.assertEqual(id(parser1), id(parser2)) def test_bug_13_Dec_2012(self): url1 = URL('http://w3af.com/foo/') url2 = URL('http://w3af.com/bar/') body", "it under the terms of the GNU General Public License as published by", "'<a href=\"/def\">abc</a>' resp1 = HTTPResponse(200, html, self.headers, self.url, self.url) resp2 = HTTPResponse(200, html,", "self.url) resp2 = HTTPResponse(200, html, self.headers, self.url, self.url) parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',)) parser2", "the timeout # html = '<html>DelayedParser!</html>' http_resp = _build_http_response(html, u'text/html') timeout_mock.return_value = 1", "= _build_http_response(html, u'text/html') timeout_mock.return_value = 1 max_workers_mock.return_value = 1 parsers_mock.return_value = [DelayedParser, HTMLParser]", "mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp", "<NAME> This file is part of w3af, http://w3af.org/ . 
w3af is free software;", "self.url, self.url) self.dpc.get_document_parser_for(response) def test_cache_blacklist_after_timeout(self): # # If the cache tries to parse", "import HTTPResponse from w3af.core.data.dc.headers import Headers from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser from w3af.core.data.parsers.utils.response_uniq_id import", "License for more details. You should have received a copy of the GNU", "patch, PropertyMock from w3af.core.data.parsers.doc.html import HTMLParser from w3af.core.data.parsers.tests.test_document_parser import _build_http_response from w3af.core.data.parsers.parser_cache import", "parsed_refs_2) def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for i in xrange(0, 255)])", "self.headers, self.url, self.url) parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',)) parser2 = self.dpc.get_tags_by_filter(resp2, tags=('a',)) self.assertEqual(id(parser1), id(parser2))", "toe, http_resp): msg = 'Reached timeout parsing \"http://w3af.com/\".' self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self): html", "GNU General Public License as published by the Free Software Foundation version 2", "URL from w3af.core.data.url.HTTPResponse import HTTPResponse from w3af.core.data.dc.headers import Headers from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser", "from w3af.core.data.parsers.doc.html import HTMLParser from w3af.core.data.parsers.tests.test_document_parser import _build_http_response from w3af.core.data.parsers.parser_cache import ParserCache from", "details. You should have received a copy of the GNU General Public License", "mmpdp % 'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock,", "if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth", "url2, url2) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertNotEqual(id(parser1), id(parser2)) _, parsed_refs_1 =", "url2) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertNotEqual(id(parser1), id(parser2)) _, parsed_refs_1 = parser1.get_references()", "Copyright 2012 <NAME> This file is part of w3af, http://w3af.org/ . w3af is", "so it never gets parsed again. # mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp = mmpdp", "new_callable=PropertyMock) as timeout_mock, \\ patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock, \\ patch(modp %", "= URL('http://w3af.com/bar/') body = '<a href=\"?id=1\">1</a>' resp1 = HTTPResponse(200, body, self.headers, url1, url1)", "BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # # Make sure it is in", "\"http://w3af.com/\".' self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self): html = '<a href=\"/def\">abc</a>' resp1 = HTTPResponse(200, html,", "distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;", "url1, url1) resp2 = HTTPResponse(200, body, self.headers, url2, url2) parser1 = self.dpc.get_document_parser_for(resp1) parser2", "GNU General Public License for more details. 
You should have received a copy", "html = '<html>DelayedParser!</html>' http_resp = _build_http_response(html, u'text/html') timeout_mock.return_value = 1 max_workers_mock.return_value = 1", "WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR", "unittest from mock import patch, PropertyMock from w3af.core.data.parsers.doc.html import HTMLParser from w3af.core.data.parsers.tests.test_document_parser import", "parser2 = self.dpc.get_document_parser_for(resp2) self.assertNotEqual(id(parser1), id(parser2)) _, parsed_refs_1 = parser1.get_references() _, parsed_refs_2 = parser2.get_references()", "except BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # # Make sure it is", "parsing', str(bfe)) def _is_timeout_exception_message(self, toe, http_resp): msg = 'Reached timeout parsing \"http://w3af.com/\".' self.assertEquals(str(toe),", "w3af.core.data.parsers.utils.response_uniq_id import get_response_unique_id from w3af.core.controllers.exceptions import BaseFrameworkException class TestParserCache(unittest.TestCase): def setUp(self): self.url =", "[DelayedParser, HTMLParser] try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # #", "from w3af.core.data.parsers.doc.url import URL from w3af.core.data.url.HTTPResponse import HTTPResponse from w3af.core.data.dc.headers import Headers from", "resp1 = HTTPResponse(200, 'abc', self.headers, self.url, self.url) resp2 = HTTPResponse(200, 'abc', self.headers, self.url,", "General Public License as published by the Free Software Foundation version 2 of", "xrange(0, 255)]) response = HTTPResponse(200, all_chars, self.headers, self.url, self.url) self.dpc.get_document_parser_for(response) def test_cache_blacklist_after_timeout(self): #", "2012 <NAME> This file is part of w3af, http://w3af.org/ . w3af is free", "import patch, PropertyMock from w3af.core.data.parsers.doc.html import HTMLParser from w3af.core.data.parsers.tests.test_document_parser import _build_http_response from w3af.core.data.parsers.parser_cache", "% 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock, \\ patch(kmpdp % 'MAX_WORKERS', new_callable=PropertyMock) as max_workers_mock, \\", "modp = 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock, \\ patch(kmpdp %", "the blacklist is used # try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self.assertIn('Exceeded timeout while", "used # try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self.assertIn('Exceeded timeout while parsing', str(bfe)) def", "# hash_string = get_response_unique_id(http_resp) self.assertIn(hash_string, self.dpc._parser_blacklist) # # Make sure the blacklist is", "all_chars, self.headers, self.url, self.url) self.dpc.get_document_parser_for(response) def test_cache_blacklist_after_timeout(self): # # If the cache tries", "self.dpc.get_tags_by_filter(resp1, tags=('a',)) parser2 = self.dpc.get_tags_by_filter(resp2, tags=('a',)) self.assertEqual(id(parser1), id(parser2)) def test_get_tags_by_filter_different_tags(self): html = '<a", "implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU", "import _build_http_response from w3af.core.data.parsers.parser_cache import ParserCache from w3af.core.data.parsers.doc.url import URL from w3af.core.data.url.HTTPResponse import", "even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See", "PARTICULAR PURPOSE. See the GNU General Public License for more details. You should", "Software Foundation version 2 of the License. w3af is distributed in the hope", "self.assertEqual(parsed_refs_1, parsed_refs_2) def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for i in xrange(0,", "def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for i in xrange(0, 255)]) response", "parser2 = self.dpc.get_document_parser_for(resp2) self.assertEqual(id(parser1), id(parser2)) def test_bug_13_Dec_2012(self): url1 = URL('http://w3af.com/foo/') url2 = URL('http://w3af.com/bar/')", "with w3af; if not, write to the Free Software Foundation, Inc., 51 Franklin", "http_resp): msg = 'Reached timeout parsing \"http://w3af.com/\".' self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self): html =", "tags=('a',)) parser2 = self.dpc.get_tags_by_filter(resp2, tags=('a',)) self.assertEqual(id(parser1), id(parser2)) def test_get_tags_by_filter_different_tags(self): html = '<a href=\"/def\">abc</a><b>hello</b>'", "Make sure it is in the blacklist # hash_string = get_response_unique_id(http_resp) self.assertIn(hash_string, self.dpc._parser_blacklist)", "from mock import patch, PropertyMock from w3af.core.data.parsers.doc.html import HTMLParser from w3af.core.data.parsers.tests.test_document_parser import _build_http_response", "A PARTICULAR PURPOSE. See the GNU General Public License for more details. You", "href=\"/def\">abc</a>' resp1 = HTTPResponse(200, html, self.headers, self.url, self.url) resp2 = HTTPResponse(200, html, self.headers,", "self.headers, self.url, self.url) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertEqual(id(parser1), id(parser2)) def test_bug_13_Dec_2012(self):", "'Reached timeout parsing \"http://w3af.com/\".' self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self): html = '<a href=\"/def\">abc</a>' resp1", "bfe: self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # # Make sure it is in the", "a copy of the GNU General Public License along with w3af; if not,", "USA \"\"\" import unittest from mock import patch, PropertyMock from w3af.core.data.parsers.doc.html import HTMLParser", "received a copy of the GNU General Public License along with w3af; if", "w3af.core.data.parsers.parser_cache import ParserCache from w3af.core.data.parsers.doc.url import URL from w3af.core.data.url.HTTPResponse import HTTPResponse from w3af.core.data.dc.headers", "setUp(self): self.url = URL('http://w3af.com') self.headers = Headers([(u'content-type', u'text/html')]) self.dpc = ParserCache() def tearDown(self):", "response so it never gets parsed again. # mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp =", "as published by the Free Software Foundation version 2 of the License. w3af", "the Free Software Foundation version 2 of the License. 
w3af is distributed in", "Public License along with w3af; if not, write to the Free Software Foundation,", "255)]) response = HTTPResponse(200, all_chars, self.headers, self.url, self.url) self.dpc.get_document_parser_for(response) def test_cache_blacklist_after_timeout(self): # #", "again. # mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s'", "HTTP response, that process fails, then we blacklist # the HTTP response so", "w3af is distributed in the hope that it will be useful, but WITHOUT", "copy of the GNU General Public License along with w3af; if not, write", "html, self.headers, self.url, self.url) parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',)) parser2 = self.dpc.get_tags_by_filter(resp2, tags=('a',)) self.assertEqual(id(parser1),", "Public License as published by the Free Software Foundation version 2 of the", "import DelayedParser from w3af.core.data.parsers.utils.response_uniq_id import get_response_unique_id from w3af.core.controllers.exceptions import BaseFrameworkException class TestParserCache(unittest.TestCase): def", "write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,", "from w3af.core.data.url.HTTPResponse import HTTPResponse from w3af.core.data.dc.headers import Headers from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser from", "from w3af.core.data.parsers.parser_cache import ParserCache from w3af.core.data.parsers.doc.url import URL from w3af.core.data.url.HTTPResponse import HTTPResponse from", "parser2.get_references() self.assertEqual(parsed_refs_1, parsed_refs_2) def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for i in", "= HTTPResponse(200, body, self.headers, url1, url1) resp2 = HTTPResponse(200, body, self.headers, url2, url2)", "import BaseFrameworkException class TestParserCache(unittest.TestCase): def setUp(self): self.url = URL('http://w3af.com') self.headers = Headers([(u'content-type', u'text/html')])", "= HTTPResponse(200, body, self.headers, url2, url2) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertNotEqual(id(parser1),", "http://w3af.org/ . w3af is free software; you can redistribute it and/or modify it", "that process fails, then we blacklist # the HTTP response so it never", "License. w3af is distributed in the hope that it will be useful, but", "redistribute it and/or modify it under the terms of the GNU General Public", "Public License for more details. 
You should have received a copy of the", "# mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s' with", "test_get_tags_by_filter_different_tags(self): html = '<a href=\"/def\">abc</a><b>hello</b>' resp1 = HTTPResponse(200, html, self.headers, self.url, self.url) resp2", "= Headers([(u'content-type', u'text/html')]) self.dpc = ParserCache() def tearDown(self): self.dpc.clear() def test_basic(self): resp1 =", "href=\"/def\">abc</a><b>hello</b>' resp1 = HTTPResponse(200, html, self.headers, self.url, self.url) resp2 = HTTPResponse(200, html, self.headers,", "HTTPResponse(200, body, self.headers, url2, url2) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertNotEqual(id(parser1), id(parser2))", "free software; you can redistribute it and/or modify it under the terms of", "HTMLParser from w3af.core.data.parsers.tests.test_document_parser import _build_http_response from w3af.core.data.parsers.parser_cache import ParserCache from w3af.core.data.parsers.doc.url import URL", "2 of the License. w3af is distributed in the hope that it will", "= URL('http://w3af.com/foo/') url2 = URL('http://w3af.com/bar/') body = '<a href=\"?id=1\">1</a>' resp1 = HTTPResponse(200, body,", "51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA \"\"\" import unittest from", "max_workers_mock, \\ patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock: # # Trigger the timeout", "def test_basic(self): resp1 = HTTPResponse(200, 'abc', self.headers, self.url, self.url) resp2 = HTTPResponse(200, 'abc',", "BaseFrameworkException, bfe: self.assertIn('Exceeded timeout while parsing', str(bfe)) def _is_timeout_exception_message(self, toe, http_resp): msg =", "and/or modify it under the terms of the GNU General Public License as", "w3af.core.controllers.exceptions import BaseFrameworkException class TestParserCache(unittest.TestCase): def setUp(self): self.url = URL('http://w3af.com') self.headers = Headers([(u'content-type',", "Franklin St, Fifth Floor, Boston, MA 02110-1301 USA \"\"\" import unittest from mock", "as parsers_mock: # # Trigger the timeout # html = '<html>DelayedParser!</html>' http_resp =", "body, self.headers, url2, url2) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertNotEqual(id(parser1), id(parser2)) _,", "self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # # Make sure it is in the blacklist", "= 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock, \\ patch(kmpdp % 'MAX_WORKERS',", "# # Trigger the timeout # html = '<html>DelayedParser!</html>' http_resp = _build_http_response(html, u'text/html')", "part of w3af, http://w3af.org/ . 
w3af is free software; you can redistribute it", "software; you can redistribute it and/or modify it under the terms of the", "sure the blacklist is used # try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self.assertIn('Exceeded timeout", "all_chars = ''.join([chr(i) for i in xrange(0, 255)]) response = HTTPResponse(200, all_chars, self.headers,", "\"\"\" test_parser_cache.py Copyright 2012 <NAME> This file is part of w3af, http://w3af.org/ .", "test_basic(self): resp1 = HTTPResponse(200, 'abc', self.headers, self.url, self.url) resp2 = HTTPResponse(200, 'abc', self.headers,", "parser1.get_references() _, parsed_refs_2 = parser2.get_references() self.assertEqual(parsed_refs_1, parsed_refs_2) def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars =", "self.assertIn('Exceeded timeout while parsing', str(bfe)) def _is_timeout_exception_message(self, toe, http_resp): msg = 'Reached timeout", "to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA", "along with w3af; if not, write to the Free Software Foundation, Inc., 51", "blacklist # hash_string = get_response_unique_id(http_resp) self.assertIn(hash_string, self.dpc._parser_blacklist) # # Make sure the blacklist", "new_callable=PropertyMock) as max_workers_mock, \\ patch(modp % 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock: # # Trigger", "= parser2.get_references() self.assertEqual(parsed_refs_1, parsed_refs_2) def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for i", "ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A", "HTTP response so it never gets parsed again. # mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp", "timeout_mock.return_value = 1 max_workers_mock.return_value = 1 parsers_mock.return_value = [DelayedParser, HTMLParser] try: self.dpc.get_document_parser_for(http_resp) except", "self.assertNotEqual(id(parser1), id(parser2)) _, parsed_refs_1 = parser1.get_references() _, parsed_refs_2 = parser2.get_references() self.assertEqual(parsed_refs_1, parsed_refs_2) def", "# Make sure it is in the blacklist # hash_string = get_response_unique_id(http_resp) self.assertIn(hash_string,", "import Headers from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser from w3af.core.data.parsers.utils.response_uniq_id import get_response_unique_id from w3af.core.controllers.exceptions import", "'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as timeout_mock, \\ patch(kmpdp", "MA 02110-1301 USA \"\"\" import unittest from mock import patch, PropertyMock from w3af.core.data.parsers.doc.html", "https://github.com/andresriancho/w3af/issues/188 all_chars = ''.join([chr(i) for i in xrange(0, 255)]) response = HTTPResponse(200, all_chars,", "= self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertEqual(id(parser1), id(parser2)) def test_bug_13_Dec_2012(self): url1 = URL('http://w3af.com/foo/') url2", "of w3af, http://w3af.org/ . w3af is free software; you can redistribute it and/or", "self.url) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2) self.assertEqual(id(parser1), id(parser2)) def test_bug_13_Dec_2012(self): url1 =", "it never gets parsed again. 
# mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp = mmpdp %", "License as published by the Free Software Foundation version 2 of the License.", "= mmpdp % 'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock) as", "then we blacklist # the HTTP response so it never gets parsed again.", "in the hope that it will be useful, but WITHOUT ANY WARRANTY; without", "try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp) else: self.assertTrue(False) # # Make sure", "the blacklist # hash_string = get_response_unique_id(http_resp) self.assertIn(hash_string, self.dpc._parser_blacklist) # # Make sure the", "Free Software Foundation version 2 of the License. w3af is distributed in the", "= ''.join([chr(i) for i in xrange(0, 255)]) response = HTTPResponse(200, all_chars, self.headers, self.url,", "modify it under the terms of the GNU General Public License as published", "self.url = URL('http://w3af.com') self.headers = Headers([(u'content-type', u'text/html')]) self.dpc = ParserCache() def tearDown(self): self.dpc.clear()", "self.headers, self.url, self.url) parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',)) parser2 = self.dpc.get_tags_by_filter(resp2, tags=('b',)) self.assertNotEqual(id(parser1), id(parser2))", "for i in xrange(0, 255)]) response = HTTPResponse(200, all_chars, self.headers, self.url, self.url) self.dpc.get_document_parser_for(response)", "HTTPResponse(200, html, self.headers, self.url, self.url) resp2 = HTTPResponse(200, html, self.headers, self.url, self.url) parser1", "self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self): html = '<a href=\"/def\">abc</a>' resp1 = HTTPResponse(200, html, self.headers,", "more details. You should have received a copy of the GNU General Public", "# try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self.assertIn('Exceeded timeout while parsing', str(bfe)) def _is_timeout_exception_message(self,", "id(parser2)) def test_get_tags_by_filter_different_tags(self): html = '<a href=\"/def\">abc</a><b>hello</b>' resp1 = HTTPResponse(200, html, self.headers, self.url,", "w3af.core.data.parsers.doc.url import URL from w3af.core.data.url.HTTPResponse import HTTPResponse from w3af.core.data.dc.headers import Headers from w3af.core.data.parsers.tests.test_mp_document_parser", "of the License. w3af is distributed in the hope that it will be", "HTTPResponse from w3af.core.data.dc.headers import Headers from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser from w3af.core.data.parsers.utils.response_uniq_id import get_response_unique_id", "blacklist # the HTTP response so it never gets parsed again. 
# mmpdp", "= parser1.get_references() _, parsed_refs_2 = parser2.get_references() self.assertEqual(parsed_refs_1, parsed_refs_2) def test_issue_188_invalid_url(self): # https://github.com/andresriancho/w3af/issues/188 all_chars", "def test_get_tags_by_filter_different_tags(self): html = '<a href=\"/def\">abc</a><b>hello</b>' resp1 = HTTPResponse(200, html, self.headers, self.url, self.url)", "self.dpc.get_document_parser_for(resp2) self.assertNotEqual(id(parser1), id(parser2)) _, parsed_refs_1 = parser1.get_references() _, parsed_refs_2 = parser2.get_references() self.assertEqual(parsed_refs_1, parsed_refs_2)", "PropertyMock from w3af.core.data.parsers.doc.html import HTMLParser from w3af.core.data.parsers.tests.test_document_parser import _build_http_response from w3af.core.data.parsers.parser_cache import ParserCache", "= 'Reached timeout parsing \"http://w3af.com/\".' self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self): html = '<a href=\"/def\">abc</a>'", "You should have received a copy of the GNU General Public License along", "self.assertEqual(id(parser1), id(parser2)) def test_get_tags_by_filter_different_tags(self): html = '<a href=\"/def\">abc</a><b>hello</b>' resp1 = HTTPResponse(200, html, self.headers,", ". w3af is free software; you can redistribute it and/or modify it under", "i in xrange(0, 255)]) response = HTTPResponse(200, all_chars, self.headers, self.url, self.url) self.dpc.get_document_parser_for(response) def", "hope that it will be useful, but WITHOUT ANY WARRANTY; without even the", "self.url, self.url) resp2 = HTTPResponse(200, html, self.headers, self.url, self.url) parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',))", "w3af, http://w3af.org/ . w3af is free software; you can redistribute it and/or modify", "html, self.headers, self.url, self.url) parser1 = self.dpc.get_tags_by_filter(resp1, tags=('a',)) parser2 = self.dpc.get_tags_by_filter(resp2, tags=('b',)) self.assertNotEqual(id(parser1),", "str(bfe)) def _is_timeout_exception_message(self, toe, http_resp): msg = 'Reached timeout parsing \"http://w3af.com/\".' self.assertEquals(str(toe), msg)", "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License", "self.assertTrue(False) # # Make sure it is in the blacklist # hash_string =", "kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp % 'PARSER_TIMEOUT', new_callable=PropertyMock)", "self.headers = Headers([(u'content-type', u'text/html')]) self.dpc = ParserCache() def tearDown(self): self.dpc.clear() def test_basic(self): resp1", "w3af; if not, write to the Free Software Foundation, Inc., 51 Franklin St,", "parsed again. 
# mmpdp = 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s' modp =", "= 1 parsers_mock.return_value = [DelayedParser, HTMLParser] try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self._is_timeout_exception_message(bfe, http_resp)", "def setUp(self): self.url = URL('http://w3af.com') self.headers = Headers([(u'content-type', u'text/html')]) self.dpc = ParserCache() def", "useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or", "def test_cache_blacklist_after_timeout(self): # # If the cache tries to parse an HTTP response,", "w3af.core.data.url.HTTPResponse import HTTPResponse from w3af.core.data.dc.headers import Headers from w3af.core.data.parsers.tests.test_mp_document_parser import DelayedParser from w3af.core.data.parsers.utils.response_uniq_id", "= 'w3af.core.data.parsers.mp_document_parser.%s' kmpdp = mmpdp % 'MultiProcessingDocumentParser.%s' modp = 'w3af.core.data.parsers.document_parser.%s' with patch(kmpdp %", "url1 = URL('http://w3af.com/foo/') url2 = URL('http://w3af.com/bar/') body = '<a href=\"?id=1\">1</a>' resp1 = HTTPResponse(200,", "parse an HTTP response, that process fails, then we blacklist # the HTTP", "is distributed in the hope that it will be useful, but WITHOUT ANY", "Headers([(u'content-type', u'text/html')]) self.dpc = ParserCache() def tearDown(self): self.dpc.clear() def test_basic(self): resp1 = HTTPResponse(200,", "BaseFrameworkException class TestParserCache(unittest.TestCase): def setUp(self): self.url = URL('http://w3af.com') self.headers = Headers([(u'content-type', u'text/html')]) self.dpc", "resp2 = HTTPResponse(200, body, self.headers, url2, url2) parser1 = self.dpc.get_document_parser_for(resp1) parser2 = self.dpc.get_document_parser_for(resp2)", "version 2 of the License. w3af is distributed in the hope that it", "_is_timeout_exception_message(self, toe, http_resp): msg = 'Reached timeout parsing \"http://w3af.com/\".' self.assertEquals(str(toe), msg) def test_get_tags_by_filter_simple(self):", "the GNU General Public License for more details. You should have received a", "Make sure the blacklist is used # try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException, bfe: self.assertIn('Exceeded", "# the HTTP response so it never gets parsed again. # mmpdp =", "= 1 max_workers_mock.return_value = 1 parsers_mock.return_value = [DelayedParser, HTMLParser] try: self.dpc.get_document_parser_for(http_resp) except BaseFrameworkException,", "% 'DocumentParser.PARSERS', new_callable=PropertyMock) as parsers_mock: # # Trigger the timeout # html =" ]
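test_cache_blacklist_after_timeout patches PARSER_TIMEOUT down to one second and puts DelayedParser first in DocumentParser.PARSERS so that parsing is guaranteed to time out. DelayedParser itself lives in w3af's test suite and is not captured in this dump; the following is only a minimal sketch of what such a stand-in has to do, with the class body assumed rather than copied from w3af:

import time


class DelayedParser(object):
    """Hypothetical stand-in: claims it can parse anything, then stalls.

    With PARSER_TIMEOUT patched to 1 second, the worker doing the parsing
    is given up on before __init__ returns, so ParserCache records the
    response hash in its _parser_blacklist.
    """

    def __init__(self, http_response):
        # Sleep past the patched one-second timeout to force the failure path.
        time.sleep(3)

    @staticmethod
    def can_parse(http_response):
        # Listed before HTMLParser in the patched DocumentParser.PARSERS,
        # so it is always the parser that gets picked.
        return True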
[row: PyQt5 base64/hash GUI (xtools main script), reconstructed from the row's overlapping n-grams]

import sys
import binascii
import hashlib

from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog

from xtui import Ui_Form
from xtoolsfunc import XToolsFunc

base64_method = ["encode", "decode"]
hash_available = hashlib.algorithms_guaranteed


class MainUi(QMainWindow, QFileDialog, Ui_Form):
    def __init__(self, parent=None):
        super(MainUi, self).__init__(parent)
        self.setupUi(self)
        self.type_ComboBox.addItem("")
        self.type_ComboBox.addItem("")
        self.type_ComboBox.setItemText(0, "base64")
        self.type_ComboBox.setItemText(1, "Hash")
        self.type_ComboBox.activated.connect(self.enc_type)
        self.confirm_Button.clicked.connect(self.confirm)
        self.open_Button.clicked.connect(self.openfile)
        for i in range(len(base64_method)):
            self.method_ComboBox.addItem("")
            self.method_ComboBox.setItemText(i, base64_method[i])

    def openfile(self):
        filedir = self.getOpenFileName(self, "open file", "./", "All Files (*)")[0]
        self.input_TextEdit.setText(filedir)

    def enc_type(self):
        self.method_ComboBox.clear()
        if self.type_ComboBox.currentText() == "Hash":
            hash_available_list = list(hash_available)
            for i in range(len(hash_available_list)):
                self.method_ComboBox.addItem("")
                self.method_ComboBox.setItemText(i, hash_available_list[i])
        else:
            for i in range(len(base64_method)):
                self.method_ComboBox.addItem("")
                self.method_ComboBox.setItemText(i, base64_method[i])

    def confirm(self):
        enc_type = self.type_ComboBox.currentText()
        method = self.method_ComboBox.currentText()
        value = self.input_TextEdit.toPlainText()
        if value:
            if enc_type == "base64":
                result = XToolsFunc.base64_method(method, value)
                self.ouput_TextBrowser.setText(result[0])
                self.output_label.setText(result[1])
            elif enc_type == "Hash":
                result = XToolsFunc.hash_method(method, value)
                self.ouput_TextBrowser.setText(result[0])
                self.output_label.setText(result[1])
        else:
            self.output_label.setText("无输入")  # "no input"
            self.ouput_TextBrowser.clear()


def main():
    app = QApplication(sys.argv)
    myUi = MainUi()
    myUi.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
self.method_ComboBox.setItemText(i,hash_available_list[i]) else: for", "xtui import Ui_Form from xtoolsfunc import XToolsFunc base64_method = [\"encode\",\"decode\"] hash_available = hashlib.algorithms_guaranteed", "= [\"encode\",\"decode\"] hash_available = hashlib.algorithms_guaranteed class MainUi(QMainWindow,QFileDialog,Ui_Form): def __init__(self,parent=None): super(MainUi,self).__init__(parent) self.setupUi(self) self.type_ComboBox.addItem(\"\") self.type_ComboBox.addItem(\"\")" ]
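# The GUI above only runs together with the xtoolsfunc module it imports.
# What follows is a minimal sketch of that module, inferred from the call
# sites above: XToolsFunc.base64_method / XToolsFunc.hash_method take
# (method, value) and return an (output_text, status_text) pair. The real
# implementation may differ; these bodies are illustrative only.
import base64
import binascii
import hashlib


class XToolsFunc(object):
    @staticmethod
    def base64_method(method, value):
        # method is "encode" or "decode", matching the base64_method list above
        try:
            if method == "encode":
                return base64.b64encode(value.encode()).decode(), "ok"
            return base64.b64decode(value).decode(), "ok"
        except (binascii.Error, UnicodeDecodeError) as e:
            return "", "error: {}".format(e)

    @staticmethod
    def hash_method(method, value):
        # method is one of hashlib.algorithms_guaranteed, as populated above
        h = hashlib.new(method)
        h.update(value.encode())
        if method.startswith("shake_"):
            return h.hexdigest(32), "ok"  # shake digests need an explicit length
        return h.hexdigest(), "ok"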
[ "{tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______ ____ ____ ______ ______ #", "is_valid_hash from aeternity.config import Config # also log to stdout because docker root", "up an account\"\"\" # recipient_address = request.form.get(\"account\") # validate the address if len(recipient_address.strip())", "response.headers header['Access-Control-Allow-Origin'] = '*' return response @app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250))", "||_ \\ / _||_ _ `. .' ____ \\ # / .' \\_|", "the top up service', 'opts': [] } ] parser = argparse.ArgumentParser() subparsers =", "def rest_faucet(recipient_address): \"\"\"top up an account\"\"\" # recipient_address = request.form.get(\"account\") # validate the", "_| |_\\/_| |_ _| |_.' /| \\____) | # `.____ .'|_____||_____||______.' \\______.' #", "3 or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400 # genesys key", "= os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) #", "parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest = 'command' #", "Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL',", "___ ||_ \\ / _||_ _ `. .' ____ \\ # / .'", "default=sa.get('default')) # parse the arguments args = parser.parse_args() # call the command with", ".' \\_| | \\/ | | | `. \\| (___ \\_| # |", "\\ `.___.'\\ _| |_\\/_| |_ _| |_.' /| \\____) | # `.____ .'|_____||_____||______.'", "# also log to stdout because docker root = logging.getLogger() root.setLevel(logging.INFO) ch =", "\\_| | \\/ | | | `. \\| (___ \\_| # | |", "= parser.parse_args() # call the command with our args ret = getattr(sys.modules[__name__], 'cmd_{0}'.format(", "def after_request(response): \"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] = '*' return response @app.route('/')", "app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\"", "= int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx = client.spend(kp,", "\\______.' # def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if __name__ ==", "up accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance})", "aeternity from aeternity.epoch import EpochClient from aeternity.signing import KeyPair, is_valid_hash from aeternity.config import", "250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an account\"\"\" #", "int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an account\"\"\"", "# \\ `.___.'\\ _| |_\\/_| |_ _| |_.' 
/| \\____) | # `.____", "'%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING)", "hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top", "'help': 'start the top up service', 'opts': [] } ] parser = argparse.ArgumentParser()", "logging import argparse # flask from flask import Flask, jsonify, render_template # aeternity", "rest_faucet(recipient_address): \"\"\"top up an account\"\"\" # recipient_address = request.form.get(\"account\") # validate the address", "| | _.____`. # \\ `.___.'\\ _| |_\\/_| |_ _| |_.' /| \\____)", "def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': cmds", "= subparsers.add_parser(c['name'], help=c['help']) # add the sub arguments for sa in c.get('opts', []):", "parse the arguments args = parser.parse_args() # call the command with our args", "int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance =", "# logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] = '*'", "__name__ == '__main__': cmds = [ { 'name': 'start', 'help': 'start the top", "= '*' return response @app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html',", "request.form.get(\"account\") # validate the address if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'):", "# `.____ .'|_____||_____||______.' \\______.' # def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000)", "subparsers.add_parser(c['name'], help=c['help']) # add the sub arguments for sa in c.get('opts', []): subp.add_argument(*sa['names'],", "jsonify, render_template # aeternity from aeternity.epoch import EpochClient from aeternity.signing import KeyPair, is_valid_hash", "] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest = 'command'", "amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash:", "balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return", "python3 import os import sys import logging import argparse # flask from flask", "'command' # register all the commands for c in cmds: subp = subparsers.add_parser(c['name'],", "# ______ ____ ____ ______ ______ # .' 
___ ||_ \\ / _||_", "or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400 # genesys key bank_wallet_key", "cmds = [ { 'name': 'start', 'help': 'start the top up service', 'opts':", "[] } ] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest", "root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': cmds = [", "<reponame>thecaliconoire/aepp-faucet<filename>faucet.py #!/usr/bin/env python3 import os import sys import logging import argparse # flask", "aeternity.epoch import EpochClient from aeternity.signing import KeyPair, is_valid_hash from aeternity.config import Config #", "CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] = '*' return response @app.route('/') def hello(name=None): amount", "| | |\\ /| | | | | | _.____`. # \\ `.___.'\\", "from aeternity.epoch import EpochClient from aeternity.signing import KeyPair, is_valid_hash from aeternity.config import Config", "methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an account\"\"\" # recipient_address = request.form.get(\"account\") # validate", "/ _||_ _ `. .' ____ \\ # / .' \\_| | \\/", "# call the command with our args ret = getattr(sys.modules[__name__], 'cmd_{0}'.format( args.command.replace('-', '_')))(args)", "%(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING)", "| _.____`. # \\ `.___.'\\ _| |_\\/_| |_ _| |_.' /| \\____) |", "| | `. \\| (___ \\_| # | | | |\\ /| |", "render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an account\"\"\" # recipient_address =", "tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address}", "_ `. .' ____ \\ # / .' \\_| | \\/ | |", "`. \\| (___ \\_| # | | | |\\ /| | | |", "sys import logging import argparse # flask from flask import Flask, jsonify, render_template", "also log to stdout because docker root = logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout)", "| | | | _.____`. # \\ `.___.'\\ _| |_\\/_| |_ _| |_.'", "'name': 'start', 'help': 'start the top up service', 'opts': [] } ] parser", "@app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def", "== '__main__': cmds = [ { 'name': 'start', 'help': 'start the top up", "# aeternity from aeternity.epoch import EpochClient from aeternity.signing import KeyPair, is_valid_hash from aeternity.config", "from flask import Flask, jsonify, render_template # aeternity from aeternity.epoch import EpochClient from", "register all the commands for c in cmds: subp = subparsers.add_parser(c['name'], help=c['help']) #", "\\____) | # `.____ .'|_____||_____||______.' \\______.' 
# def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\")", "service', 'opts': [] } ] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required =", "not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400 # genesys key bank_wallet_key =", "_| |_.' /| \\____) | # `.____ .'|_____||_____||______.' \\______.' # def cmd_start(args=None): root.addHandler(app.logger)", "'__main__': cmds = [ { 'name': 'start', 'help': 'start the top up service',", "app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': cmds = [ { 'name': 'start', 'help':", "target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl", "os import sys import logging import argparse # flask from flask import Flask,", "recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of {amount} tx_ttl:{ttl}", "arguments for sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the", "logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter)", "\\/ | | | `. \\| (___ \\_| # | | | |\\", "recipient_address = request.form.get(\"account\") # validate the address if len(recipient_address.strip()) < 3 or not", "ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s -", "external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100))", "client = EpochClient() tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top", "[]): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the arguments args = parser.parse_args() #", "`.____ .'|_____||_____||______.' \\______.' # def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if", "parser.parse_args() # call the command with our args ret = getattr(sys.modules[__name__], 'cmd_{0}'.format( args.command.replace('-',", "_.____`. # \\ `.___.'\\ _| |_\\/_| |_ _| |_.' /| \\____) | #", "arguments args = parser.parse_args() # call the command with our args ret =", "# | | | |\\ /| | | | | | _.____`. #", "= logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s", "amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx =", ".'|_____||_____||______.' \\______.' # def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if __name__", "|_ _| |_.' /| \\____) | # `.____ .'|_____||_____||______.' \\______.' 
# def cmd_start(args=None):", "= argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest = 'command' # register", "key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\")", "KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount = int(os.environ.get('TOPUP_AMOUNT',", "| | | _.____`. # \\ `.___.'\\ _| |_\\/_| |_ _| |_.' /|", "balance}) # ______ ____ ____ ______ ______ # .' ___ ||_ \\ /", "log to stdout because docker root = logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO)", "# logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin']", "| `. \\| (___ \\_| # | | | |\\ /| | |", "\"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] = '*' return response @app.route('/') def hello(name=None):", "= response.headers header['Access-Control-Allow-Origin'] = '*' return response @app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT',", "int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx = client.spend(kp, recipient_address,", "in cmds: subp = subparsers.add_parser(c['name'], help=c['help']) # add the sub arguments for sa", "= logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app =", "node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl =", "port=5000) if __name__ == '__main__': cmds = [ { 'name': 'start', 'help': 'start", "{amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______ ____ ____", "\"\"\"top up an account\"\"\" # recipient_address = request.form.get(\"account\") # validate the address if", "/| \\____) | # `.____ .'|_____||_____||______.' \\______.' # def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service", "\"balance\": balance}) # ______ ____ ____ ______ ______ # .' ___ ||_ \\", ".' ___ ||_ \\ / _||_ _ `. .' ____ \\ # /", "\"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100)) client", "# add the sub arguments for sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'),", "| # `.____ .'|_____||_____||______.' \\______.' 
# def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0',", "\"bad request\"}), 400 # genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) #", "tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\")", "from aeternity.config import Config # also log to stdout because docker root =", "client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of {amount}", "for c in cmds: subp = subparsers.add_parser(c['name'], help=c['help']) # add the sub arguments", "| | | `. \\| (___ \\_| # | | | |\\ /|", "(___ \\_| # | | | |\\ /| | | | | |", "[ { 'name': 'start', 'help': 'start the top up service', 'opts': [] }", "kp = KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount", "docker root = logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s", "address if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}),", "request\"}), 400 # genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target", "service started\") app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': cmds = [ { 'name':", "in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the arguments args =", "action=sa.get('action'), default=sa.get('default')) # parse the arguments args = parser.parse_args() # call the command", "the commands for c in cmds: subp = subparsers.add_parser(c['name'], help=c['help']) # add the", "if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400", "| \\/ | | | `. \\| (___ \\_| # | | |", "parser.add_subparsers() subparsers.required = True subparsers.dest = 'command' # register all the commands for", "%(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR)", "= int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an", "/| | | | | | _.____`. # \\ `.___.'\\ _| |_\\/_| |_", "Config # also log to stdout because docker root = logging.getLogger() root.setLevel(logging.INFO) ch", "# / .' \\_| | \\/ | | | `. 
\\| (___ \\_|", "top up service', 'opts': [] } ] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers()", "ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch)", "render_template # aeternity from aeternity.epoch import EpochClient from aeternity.signing import KeyPair, is_valid_hash from", "- %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request", "sub arguments for sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse", "formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app", "c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the arguments args = parser.parse_args()", "- %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) #", "logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header = response.headers", "logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__)", "#!/usr/bin/env python3 import os import sys import logging import argparse # flask from", "`. .' ____ \\ # / .' \\_| | \\/ | | |", "cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': cmds =", "# parse the arguments args = parser.parse_args() # call the command with our", "'*' return response @app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount)", "# genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config(", "help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the arguments args = parser.parse_args() # call the", "client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx,", "aeternity.signing import KeyPair, is_valid_hash from aeternity.config import Config # also log to stdout", "subparsers.dest = 'command' # register all the commands for c in cmds: subp", "subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest = 'command' # register all the", "|_\\/_| |_ _| |_.' /| \\____) | # `.____ .'|_____||_____||______.' \\______.' 
# def", "Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header =", "help=c['help']) # add the sub arguments for sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'],", "EpochClient from aeternity.signing import KeyPair, is_valid_hash from aeternity.config import Config # also log", "bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") ))", "|_.' /| \\____) | # `.____ .'|_____||_____||______.' \\______.' # def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet", "logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] =", "= KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount =", "\\_| # | | | |\\ /| | | | | | _.____`.", "____ ____ ______ ______ # .' ___ ||_ \\ / _||_ _ `.", "the sub arguments for sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) #", "prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400 # genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp", "# flask from flask import Flask, jsonify, render_template # aeternity from aeternity.epoch import", "amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx", "logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s -", "len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400 #", "= parser.add_subparsers() subparsers.required = True subparsers.dest = 'command' # register all the commands", "args = parser.parse_args() # call the command with our args ret = getattr(sys.modules[__name__],", "after_request(response): \"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] = '*' return response @app.route('/') def", "|\\ /| | | | | | _.____`. 
# \\ `.___.'\\ _| |_\\/_|", "the arguments args = parser.parse_args() # call the command with our args ret", "ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response):", "argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest = 'command' # register all", "= EpochClient() tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up", "'start', 'help': 'start the top up service', 'opts': [] } ] parser =", "@app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] = '*' return response", "import Flask, jsonify, render_template # aeternity from aeternity.epoch import EpochClient from aeternity.signing import", "jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______ ____ ____ ______ ______ # .' ___", "sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the arguments args", "EpochClient() tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont", "100)) client = EpochClient() tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address)", "import EpochClient from aeternity.signing import KeyPair, is_valid_hash from aeternity.config import Config # also", "account\"\"\" # recipient_address = request.form.get(\"account\") # validate the address if len(recipient_address.strip()) < 3", "= client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance = client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of", "stdout because docker root = logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter =", "subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the arguments args = parser.parse_args() # call", "subp = subparsers.add_parser(c['name'], help=c['help']) # add the sub arguments for sa in c.get('opts',", "accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance}) #", "# register all the commands for c in cmds: subp = subparsers.add_parser(c['name'], help=c['help'])", "%(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def", "'start the top up service', 'opts': [] } ] parser = argparse.ArgumentParser() subparsers", "jsonify({\"message\": \"bad request\"}), 400 # genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key)", "= int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl) balance", "flask import Flask, jsonify, render_template # aeternity from aeternity.epoch import EpochClient from 
aeternity.signing", "KeyPair, is_valid_hash from aeternity.config import Config # also log to stdout because docker", "ttl = int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx = client.spend(kp, recipient_address, amount, tx_ttl=ttl)", "= client.get_balance(account_pubkey=recipient_address) logging.info(f\"top up accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\":", "`.___.'\\ _| |_\\/_| |_ _| |_.' /| \\____) | # `.____ .'|_____||_____||______.' \\______.'", "return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an account\"\"\" # recipient_address", "root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s", "response @app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST'])", "- %(levelname)s - %(message)s') ch.setFormatter(formatter) root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) #", "import os import sys import logging import argparse # flask from flask import", "if __name__ == '__main__': cmds = [ { 'name': 'start', 'help': 'start the", "up service', 'opts': [] } ] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required", "is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400 # genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY')", "| | | |\\ /| | | | | | _.____`. # \\", "header = response.headers header['Access-Control-Allow-Origin'] = '*' return response @app.route('/') def hello(name=None): amount =", "'opts': [] } ] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required = True", "= logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s - %(name)s - %(levelname)s - %(message)s')", "= 'command' # register all the commands for c in cmds: subp =", "add the sub arguments for sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default'))", "cmds: subp = subparsers.add_parser(c['name'], help=c['help']) # add the sub arguments for sa in", "/ .' \\_| | \\/ | | | `. \\| (___ \\_| #", "import logging import argparse # flask from flask import Flask, jsonify, render_template #", "the address if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad", "subparsers.required = True subparsers.dest = 'command' # register all the commands for c", "return jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______ ____ ____ ______ ______ # .'", "= True subparsers.dest = 'command' # register all the commands for c in", "validate the address if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\":", "import argparse # flask from flask import Flask, jsonify, render_template # aeternity from", "of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______ ____", "______ ______ # .' ___ ||_ \\ / _||_ _ `. .' 
____", "aeternity.config import Config # also log to stdout because docker root = logging.getLogger()", "{recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______", "import sys import logging import argparse # flask from flask import Flask, jsonify,", "because docker root = logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter(", "| | | | | _.____`. # \\ `.___.'\\ _| |_\\/_| |_ _|", "= Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header", "c in cmds: subp = subparsers.add_parser(c['name'], help=c['help']) # add the sub arguments for", "tx, \"balance\": balance}) # ______ ____ ____ ______ ______ # .' ___ ||_", "root = logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter = logging.Formatter( '%(asctime)s -", "for sa in c.get('opts', []): subp.add_argument(*sa['names'], help=sa['help'], action=sa.get('action'), default=sa.get('default')) # parse the arguments", "logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': cmds = [ {", "True subparsers.dest = 'command' # register all the commands for c in cmds:", "\\ / _||_ _ `. .' ____ \\ # / .' \\_| |", "\\| (___ \\_| # | | | |\\ /| | | | |", "all the commands for c in cmds: subp = subparsers.add_parser(c['name'], help=c['help']) # add", "flask from flask import Flask, jsonify, render_template # aeternity from aeternity.epoch import EpochClient", "from aeternity.signing import KeyPair, is_valid_hash from aeternity.config import Config # also log to", "commands for c in cmds: subp = subparsers.add_parser(c['name'], help=c['help']) # add the sub", "< 3 or not is_valid_hash(recipient_address, prefix='ak'): return jsonify({\"message\": \"bad request\"}), 400 # genesys", ")) # amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100)) client =", "return response @app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>',", "} ] parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparsers.required = True subparsers.dest =", "tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______ ____ ____ ______ ______", "250)) ttl = int(os.environ.get('TX_TTL', 100)) client = EpochClient() tx = client.spend(kp, recipient_address, amount,", "| |\\ /| | | | | | _.____`. 
# \\ `.___.'\\ _|", "amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up", "an account\"\"\" # recipient_address = request.form.get(\"account\") # validate the address if len(recipient_address.strip()) <", "{ 'name': 'start', 'help': 'start the top up service', 'opts': [] } ]", "started\") app.run(host='0.0.0.0', port=5000) if __name__ == '__main__': cmds = [ { 'name': 'start',", "def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return render_template('index.html', amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address):", ".' ____ \\ # / .' \\_| | \\/ | | | `.", "tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\": balance}) # ______ ____ ____ ______", "______ ____ ____ ______ ______ # .' ___ ||_ \\ / _||_ _", "root.addHandler(ch) app = Flask(__name__) logging.getLogger(\"aeternity.epoch\").setLevel(logging.WARNING) # logging.getLogger(\"urllib3.connectionpool\").setLevel(logging.WARNING) # logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable", "# amount amount = int(os.environ.get('TOPUP_AMOUNT', 250)) ttl = int(os.environ.get('TX_TTL', 100)) client = EpochClient()", "os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount", "= [ { 'name': 'start', 'help': 'start the top up service', 'opts': []", "import KeyPair, is_valid_hash from aeternity.config import Config # also log to stdout because", "= request.form.get(\"account\") # validate the address if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address,", "return jsonify({\"message\": \"bad request\"}), 400 # genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp =", "____ ______ ______ # .' ___ ||_ \\ / _||_ _ `. .'", "import Config # also log to stdout because docker root = logging.getLogger() root.setLevel(logging.INFO)", "logging.getLogger(\"engineio\").setLevel(logging.ERROR) @app.after_request def after_request(response): \"\"\"enable CORS\"\"\" header = response.headers header['Access-Control-Allow-Origin'] = '*' return", "# target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL', \"https://sdk-testnet.aepp<EMAIL>\") )) # amount amount = int(os.environ.get('TOPUP_AMOUNT', 250))", "# def cmd_start(args=None): root.addHandler(app.logger) logging.info(\"faucet service started\") app.run(host='0.0.0.0', port=5000) if __name__ == '__main__':", "genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target node Config.set_defaults(Config( external_url=os.environ.get('EPOCH_URL',", "@app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an account\"\"\" # recipient_address = request.form.get(\"account\") #", "____ \\ # / .' \\_| | \\/ | | | `. \\|", "amount=amount) @app.route('/account/<recipient_address>', methods=['POST']) def rest_faucet(recipient_address): \"\"\"top up an account\"\"\" # recipient_address = request.form.get(\"account\")", "# .' ___ ||_ \\ / _||_ _ `. .' 
____ \\ #", "# recipient_address = request.form.get(\"account\") # validate the address if len(recipient_address.strip()) < 3 or", "______ # .' ___ ||_ \\ / _||_ _ `. .' ____ \\", "Flask, jsonify, render_template # aeternity from aeternity.epoch import EpochClient from aeternity.signing import KeyPair,", "400 # genesys key bank_wallet_key = os.environ.get('BANK_WALLET_KEY') kp = KeyPair.from_private_key_string(bank_wallet_key) # target node", "logging.info(f\"top up accont {recipient_address} of {amount} tx_ttl:{ttl} tx_hash: {tx}\") return jsonify({\"tx_hash\": tx, \"balance\":", "\\ # / .' \\_| | \\/ | | | `. \\| (___", "to stdout because docker root = logging.getLogger() root.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.INFO) formatter", "header['Access-Control-Allow-Origin'] = '*' return response @app.route('/') def hello(name=None): amount = int(os.environ.get('TOPUP_AMOUNT', 250)) return", "argparse # flask from flask import Flask, jsonify, render_template # aeternity from aeternity.epoch", "_||_ _ `. .' ____ \\ # / .' \\_| | \\/ |", "# validate the address if len(recipient_address.strip()) < 3 or not is_valid_hash(recipient_address, prefix='ak'): return" ]
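# A minimal usage sketch for the endpoint above, assuming the faucet is
# running locally on port 5000. The ak_... address is a hypothetical
# placeholder, and `requests` is an extra dependency not used by faucet.py.
import requests

recipient = "ak_..."  # replace with a real ak_-prefixed account address
resp = requests.post("http://localhost:5000/account/{}".format(recipient))
print(resp.status_code, resp.json())  # expects {"tx_hash": ..., "balance": ...} on success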
[ "{ \"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2, } weighted_mean = 0 for api,", "weighted_mean/len(api_responses) # Ignoring a particular api response def coalesce_mean_ignore_api1(api_responses): filtered = {k: v", "api_responses.items(): weighted_mean += weights[api] * value return weighted_mean/len(api_responses) # Ignoring a particular api", "weights[api] * value return weighted_mean/len(api_responses) # Ignoring a particular api response def coalesce_mean_ignore_api1(api_responses):", "a particular api response def coalesce_mean_ignore_api1(api_responses): filtered = {k: v for k, v", "name def coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2, }", "api, value in api_responses.items(): weighted_mean += weights[api] * value return weighted_mean/len(api_responses) # Ignoring", "in api_responses.items(): weighted_mean += weights[api] * value return weighted_mean/len(api_responses) # Ignoring a particular", "# Ignoring a particular api response def coalesce_mean_ignore_api1(api_responses): filtered = {k: v for", "statistics import mean # Weighted by api name def coalesce_weighted_mean(api_responses): weights = {", "from statistics import mean # Weighted by api name def coalesce_weighted_mean(api_responses): weights =", "* value return weighted_mean/len(api_responses) # Ignoring a particular api response def coalesce_mean_ignore_api1(api_responses): filtered", "\"api2\": 0.3, \"api3\": 0.2, } weighted_mean = 0 for api, value in api_responses.items():", "0.2, } weighted_mean = 0 for api, value in api_responses.items(): weighted_mean += weights[api]", "0.3, \"api3\": 0.2, } weighted_mean = 0 for api, value in api_responses.items(): weighted_mean", "\"api3\": 0.2, } weighted_mean = 0 for api, value in api_responses.items(): weighted_mean +=", "def coalesce_mean_ignore_api1(api_responses): filtered = {k: v for k, v in api_responses.items() if k.find(\"api1\")", "filtered = {k: v for k, v in api_responses.items() if k.find(\"api1\") == -1}", "by api name def coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5, \"api2\": 0.3, \"api3\":", "= {k: v for k, v in api_responses.items() if k.find(\"api1\") == -1} return", "coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2, } weighted_mean =", "weighted_mean += weights[api] * value return weighted_mean/len(api_responses) # Ignoring a particular api response", "\"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2, } weighted_mean = 0 for api, value", "Weighted by api name def coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5, \"api2\": 0.3,", "value return weighted_mean/len(api_responses) # Ignoring a particular api response def coalesce_mean_ignore_api1(api_responses): filtered =", "return weighted_mean/len(api_responses) # Ignoring a particular api response def coalesce_mean_ignore_api1(api_responses): filtered = {k:", "0.5, \"api2\": 0.3, \"api3\": 0.2, } weighted_mean = 0 for api, value in", "mean # Weighted by api name def coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5,", "} weighted_mean = 0 for api, value in api_responses.items(): weighted_mean += weights[api] *", "0 for api, value in api_responses.items(): weighted_mean += weights[api] * value return weighted_mean/len(api_responses)", "api response def coalesce_mean_ignore_api1(api_responses): filtered = {k: v for k, v in api_responses.items()", "def coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2, } weighted_mean", 
"coalesce_mean_ignore_api1(api_responses): filtered = {k: v for k, v in api_responses.items() if k.find(\"api1\") ==", "+= weights[api] * value return weighted_mean/len(api_responses) # Ignoring a particular api response def", "weighted_mean = 0 for api, value in api_responses.items(): weighted_mean += weights[api] * value", "= 0 for api, value in api_responses.items(): weighted_mean += weights[api] * value return", "# Weighted by api name def coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5, \"api2\":", "response def coalesce_mean_ignore_api1(api_responses): filtered = {k: v for k, v in api_responses.items() if", "{k: v for k, v in api_responses.items() if k.find(\"api1\") == -1} return mean(filtered.values())", "weights = { \"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2, } weighted_mean = 0", "particular api response def coalesce_mean_ignore_api1(api_responses): filtered = {k: v for k, v in", "import mean # Weighted by api name def coalesce_weighted_mean(api_responses): weights = { \"api1\":", "api name def coalesce_weighted_mean(api_responses): weights = { \"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2,", "for api, value in api_responses.items(): weighted_mean += weights[api] * value return weighted_mean/len(api_responses) #", "= { \"api1\": 0.5, \"api2\": 0.3, \"api3\": 0.2, } weighted_mean = 0 for", "value in api_responses.items(): weighted_mean += weights[api] * value return weighted_mean/len(api_responses) # Ignoring a", "Ignoring a particular api response def coalesce_mean_ignore_api1(api_responses): filtered = {k: v for k," ]
[ "# need to detect which version is being used: from sys import version_info", "sys import version_info if version_info[0] == 3: PY3 = True # elif version_info[0]", "if version_info[0] == 3: PY3 = True # elif version_info[0] == 2: #", "version of \" \"Python is not 3. This is not permitted. \" \"sys.version_info", "import syntax changes slightly between python 2 and 3, so we # need", "True # elif version_info[0] == 2: # PY3 = False else: raise EnvironmentError(\"sys.version_info", "# The import syntax changes slightly between python 2 and 3, so we", "# Author <NAME> # The import syntax changes slightly between python 2 and", "EnvironmentError(\"sys.version_info refers to a version of \" \"Python is not 3. This is", "== 2: # PY3 = False else: raise EnvironmentError(\"sys.version_info refers to a version", "need to detect which version is being used: from sys import version_info if", "of \" \"Python is not 3. This is not permitted. \" \"sys.version_info =", "version_info[0] == 3: PY3 = True # elif version_info[0] == 2: # PY3", "PY3 = True # elif version_info[0] == 2: # PY3 = False else:", "a version of \" \"Python is not 3. This is not permitted. \"", "2 and 3, so we # need to detect which version is being", "= False else: raise EnvironmentError(\"sys.version_info refers to a version of \" \"Python is", "which version is being used: from sys import version_info if version_info[0] == 3:", "3: PY3 = True # elif version_info[0] == 2: # PY3 = False", "The import syntax changes slightly between python 2 and 3, so we #", "so we # need to detect which version is being used: from sys", "refers to a version of \" \"Python is not 3. This is not", "# PY3 = False else: raise EnvironmentError(\"sys.version_info refers to a version of \"", "is not 3. This is not permitted. \" \"sys.version_info = {}\".format(version_info)) from .libQasm", "is being used: from sys import version_info if version_info[0] == 3: PY3 =", "version_info if version_info[0] == 3: PY3 = True # elif version_info[0] == 2:", "\"Python is not 3. This is not permitted. \" \"sys.version_info = {}\".format(version_info)) from", "\" \"Python is not 3. This is not permitted. \" \"sys.version_info = {}\".format(version_info))", "# elif version_info[0] == 2: # PY3 = False else: raise EnvironmentError(\"sys.version_info refers", "Author <NAME> # The import syntax changes slightly between python 2 and 3,", "raise EnvironmentError(\"sys.version_info refers to a version of \" \"Python is not 3. This", "== 3: PY3 = True # elif version_info[0] == 2: # PY3 =", "we # need to detect which version is being used: from sys import", "version is being used: from sys import version_info if version_info[0] == 3: PY3", "2: # PY3 = False else: raise EnvironmentError(\"sys.version_info refers to a version of", "and 3, so we # need to detect which version is being used:", "python 2 and 3, so we # need to detect which version is", "to detect which version is being used: from sys import version_info if version_info[0]", "detect which version is being used: from sys import version_info if version_info[0] ==", "version_info[0] == 2: # PY3 = False else: raise EnvironmentError(\"sys.version_info refers to a", "import version_info if version_info[0] == 3: PY3 = True # elif version_info[0] ==", "3, so we # need to detect which version is being used: from", "False else: raise EnvironmentError(\"sys.version_info refers to a version of \" \"Python is not", "PY3 = False else: raise EnvironmentError(\"sys.version_info refers to a version of \" \"Python", "not 3. 
This is not permitted. \" \"sys.version_info = {}\".format(version_info)) from .libQasm import", "between python 2 and 3, so we # need to detect which version", "used: from sys import version_info if version_info[0] == 3: PY3 = True #", "slightly between python 2 and 3, so we # need to detect which", "elif version_info[0] == 2: # PY3 = False else: raise EnvironmentError(\"sys.version_info refers to", "syntax changes slightly between python 2 and 3, so we # need to", "being used: from sys import version_info if version_info[0] == 3: PY3 = True", "from sys import version_info if version_info[0] == 3: PY3 = True # elif", "3. This is not permitted. \" \"sys.version_info = {}\".format(version_info)) from .libQasm import libQasm", "else: raise EnvironmentError(\"sys.version_info refers to a version of \" \"Python is not 3.", "= True # elif version_info[0] == 2: # PY3 = False else: raise", "to a version of \" \"Python is not 3. This is not permitted.", "changes slightly between python 2 and 3, so we # need to detect", "<NAME> # The import syntax changes slightly between python 2 and 3, so" ]
[ "def response(self, r): packet = '' print \"DOM\", self.domain nb = len(r) if", "re.compile(pattern) class DNSQuery: def __init__(self, data): self.data = data self.domain = '' t", "packet += self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and Answers Counts packet", "min(nb, 256*256) d,r = divmod(nb, 256) s = chr(d)+chr(r) return s # We", "service.get('state_id') print \"DNS state_id\", state_id if state_id == 0: addr = n['addr'] #", ">> 3) & 15 # Opcode bits if t == 0: # Standard", "= '' print \"DOM\", self.domain nb = len(r) if self.domain: packet += self.data[:2]", "found print 'DNS cannot find the hotname ip', addr # skip this node", "tag in n['tags']: services = n.get('services', {}) state_id = 0 if tag in", "services = n.get('services', {}) state_id = 0 if tag in services: service =", "add it if ipv4pattern.match(addr): r.append(addr) else: # else try to resolv it first", "if tag in services: service = services[tag] state_id = service.get('state_id') print \"DNS state_id\",", "it if ipv4pattern.match(addr): r.append(addr) else: # else try to resolv it first try:", "3) & 15 # Opcode bits if t == 0: # Standard query", "R:\", r return r def response(self, r): packet = '' print \"DOM\", self.domain", "tag def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')] print", "r def response(self, r): packet = '' print \"DOM\", self.domain nb = len(r)", "def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')] print \"DNS", "ip in r: packet += '\\xc0\\x0c' # Pointer to domain name packet +=", "print \"DNS lookup for tag\", tag r = [] for n in nodes.values():", "already an ip, add it if ipv4pattern.match(addr): r.append(addr) else: # else try to", "ip, add it if ipv4pattern.match(addr): r.append(addr) else: # else try to resolv it", "else try to resolv it first try: addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror:", "'\\x00\\x00\\x00\\x00' # Questions and Answers Counts packet += self.data[12:] # Original Domain Name", "import socket pattern = r\"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery: def", "\"DNS state_id\", state_id if state_id == 0: addr = n['addr'] # If already", "packet = '' print \"DOM\", self.domain nb = len(r) if self.domain: packet +=", "tag = self.domain[:-len('.kunai.')] print \"DNS lookup for tag\", tag r = [] for", "if state_id == 0: addr = n['addr'] # If already an ip, add", "the hotname ip', addr # skip this node print \"DNS R:\", r return", "r\"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery: def __init__(self, data): self.data =", "(ord(data[2]) >> 3) & 15 # Opcode bits if t == 0: #", "service = services[tag] state_id = service.get('state_id') print \"DNS state_id\", state_id if state_id ==", "Domain Name Question for ip in r: packet += '\\xc0\\x0c' # Pointer to", "resource data length -> 4 bytes packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) #", "= self.domain[:-len('.kunai.')] print \"DNS lookup for tag\", tag r = [] for n", "hotname ip', addr # skip this node print \"DNS R:\", r return r", "def _get_size_hex(self, nb): nb = min(nb, 256*256) d,r = divmod(nb, 256) s =", "class DNSQuery: def __init__(self, data): self.data = data self.domain = '' t 
=", "+= '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and resource data length -> 4 bytes", "# else try to resolv it first try: addr = socket.gethostbyname(addr) r.append(addr) except", "type, ttl and resource data length -> 4 bytes packet += str.join('',map(lambda x:", "re import socket pattern = r\"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery:", "256*256) d,r = divmod(nb, 256) s = chr(d)+chr(r) return s # We look", "in nodes.values(): if tag in n['tags']: services = n.get('services', {}) state_id = 0", "nodes.values(): if tag in n['tags']: services = n.get('services', {}) state_id = 0 if", "n['addr'] # If already an ip, add it if ipv4pattern.match(addr): r.append(addr) else: #", ")\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery: def __init__(self, data): self.data = data self.domain", "tag r = [] for n in nodes.values(): if tag in n['tags']: services", "_get_size_hex(self, nb): nb = min(nb, 256*256) d,r = divmod(nb, 256) s = chr(d)+chr(r)", "== 0: # Standard query ini = 12 lon = ord(data[ini]) while lon", "4 bytes packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP return", "'\\xc0\\x0c' # Pointer to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl", "this node print \"DNS R:\", r return r def response(self, r): packet =", "self.domain: packet += self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00'", "# Original Domain Name Question for ip in r: packet += '\\xc0\\x0c' #", "data self.domain = '' t = (ord(data[2]) >> 3) & 15 # Opcode", "+= data[ini+1:ini+lon+1]+'.' 
ini += lon+1 lon = ord(data[ini]) def _get_size_hex(self, nb): nb =", "0 if tag in services: service = services[tag] state_id = service.get('state_id') print \"DNS", "if t == 0: # Standard query ini = 12 lon = ord(data[ini])", "0: addr = n['addr'] # If already an ip, add it if ipv4pattern.match(addr):", "def __init__(self, data): self.data = data self.domain = '' t = (ord(data[2]) >>", "= n.get('services', {}) state_id = 0 if tag in services: service = services[tag]", "try: addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not found print 'DNS cannot", "nodes for the good tag def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return []", "chr(d)+chr(r) return s # We look in the nodes for the good tag", "import re import socket pattern = r\"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class", "lon = ord(data[ini]) def _get_size_hex(self, nb): nb = min(nb, 256*256) d,r = divmod(nb,", "domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and resource data length", "return r def response(self, r): packet = '' print \"DOM\", self.domain nb =", "Name Question for ip in r: packet += '\\xc0\\x0c' # Pointer to domain", "and resource data length -> 4 bytes packet += str.join('',map(lambda x: chr(int(x)), ip.split('.')))", "\"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and Answers Counts", "(\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery: def __init__(self, data): self.data = data", "+= self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions", "packet += '\\xc0\\x0c' # Pointer to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response", "for tag\", tag r = [] for n in nodes.values(): if tag in", "to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and resource data", "+= lon+1 lon = ord(data[ini]) def _get_size_hex(self, nb): nb = min(nb, 256*256) d,r", "ip', addr # skip this node print \"DNS R:\", r return r def", "t = (ord(data[2]) >> 3) & 15 # Opcode bits if t ==", "ord(data[ini]) def _get_size_hex(self, nb): nb = min(nb, 256*256) d,r = divmod(nb, 256) s", "divmod(nb, 256) s = chr(d)+chr(r) return s # We look in the nodes", "except socket.gaierror: # not found print 'DNS cannot find the hotname ip', addr", "r): packet = '' print \"DOM\", self.domain nb = len(r) if self.domain: packet", "If already an ip, add it if ipv4pattern.match(addr): r.append(addr) else: # else try", "+= self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and Answers Counts packet +=", "= n['addr'] # If already an ip, add it if ipv4pattern.match(addr): r.append(addr) else:", "return [] tag = self.domain[:-len('.kunai.')] print \"DNS lookup for tag\", tag r =", "[] tag = self.domain[:-len('.kunai.')] print \"DNS lookup for tag\", tag r = []", "good tag def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')]", "nodes): if not self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')] print \"DNS lookup for", "cannot find the hotname ip', addr # skip this node print \"DNS R:\",", "state_id = 0 if tag in services: service = services[tag] state_id = service.get('state_id')", 
"find the hotname ip', addr # skip this node print \"DNS R:\", r", "while lon != 0: self.domain += data[ini+1:ini+lon+1]+'.' ini += lon+1 lon = ord(data[ini])", "+= self.data[12:] # Original Domain Name Question for ip in r: packet +=", "pattern = r\"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery: def __init__(self, data):", "socket pattern = r\"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery: def __init__(self,", "try to resolv it first try: addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror: #", "packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and resource data length -> 4", "& 15 # Opcode bits if t == 0: # Standard query ini", "to resolv it first try: addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not", "length -> 4 bytes packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of", "skip this node print \"DNS R:\", r return r def response(self, r): packet", "Pointer to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and resource", "= ord(data[ini]) def _get_size_hex(self, nb): nb = min(nb, 256*256) d,r = divmod(nb, 256)", "state_id\", state_id if state_id == 0: addr = n['addr'] # If already an", "-> 4 bytes packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP", "Original Domain Name Question for ip in r: packet += '\\xc0\\x0c' # Pointer", "self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and Answers Counts packet += self.data[12:]", "self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and", "tag\", tag r = [] for n in nodes.values(): if tag in n['tags']:", "the good tag def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return [] tag =", "state_id = service.get('state_id') print \"DNS state_id\", state_id if state_id == 0: addr =", "in services: service = services[tag] state_id = service.get('state_id') print \"DNS state_id\", state_id if", "[] for n in nodes.values(): if tag in n['tags']: services = n.get('services', {})", "{}) state_id = 0 if tag in services: service = services[tag] state_id =", "# Opcode bits if t == 0: # Standard query ini = 12", "s = chr(d)+chr(r) return s # We look in the nodes for the", "Standard query ini = 12 lon = ord(data[ini]) while lon != 0: self.domain", "15 # Opcode bits if t == 0: # Standard query ini =", "ipv4pattern.match(addr): r.append(addr) else: # else try to resolv it first try: addr =", "self.data[12:] # Original Domain Name Question for ip in r: packet += '\\xc0\\x0c'", "name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and resource data length ->", "Response type, ttl and resource data length -> 4 bytes packet += str.join('',map(lambda", "+ '\\x00\\x00\\x00\\x00' # Questions and Answers Counts packet += self.data[12:] # Original Domain", "\"DNS R:\", r return r def response(self, r): packet = '' print \"DOM\",", "DNSQuery: def __init__(self, data): self.data = data self.domain = '' t = (ord(data[2])", "self.domain = '' t = (ord(data[2]) >> 3) & 15 # Opcode bits", "# We look in the nodes for the good tag def lookup_for_nodes(self, nodes):", "print \"DOM\", self.domain nb = len(r) if self.domain: 
packet += self.data[:2] + \"\\x81\\x80\"", "__init__(self, data): self.data = data self.domain = '' t = (ord(data[2]) >> 3)", "# skip this node print \"DNS R:\", r return r def response(self, r):", "self.domain += data[ini+1:ini+lon+1]+'.' ini += lon+1 lon = ord(data[ini]) def _get_size_hex(self, nb): nb", "resolv it first try: addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not found", "print 'DNS cannot find the hotname ip', addr # skip this node print", "s # We look in the nodes for the good tag def lookup_for_nodes(self,", "+ \"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and Answers", "in r: packet += '\\xc0\\x0c' # Pointer to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04'", "= r\"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)([ (\\[]?(\\.|dot)[ )\\]]?(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3})\" ipv4pattern = re.compile(pattern) class DNSQuery: def __init__(self, data): self.data", "# Response type, ttl and resource data length -> 4 bytes packet +=", "= 12 lon = ord(data[ini]) while lon != 0: self.domain += data[ini+1:ini+lon+1]+'.' ini", "= divmod(nb, 256) s = chr(d)+chr(r) return s # We look in the", "packet += self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' #", "r = [] for n in nodes.values(): if tag in n['tags']: services =", "r.append(addr) else: # else try to resolv it first try: addr = socket.gethostbyname(addr)", "tag in services: service = services[tag] state_id = service.get('state_id') print \"DNS state_id\", state_id", "else: # else try to resolv it first try: addr = socket.gethostbyname(addr) r.append(addr)", "n in nodes.values(): if tag in n['tags']: services = n.get('services', {}) state_id =", "print \"DNS R:\", r return r def response(self, r): packet = '' print", "'\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and resource data length -> 4 bytes packet", "'' t = (ord(data[2]) >> 3) & 15 # Opcode bits if t", "+= '\\xc0\\x0c' # Pointer to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type,", "bits if t == 0: # Standard query ini = 12 lon =", "self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')] print \"DNS lookup for tag\", tag r", "0: # Standard query ini = 12 lon = ord(data[ini]) while lon !=", "len(r) if self.domain: packet += self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb)", "addr # skip this node print \"DNS R:\", r return r def response(self,", "packet += self.data[12:] # Original Domain Name Question for ip in r: packet", "n.get('services', {}) state_id = 0 if tag in services: service = services[tag] state_id", "lon != 0: self.domain += data[ini+1:ini+lon+1]+'.' 
ini += lon+1 lon = ord(data[ini]) def", "ini += lon+1 lon = ord(data[ini]) def _get_size_hex(self, nb): nb = min(nb, 256*256)", "256) s = chr(d)+chr(r) return s # We look in the nodes for", "'' print \"DOM\", self.domain nb = len(r) if self.domain: packet += self.data[:2] +", "an ip, add it if ipv4pattern.match(addr): r.append(addr) else: # else try to resolv", "\"DOM\", self.domain nb = len(r) if self.domain: packet += self.data[:2] + \"\\x81\\x80\" packet", "self.domain nb = len(r) if self.domain: packet += self.data[:2] + \"\\x81\\x80\" packet +=", "# Standard query ini = 12 lon = ord(data[ini]) while lon != 0:", "data length -> 4 bytes packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes", "# Pointer to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' # Response type, ttl and", "= re.compile(pattern) class DNSQuery: def __init__(self, data): self.data = data self.domain = ''", "the nodes for the good tag def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return", "if not self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')] print \"DNS lookup for tag\",", "lon = ord(data[ini]) while lon != 0: self.domain += data[ini+1:ini+lon+1]+'.' ini += lon+1", "0: self.domain += data[ini+1:ini+lon+1]+'.' ini += lon+1 lon = ord(data[ini]) def _get_size_hex(self, nb):", "if self.domain: packet += self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6] + self._get_size_hex(nb) +", "= data self.domain = '' t = (ord(data[2]) >> 3) & 15 #", "socket.gaierror: # not found print 'DNS cannot find the hotname ip', addr #", "ini = 12 lon = ord(data[ini]) while lon != 0: self.domain += data[ini+1:ini+lon+1]+'.'", "nb = len(r) if self.domain: packet += self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6]", "+ self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and Answers Counts packet += self.data[12:] #", "# not found print 'DNS cannot find the hotname ip', addr # skip", "self._get_size_hex(nb) + '\\x00\\x00\\x00\\x00' # Questions and Answers Counts packet += self.data[12:] # Original", "query ini = 12 lon = ord(data[ini]) while lon != 0: self.domain +=", "= (ord(data[2]) >> 3) & 15 # Opcode bits if t == 0:", "self.domain[:-len('.kunai.')] print \"DNS lookup for tag\", tag r = [] for n in", "= 0 if tag in services: service = services[tag] state_id = service.get('state_id') print", "nb = min(nb, 256*256) d,r = divmod(nb, 256) s = chr(d)+chr(r) return s", "services: service = services[tag] state_id = service.get('state_id') print \"DNS state_id\", state_id if state_id", "n['tags']: services = n.get('services', {}) state_id = 0 if tag in services: service", "return s # We look in the nodes for the good tag def", "r: packet += '\\xc0\\x0c' # Pointer to domain name packet += '\\x00\\x01\\x00\\x01\\x00\\x00\\x00\\x3c\\x00\\x04' #", "= chr(d)+chr(r) return s # We look in the nodes for the good", "lon+1 lon = ord(data[ini]) def _get_size_hex(self, nb): nb = min(nb, 256*256) d,r =", "We look in the nodes for the good tag def lookup_for_nodes(self, nodes): if", "state_id if state_id == 0: addr = n['addr'] # If already an ip,", "= [] for n in nodes.values(): if tag in n['tags']: services = n.get('services',", "ord(data[ini]) while lon != 0: self.domain += data[ini+1:ini+lon+1]+'.' 
ini += lon+1 lon =", "Opcode bits if t == 0: # Standard query ini = 12 lon", "socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not found print 'DNS cannot find the hotname", "!= 0: self.domain += data[ini+1:ini+lon+1]+'.' ini += lon+1 lon = ord(data[ini]) def _get_size_hex(self,", "= socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not found print 'DNS cannot find the", "r return r def response(self, r): packet = '' print \"DOM\", self.domain nb", "data): self.data = data self.domain = '' t = (ord(data[2]) >> 3) &", "look in the nodes for the good tag def lookup_for_nodes(self, nodes): if not", "= '' t = (ord(data[2]) >> 3) & 15 # Opcode bits if", "and Answers Counts packet += self.data[12:] # Original Domain Name Question for ip", "ttl and resource data length -> 4 bytes packet += str.join('',map(lambda x: chr(int(x)),", "# If already an ip, add it if ipv4pattern.match(addr): r.append(addr) else: # else", "services[tag] state_id = service.get('state_id') print \"DNS state_id\", state_id if state_id == 0: addr", "addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not found print 'DNS cannot find", "data[ini+1:ini+lon+1]+'.' ini += lon+1 lon = ord(data[ini]) def _get_size_hex(self, nb): nb = min(nb,", "= ord(data[ini]) while lon != 0: self.domain += data[ini+1:ini+lon+1]+'.' ini += lon+1 lon", "it first try: addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not found print", "Question for ip in r: packet += '\\xc0\\x0c' # Pointer to domain name", "t == 0: # Standard query ini = 12 lon = ord(data[ini]) while", "= len(r) if self.domain: packet += self.data[:2] + \"\\x81\\x80\" packet += self.data[4:6] +", "lookup for tag\", tag r = [] for n in nodes.values(): if tag", "lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')] print \"DNS lookup", "in n['tags']: services = n.get('services', {}) state_id = 0 if tag in services:", "first try: addr = socket.gethostbyname(addr) r.append(addr) except socket.gaierror: # not found print 'DNS", "= min(nb, 256*256) d,r = divmod(nb, 256) s = chr(d)+chr(r) return s #", "= service.get('state_id') print \"DNS state_id\", state_id if state_id == 0: addr = n['addr']", "d,r = divmod(nb, 256) s = chr(d)+chr(r) return s # We look in", "response(self, r): packet = '' print \"DOM\", self.domain nb = len(r) if self.domain:", "== 0: addr = n['addr'] # If already an ip, add it if", "if tag in n['tags']: services = n.get('services', {}) state_id = 0 if tag", "Questions and Answers Counts packet += self.data[12:] # Original Domain Name Question for", "not self.domain.endswith('.kunai.'): return [] tag = self.domain[:-len('.kunai.')] print \"DNS lookup for tag\", tag", "for n in nodes.values(): if tag in n['tags']: services = n.get('services', {}) state_id", "node print \"DNS R:\", r return r def response(self, r): packet = ''", "bytes packet += str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP return packet", "for the good tag def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'): return [] tag", "= services[tag] state_id = service.get('state_id') print \"DNS state_id\", state_id if state_id == 0:", "print \"DNS state_id\", state_id if state_id == 0: addr = n['addr'] # If", "self.data = data self.domain = '' t = (ord(data[2]) >> 3) & 15", "state_id == 0: addr = n['addr'] # If already an ip, add it", "# Questions and Answers Counts packet += self.data[12:] # Original Domain Name 
Question", "Counts packet += self.data[12:] # Original Domain Name Question for ip in r:", "for ip in r: packet += '\\xc0\\x0c' # Pointer to domain name packet", "12 lon = ord(data[ini]) while lon != 0: self.domain += data[ini+1:ini+lon+1]+'.' ini +=", "addr = n['addr'] # If already an ip, add it if ipv4pattern.match(addr): r.append(addr)", "\"DNS lookup for tag\", tag r = [] for n in nodes.values(): if", "if ipv4pattern.match(addr): r.append(addr) else: # else try to resolv it first try: addr", "'DNS cannot find the hotname ip', addr # skip this node print \"DNS", "Answers Counts packet += self.data[12:] # Original Domain Name Question for ip in", "nb): nb = min(nb, 256*256) d,r = divmod(nb, 256) s = chr(d)+chr(r) return", "ipv4pattern = re.compile(pattern) class DNSQuery: def __init__(self, data): self.data = data self.domain =", "r.append(addr) except socket.gaierror: # not found print 'DNS cannot find the hotname ip',", "in the nodes for the good tag def lookup_for_nodes(self, nodes): if not self.domain.endswith('.kunai.'):", "not found print 'DNS cannot find the hotname ip', addr # skip this" ]
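# A minimal usage sketch (not from the original file): DNSQuery is written for
# Python 2 byte strings and expects one raw DNS query per UDP datagram. The
# port and the layout of the `nodes` dict below are assumptions inferred from
# how lookup_for_nodes() reads them.
def serve(nodes, port=5353):
    udps = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udps.bind(('', port))
    while True:
        data, addr = udps.recvfrom(1024)   # one DNS query per datagram
        query = DNSQuery(data)
        ips = query.lookup_for_nodes(nodes)
        udps.sendto(query.response(ips), addr)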
[ "StopIteration: return # Generous Bot - follows ppl back # for follower in", "<filename>02-Scripting/05-Twitter_Bot/twitter.py import tweepy import os import time from dotenv import load_dotenv load_dotenv() consumer_key", "2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I liked that", "# public_tweets = api.home_timeline() # for tweet in public_tweets: # print(tweet.text) # helper", "in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I liked that tweet') except tweepy.TweepError", "while True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it will stay and wait", "1000 ms except StopIteration: return # Generous Bot - follows ppl back #", "= os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth =", "os import time from dotenv import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret =", "yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it will stay and wait in this", "public_tweets: # print(tweet.text) # helper function def limit_handler(cursor): try: while True: yield cursor.next()", "in this line for 1000 ms except StopIteration: return # Generous Bot -", "if follower.name == \"<NAME>\": # follower.follow() # break # Narcissist Bot search_str =", "def limit_handler(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it will", "from dotenv import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token =", "for tweet in public_tweets: # print(tweet.text) # helper function def limit_handler(cursor): try: while", "import time from dotenv import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret')", "cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it will stay and wait in this line", "# it will stay and wait in this line for 1000 ms except", "for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I liked that tweet')", "os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret)", "# tweet.retweet() print('I liked that tweet') except tweepy.TweepError as e: print(e.reason) except StopIteration:", "try: while True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it will stay and", "# for follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name == \"<NAME>\": #", "number_of_tweets = 2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I", "import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret", "# Narcissist Bot search_str = '<NAME>' number_of_tweets = 2 for tweet in limit_handler(tweepy.Cursor(api.search,", "tweepy.API(auth) user = api.me() # print(user.name) # public_tweets = api.home_timeline() # for tweet", 
"load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret')", "= os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token,", "# Generous Bot - follows ppl back # for follower in limit_handler(tweepy.Cursor(api.followers).items()): #", "ppl back # for follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name ==", "in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name == \"<NAME>\": # follower.follow() # break", "function def limit_handler(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it", "and wait in this line for 1000 ms except StopIteration: return # Generous", "tweet.retweet() print('I liked that tweet') except tweepy.TweepError as e: print(e.reason) except StopIteration: break", "= 2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I liked", "= '<NAME>' number_of_tweets = 2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() #", "return # Generous Bot - follows ppl back # for follower in limit_handler(tweepy.Cursor(api.followers).items()):", "= tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user = api.me() # print(user.name)", "line for 1000 ms except StopIteration: return # Generous Bot - follows ppl", "tweepy import os import time from dotenv import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key')", "stay and wait in this line for 1000 ms except StopIteration: return #", "access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user", "public_tweets = api.home_timeline() # for tweet in public_tweets: # print(tweet.text) # helper function", "== \"<NAME>\": # follower.follow() # break # Narcissist Bot search_str = '<NAME>' number_of_tweets", "time from dotenv import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token", "dotenv import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token')", "print(tweet.text) # helper function def limit_handler(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError:", "auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user = api.me() #", "follows ppl back # for follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name", "consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user = api.me() # print(user.name) # public_tweets", "Narcissist Bot search_str = '<NAME>' number_of_tweets = 2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)):", "break # Narcissist Bot search_str = '<NAME>' number_of_tweets = 2 for tweet in", "for follower in 
limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name == \"<NAME>\": # follower.follow()", "consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret)", "auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user = api.me() # print(user.name) # public_tweets =", "limit_handler(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it will stay", "= os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user =", "'<NAME>' number_of_tweets = 2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet()", "Bot - follows ppl back # for follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) #", "# break # Narcissist Bot search_str = '<NAME>' number_of_tweets = 2 for tweet", "limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name == \"<NAME>\": # follower.follow() # break #", "load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret =", "this line for 1000 ms except StopIteration: return # Generous Bot - follows", "\"<NAME>\": # follower.follow() # break # Narcissist Bot search_str = '<NAME>' number_of_tweets =", "limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I liked that tweet') except tweepy.TweepError as", "os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user = api.me()", "Bot search_str = '<NAME>' number_of_tweets = 2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try:", "tweepy.RateLimitError: time.sleep(1000) # it will stay and wait in this line for 1000", "will stay and wait in this line for 1000 ms except StopIteration: return", "search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I liked that tweet') except tweepy.TweepError as e:", "# print(user.name) # public_tweets = api.home_timeline() # for tweet in public_tweets: # print(tweet.text)", "print(user.name) # public_tweets = api.home_timeline() # for tweet in public_tweets: # print(tweet.text) #", "tweet in public_tweets: # print(tweet.text) # helper function def limit_handler(cursor): try: while True:", "api.me() # print(user.name) # public_tweets = api.home_timeline() # for tweet in public_tweets: #", "in public_tweets: # print(tweet.text) # helper function def limit_handler(cursor): try: while True: yield", "# print(follower.name) # if follower.name == \"<NAME>\": # follower.follow() # break # Narcissist", "Generous Bot - follows ppl back # for follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name)", "tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) user = api.me() # print(user.name) #", "access_token_secret) api = tweepy.API(auth) user = api.me() # print(user.name) # public_tweets = api.home_timeline()", "ms except StopIteration: 
return # Generous Bot - follows ppl back # for", "search_str = '<NAME>' number_of_tweets = 2 for tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite()", "for 1000 ms except StopIteration: return # Generous Bot - follows ppl back", "# print(tweet.text) # helper function def limit_handler(cursor): try: while True: yield cursor.next() except", "print(follower.name) # if follower.name == \"<NAME>\": # follower.follow() # break # Narcissist Bot", "- follows ppl back # for follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if", "time.sleep(1000) # it will stay and wait in this line for 1000 ms", "it will stay and wait in this line for 1000 ms except StopIteration:", "except tweepy.RateLimitError: time.sleep(1000) # it will stay and wait in this line for", "access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api", "api.home_timeline() # for tweet in public_tweets: # print(tweet.text) # helper function def limit_handler(cursor):", "True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) # it will stay and wait in", "wait in this line for 1000 ms except StopIteration: return # Generous Bot", "helper function def limit_handler(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000) #", "tweet in limit_handler(tweepy.Cursor(api.search, search_str).items(number_of_tweets)): try: tweet.favorite() # tweet.retweet() print('I liked that tweet') except", "# helper function def limit_handler(cursor): try: while True: yield cursor.next() except tweepy.RateLimitError: time.sleep(1000)", "back # for follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name == \"<NAME>\":", "api = tweepy.API(auth) user = api.me() # print(user.name) # public_tweets = api.home_timeline() #", "= os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api =", "follower in limit_handler(tweepy.Cursor(api.followers).items()): # print(follower.name) # if follower.name == \"<NAME>\": # follower.follow() #", "tweet.favorite() # tweet.retweet() print('I liked that tweet') except tweepy.TweepError as e: print(e.reason) except", "import os import time from dotenv import load_dotenv load_dotenv() consumer_key = os.getenv('consumer_key') consumer_secret", "os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth)", "follower.name == \"<NAME>\": # follower.follow() # break # Narcissist Bot search_str = '<NAME>'", "import tweepy import os import time from dotenv import load_dotenv load_dotenv() consumer_key =", "# follower.follow() # break # Narcissist Bot search_str = '<NAME>' number_of_tweets = 2", "follower.follow() # break # Narcissist Bot search_str = '<NAME>' number_of_tweets = 2 for", "# if follower.name == \"<NAME>\": # follower.follow() # break # Narcissist Bot search_str", "user = api.me() # print(user.name) # public_tweets = api.home_timeline() # for tweet in", "= api.me() # print(user.name) # public_tweets = api.home_timeline() # for tweet in public_tweets:", "= api.home_timeline() # for 
tweet in public_tweets: # print(tweet.text) # helper function def", "# for tweet in public_tweets: # print(tweet.text) # helper function def limit_handler(cursor): try:", "try: tweet.favorite() # tweet.retweet() print('I liked that tweet') except tweepy.TweepError as e: print(e.reason)", "= tweepy.API(auth) user = api.me() # print(user.name) # public_tweets = api.home_timeline() # for", "except StopIteration: return # Generous Bot - follows ppl back # for follower", "consumer_key = os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth", "os.getenv('consumer_key') consumer_secret = os.getenv('consumer_secret') access_token = os.getenv('access_token') access_token_secret = os.getenv('access_token_secret') auth = tweepy.OAuthHandler(consumer_key," ]
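# A small hedged addition (not in the original script): the four credentials
# above come from a .env file read by python-dotenv, and a missing key only
# surfaces later as a confusing tweepy auth error. A check like this, placed
# right after load_dotenv(), fails fast with a clear message instead.
REQUIRED_KEYS = ('consumer_key', 'consumer_secret',
                 'access_token', 'access_token_secret')

missing = [k for k in REQUIRED_KEYS if not os.getenv(k)]
if missing:
    raise SystemExit('Missing .env keys: {}'.format(', '.join(missing)))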
[ "__name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename = 'public.txt' save_filename = 'domain_name.txt' batch_query(filename, save_filename)", "Author: Senkita Date: 2020-10-09 10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 ''' import os", "Description: ip反查域名 Author: Senkita Date: 2020-10-09 10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 '''", "from utils.Query import batch_query if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename = 'public.txt'", "os from utils.Query import batch_query if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename =", "Date: 2020-10-09 10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 ''' import os from utils.Query", "2020-10-09 15:01:39 ''' import os from utils.Query import batch_query if __name__ == \"__main__\":", "utils.Query import batch_query if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename = 'public.txt' save_filename", "if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename = 'public.txt' save_filename = 'domain_name.txt' batch_query(filename,", "LastEditTime: 2020-10-09 15:01:39 ''' import os from utils.Query import batch_query if __name__ ==", "LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 ''' import os from utils.Query import batch_query if", "import batch_query if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename = 'public.txt' save_filename =", "10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 ''' import os from utils.Query import batch_query", "import os from utils.Query import batch_query if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename", "''' import os from utils.Query import batch_query if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True)", "''' Description: ip反查域名 Author: Senkita Date: 2020-10-09 10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39", "Senkita LastEditTime: 2020-10-09 15:01:39 ''' import os from utils.Query import batch_query if __name__", "ip反查域名 Author: Senkita Date: 2020-10-09 10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 ''' import", "batch_query if __name__ == \"__main__\": os.makedirs('./Log', exist_ok=True) filename = 'public.txt' save_filename = 'domain_name.txt'", "Senkita Date: 2020-10-09 10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 ''' import os from", "15:01:39 ''' import os from utils.Query import batch_query if __name__ == \"__main__\": os.makedirs('./Log',", "2020-10-09 10:23:52 LastEditors: Senkita LastEditTime: 2020-10-09 15:01:39 ''' import os from utils.Query import" ]
[ "Metric class MetricSerializer( serializers.ModelSerializer ): class Meta: model = Metric fields = (", "serializers.ModelSerializer ): class Meta: model = Metric fields = ( \"title\", \"created_at\", \"metric\"", "): class Meta: model = Metric fields = ( \"title\", \"created_at\", \"metric\" )", "import serializers from .models import Metric class MetricSerializer( serializers.ModelSerializer ): class Meta: model", "from rest_framework import serializers from .models import Metric class MetricSerializer( serializers.ModelSerializer ): class", ".models import Metric class MetricSerializer( serializers.ModelSerializer ): class Meta: model = Metric fields", "from .models import Metric class MetricSerializer( serializers.ModelSerializer ): class Meta: model = Metric", "import Metric class MetricSerializer( serializers.ModelSerializer ): class Meta: model = Metric fields =", "MetricSerializer( serializers.ModelSerializer ): class Meta: model = Metric fields = ( \"title\", \"created_at\",", "rest_framework import serializers from .models import Metric class MetricSerializer( serializers.ModelSerializer ): class Meta:", "serializers from .models import Metric class MetricSerializer( serializers.ModelSerializer ): class Meta: model =", "class MetricSerializer( serializers.ModelSerializer ): class Meta: model = Metric fields = ( \"title\"," ]
[ "x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r = np.zeros_like(x) r[len(x)/2] = 1 return r", "transform A = np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute the corresponding frequencies dx", "the corresponding frequencies dx = x[1] - x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size)", "x: np.where(x < 5, 1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x)", "spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x)", "np.zeros_like(x) r[len(x)/2] = 1 return r spectrum(f, x) figfile = 'tmp' plt.legend(['step', '2sin',", "= 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r = np.zeros_like(x) r[len(x)/2] =", "def spectrum(f, x): # Discrete Fourier transform A = np.fft.rfft(f(x)) A_amplitude = np.abs(A)", "# Mesh L = 10; Nx = 100 x = np.linspace(0, L, Nx+1)", "x): # Discrete Fourier transform A = np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute", "def f(x): r = np.zeros_like(x) r[len(x)/2] = 1 return r spectrum(f, x) figfile", "A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L = 10; Nx = 100 x =", "as np import matplotlib.pyplot as plt def spectrum(f, x): # Discrete Fourier transform", "np.where(x < 5, 1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s", "1 return r spectrum(f, x) figfile = 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak']) plt.savefig(figfile", "spectrum(f, x) figfile = 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak']) plt.savefig(figfile + '.pdf') plt.savefig(figfile", "0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda x:", "return r spectrum(f, x) figfile = 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak']) plt.savefig(figfile +", "matplotlib.pyplot as plt def spectrum(f, x): # Discrete Fourier transform A = np.fft.rfft(f(x))", "= np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L = 10; Nx =", "numpy as np import matplotlib.pyplot as plt def spectrum(f, x): # Discrete Fourier", "spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r = np.zeros_like(x) r[len(x)/2] = 1 return", "1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r = np.zeros_like(x) r[len(x)/2] = 1 return r spectrum(f,", "= 1 return r spectrum(f, x) figfile = 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak'])", "freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L = 10; Nx", "corresponding frequencies dx = x[1] - x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2],", "s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r = np.zeros_like(x) r[len(x)/2]", "x = np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x < 5, 1, 0), x)", "Mesh L = 10; Nx = 100 x = np.linspace(0, L, Nx+1) spectrum(lambda", "spectrum(f, x): # Discrete Fourier transform A = np.fft.rfft(f(x)) A_amplitude = np.abs(A) #", "= 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak']) plt.savefig(figfile + '.pdf') plt.savefig(figfile + '.png') plt.show()", "L, Nx+1) spectrum(lambda x: np.where(x < 5, 1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L))", "x[1] - x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], 
A_amplitude[:len(freqs)/2]) # Mesh L", "frequencies dx = x[1] - x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2])", "< 5, 1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s =", "spectrum(lambda x: np.where(x < 5, 1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)),", "figfile = 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak']) plt.savefig(figfile + '.pdf') plt.savefig(figfile + '.png')", "A_amplitude = np.abs(A) # Compute the corresponding frequencies dx = x[1] - x[0]", "np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r =", "np import matplotlib.pyplot as plt def spectrum(f, x): # Discrete Fourier transform A", "L = 10; Nx = 100 x = np.linspace(0, L, Nx+1) spectrum(lambda x:", "- x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L =", "plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L = 10; Nx = 100 x = np.linspace(0,", "= 100 x = np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x < 5, 1,", "Fourier transform A = np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute the corresponding frequencies", "= np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute the corresponding frequencies dx = x[1]", "x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L = 10;", "np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute the corresponding frequencies dx = x[1] -", "= np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x < 5, 1, 0), x) spectrum(lambda", "dx = x[1] - x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) #", "= 10; Nx = 100 x = np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x", "as plt def spectrum(f, x): # Discrete Fourier transform A = np.fft.rfft(f(x)) A_amplitude", "5, 1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s = 0.5", "= x[1] - x[0] freqs = np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh", "10; Nx = 100 x = np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x <", "x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2),", "Nx = 100 x = np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x < 5,", "Nx+1) spectrum(lambda x: np.where(x < 5, 1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) +", "x) figfile = 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak']) plt.savefig(figfile + '.pdf') plt.savefig(figfile +", "Discrete Fourier transform A = np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute the corresponding", "# Compute the corresponding frequencies dx = x[1] - x[0] freqs = np.linspace(0,", "x) def f(x): r = np.zeros_like(x) r[len(x)/2] = 1 return r spectrum(f, x)", "100 x = np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x < 5, 1, 0),", "A_amplitude[:len(freqs)/2]) # Mesh L = 10; Nx = 100 x = np.linspace(0, L,", "r = np.zeros_like(x) r[len(x)/2] = 1 return r spectrum(f, x) figfile = 'tmp'", "np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L = 10; Nx = 100 x", "= np.zeros_like(x) r[len(x)/2] = 1 return r spectrum(f, x) figfile = 'tmp' plt.legend(['step',", "import numpy as np import 
matplotlib.pyplot as plt def spectrum(f, x): # Discrete", "np.abs(A) # Compute the corresponding frequencies dx = x[1] - x[0] freqs =", "np.linspace(0, np.pi/dx, A_amplitude.size) plt.plot(freqs[:len(freqs)/2], A_amplitude[:len(freqs)/2]) # Mesh L = 10; Nx = 100", "0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r = np.zeros_like(x) r[len(x)/2] = 1", "import matplotlib.pyplot as plt def spectrum(f, x): # Discrete Fourier transform A =", "1, 0), x) spectrum(lambda x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda", "x: np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def", "# Discrete Fourier transform A = np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute the", "r[len(x)/2] = 1 return r spectrum(f, x) figfile = 'tmp' plt.legend(['step', '2sin', 'gauss',", "= np.abs(A) # Compute the corresponding frequencies dx = x[1] - x[0] freqs", "r spectrum(f, x) figfile = 'tmp' plt.legend(['step', '2sin', 'gauss', 'peak']) plt.savefig(figfile + '.pdf')", "x) s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r = np.zeros_like(x)", "Compute the corresponding frequencies dx = x[1] - x[0] freqs = np.linspace(0, np.pi/dx,", "np.linspace(0, L, Nx+1) spectrum(lambda x: np.where(x < 5, 1, 0), x) spectrum(lambda x:", "+ np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x): r", "f(x): r = np.zeros_like(x) r[len(x)/2] = 1 return r spectrum(f, x) figfile =", "np.sin(np.pi*x/float(L)) + np.sin(np.pi*20*x/float(L)), x) s = 0.5 spectrum(lambda x: 1./(np.sqrt(2*np.pi)*s)*np.exp(-0.5*((x-L/2.)/s)**2), x) def f(x):", "plt def spectrum(f, x): # Discrete Fourier transform A = np.fft.rfft(f(x)) A_amplitude =", "A = np.fft.rfft(f(x)) A_amplitude = np.abs(A) # Compute the corresponding frequencies dx =" ]
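# A short hedged check (not in the original script): np.fft.rfft of the Nx+1
# real samples returns Nx//2 + 1 bins covering angular frequencies from 0 up
# to the Nyquist limit pi/dx, which is why spectrum() builds its axis with
# np.linspace(0, np.pi/dx, ...). For the pure fast sine sin(20*pi*x/L) the
# amplitude peak should therefore land near omega = 20*pi/L.
A = np.abs(np.fft.rfft(np.sin(np.pi*20*x/float(L))))
w = np.linspace(0, np.pi/(x[1] - x[0]), A.size)
print('peak at omega = %.3f, expected about %.3f' % (w[np.argmax(A)], np.pi*20/L))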
[ "P2PLending.reviews.models import Review class ReviewForm(ModelForm): class Meta: model = Review fields = ['text']", "from P2PLending.reviews.models import Review class ReviewForm(ModelForm): class Meta: model = Review fields =", "django.forms import ModelForm from P2PLending.reviews.models import Review class ReviewForm(ModelForm): class Meta: model =", "import ModelForm from P2PLending.reviews.models import Review class ReviewForm(ModelForm): class Meta: model = Review", "from django.forms import ModelForm from P2PLending.reviews.models import Review class ReviewForm(ModelForm): class Meta: model", "<reponame>nik-sergeson/bsuir-informatics-labs<gh_stars>0 from django.forms import ModelForm from P2PLending.reviews.models import Review class ReviewForm(ModelForm): class Meta:", "ModelForm from P2PLending.reviews.models import Review class ReviewForm(ModelForm): class Meta: model = Review fields" ]
[ ") return job_results = response.get(\"jobs\", []) if job_results: for result in job_results: if", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", ") try: request.execute() except Exception as e: # generic catch if 4xx error", ") ) logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name) ) return job_results = response.get(\"jobs\",", "job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) if retries >", "not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy == \"cancel\"", "timeout: try: resp = request.execute() except Exception as e: msg = ( \"Failed", "region ) if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if", "this file except in compliance with the License. # You may obtain a", "{} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) if retries", "after 30s...\".format( req_state, job[\"name\"] ) ) retries += 1 time.sleep(30) self._update_job_state(job, req_state, retries)", "( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now() <", "in JOB_STATE_MAP.values(): return else: msg = \"Waiting for job '{}' to reach terminal", "job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) logging.info( \"Failed to", "project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project, region ) if", "# limitations under the License. # import datetime import logging import time import", ") ) while datetime.datetime.now() < timeout: try: resp = request.execute() except Exception as", "ANY KIND, either express or implied. # See the License for the specific", "as e: logging.warning( \"Could not find running job '{}' in project '{}': {}\".format(", "in job_results: if result[\"name\"] == job_name: return result def _update_job_state(self, job, req_state=None, retries=None):", "Exception as e: # generic catch if 4xx error - probably shouldn't retry", "= _req_state request = ( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job,", "def _watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = ( self._client.projects()", ".update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try: request.execute() except Exception as e:", "terminal state after '{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1) def stop(self,", "req_state, retries) def _watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request =", "jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try: request.execute() except Exception as e: #", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See", "not find running job '{}' in project '{}': {}\".format( job_name, project, e )", "return else: msg = \"Waiting for job '{}' to reach terminal state...\".format( job[\"name\"]", "= request.execute() except Exception as e: msg = ( \"Failed to get current", "= \"Max retries reached: could not {} job '{}': {}\".format( req_state, job[\"name\"], e", "job_results: if result[\"name\"] == job_name: return result def _update_job_state(self, job, req_state=None, retries=None): if", "return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy == \"cancel\" else \"drained\"", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "{}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) if retries > 2: msg", "> 2: msg = \"Max retries reached: could not {} job '{}': {}\".format(", "result def _update_job_state(self, job, req_state=None, retries=None): if retries is None: retries = 0", "timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations() .jobs() .get(", "OF ANY KIND, either express or implied. # See the License for the", "_req_state request = ( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, )", "Spotify AB # # Licensed under the Apache License, Version 2.0 (the \"License\");", ") logging.error(msg) raise SystemExit(1) if retries > 2: msg = \"Max retries reached:", "job[\"name\"], e ) logging.error(msg) raise SystemExit(1) logging.info( \"Failed to {} job '{}'. Trying", "discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version)", "StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not api_version: api_version =", "= \"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project, region): request =", "Exception as e: logging.warning( \"Could not find running job '{}' in project '{}':", "logging.info(msg) time.sleep(5) msg = \"Job '{}' did not reach terminal state after '{}'", "again after 30s...\".format( req_state, job[\"name\"] ) ) retries += 1 time.sleep(30) self._update_job_state(job, req_state,", "= ( \"Failed to get current status for job '{}'. 
Error: {}.\\n\" \"Trying", "= datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"],", "req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) logging.info( \"Failed to {} job '{}'.", "if result[\"name\"] == job_name: return result def _update_job_state(self, job, req_state=None, retries=None): if retries", "googleapiclient import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self,", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", ") logging.error(msg) raise SystemExit(1) def stop(self, job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job", "running job '{}' in project '{}': {}\".format( job_name, project, e ) ) logging.warning(", "not _req_state: job[\"requestedState\"] = _req_state request = ( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"],", "secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1) def stop(self, job_name, project, region, strategy,", "= 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] =", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "emoji from googleapiclient import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object):", "== \"cancel\" else \"drained\" msg = \"Successfully {} job '{}' :smile_cat:\".format(verb, job_name) logging.info(emoji.emojize(msg,", "api_version: api_version = \"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project, region):", "limitations under the License. 
# import datetime import logging import time import emoji", "= \"Job '{}' did not reach terminal state after '{}' secs.\".format( job[\"name\"], timeout", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", ".locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try: request.execute() except Exception", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "resp = request.execute() except Exception as e: msg = ( \"Failed to get", "if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg = \"Waiting for job '{}' to", ") logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name) ) return job_results = response.get(\"jobs\", [])", "\"Job '{}' did not reach terminal state after '{}' secs.\".format( job[\"name\"], timeout )", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# import datetime import logging import time import emoji from googleapiclient import discovery", "required by applicable law or agreed to in writing, software # distributed under", "applicable law or agreed to in writing, software # distributed under the License", "reached: could not {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise", "for result in job_results: if result[\"name\"] == job_name: return result def _update_job_state(self, job,", "while datetime.datetime.now() < timeout: try: resp = request.execute() except Exception as e: msg", "state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg = \"Job '{}' did not reach terminal", "\"Waiting for job '{}' to reach terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg", "retries=None): if retries is None: retries = 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if", "or agreed to in writing, software # distributed under the License is distributed", "if strategy == \"cancel\" else \"drained\" msg = \"Successfully {} job '{}' :smile_cat:\".format(verb,", "License. # import datetime import logging import time import emoji from googleapiclient import", "governing permissions and # limitations under the License. # import datetime import logging", "find running job '{}' in project '{}': {}\".format( job_name, project, e ) )", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "to {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) if", "else: msg = \"Waiting for job '{}' to reach terminal state...\".format( job[\"name\"] )", "'{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1) def stop(self, job_name, project, region,", "retry if getattr(e, \"resp\", None): if e.resp.status < 500: msg = \"Failed to", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "job_name, project, e ) ) logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name) ) return", "writing, software # distributed under the License is distributed on an \"AS IS\"", "< 500: msg = \"Failed to {} job '{}': {}\".format( req_state, job[\"name\"], e", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "if getattr(e, \"resp\", None): if e.resp.status < 500: msg = \"Failed to {}", "\"Failed to get current status for job '{}'. Error: {}.\\n\" \"Trying again after", "License. 
# You may obtain a copy of the License at # #", "as e: msg = ( \"Failed to get current status for job '{}'.", "\"Max retries reached: could not {} job '{}': {}\".format( req_state, job[\"name\"], e )", "attempt deploying '{}'\".format(job_name) ) return job_results = response.get(\"jobs\", []) if job_results: for result", "resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg = \"Waiting for job '{}' to reach", "{}\".format( job_name, project, e ) ) logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name) )", "compliance with the License. # You may obtain a copy of the License", "4xx error - probably shouldn't retry if getattr(e, \"resp\", None): if e.resp.status <", "if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy ==", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "import time import emoji from googleapiclient import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\":", "strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project, region ) if not current_running_job:", "Exception as e: msg = ( \"Failed to get current status for job", "not use this file except in compliance with the License. # You may", "job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project, region )", "30s...\".format( req_state, job[\"name\"] ) ) retries += 1 time.sleep(30) self._update_job_state(job, req_state, retries) def", "req_state, job[\"name\"] ) ) retries += 1 time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self,", "= ( self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response = request.execute()", "_check_job_running(self, job_name, project, region): request = ( self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",)", "License, Version 2.0 (the \"License\"); # you may not use this file except", "language governing permissions and # limitations under the License. # import datetime import", "datetime import logging import time import emoji from googleapiclient import discovery JOB_STATE_MAP =", "retries > 2: msg = \"Max retries reached: could not {} job '{}':", ") logging.error(msg) raise SystemExit(1) logging.info( \"Failed to {} job '{}'. 
Trying again after", "def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not api_version: api_version = \"v1b3\"", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "request = ( self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response =", "'{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) logging.info( \"Failed to {}", "= JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] = _req_state request =", "# you may not use this file except in compliance with the License.", "not api_version: api_version = \"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project,", "agreed to in writing, software # distributed under the License is distributed on", "under the License. # import datetime import logging import time import emoji from", "\"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not api_version:", "time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout)", "(the \"License\"); # you may not use this file except in compliance with", "1 time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now() +", "job[\"requestedState\"] = _req_state request = ( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"],", "job '{}' to reach terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg = \"Job", "{\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version):", "# Unless required by applicable law or agreed to in writing, software #", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", ".jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now() < timeout: try: resp", "self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project, region): request = ( self._client.projects()", "api_version): if not api_version: api_version = \"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self,", ") try: response = request.execute() except Exception as e: logging.warning( \"Could not find", "deploying '{}'\".format(job_name) ) return job_results = response.get(\"jobs\", []) if job_results: for result in", "{}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) logging.info( \"Failed to {} job", "req_state=None, retries=None): if retries is None: retries = 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"])", "500: msg = \"Failed to {} job '{}': {}\".format( req_state, job[\"name\"], e )", ") retries += 1 time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self, job, 
timeout=600): timeout", "response.get(\"jobs\", []) if job_results: for result in job_results: if result[\"name\"] == job_name: return", "job, req_state=None, retries=None): if retries is None: retries = 0 _req_state = JOB_STATE_MAP.get(req_state,", "self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not api_version: api_version = \"v1b3\" self._client = discovery.build(\"dataflow\",", "file except in compliance with the License. # You may obtain a copy", "current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy == \"cancel\" else", "current status for job '{}'. Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e) )", "raise SystemExit(1) logging.info( \"Failed to {} job '{}'. Trying again after 30s...\".format( req_state,", "Trying again after 30s...\".format( req_state, job[\"name\"] ) ) retries += 1 time.sleep(30) self._update_job_state(job,", ") ) try: request.execute() except Exception as e: # generic catch if 4xx", ".list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response = request.execute() except Exception as e: logging.warning(", "License for the specific language governing permissions and # limitations under the License.", "job_name: return result def _update_job_state(self, job, req_state=None, retries=None): if retries is None: retries", "self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response = request.execute() except Exception", "to in writing, software # distributed under the License is distributed on an", "\"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project, region): request = (", "retries = 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"]", "implied. # See the License for the specific language governing permissions and #", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "== job_name: return result def _update_job_state(self, job, req_state=None, retries=None): if retries is None:", "filter=\"ACTIVE\",) ) try: response = request.execute() except Exception as e: logging.warning( \"Could not", "- probably shouldn't retry if getattr(e, \"resp\", None): if e.resp.status < 500: msg", "time.sleep(5) msg = \"Job '{}' did not reach terminal state after '{}' secs.\".format(", "e ) logging.error(msg) raise SystemExit(1) if retries > 2: msg = \"Max retries", "self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy == \"cancel\" else \"drained\" msg = \"Successfully", "or implied. # See the License for the specific language governing permissions and", "job '{}'. 
Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue", "raise SystemExit(1) def stop(self, job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running(", "not {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) logging.info(", "project, region ) if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\"", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "try: resp = request.execute() except Exception as e: msg = ( \"Failed to", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if", "if not api_version: api_version = \"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name,", "not reach terminal state after '{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1)", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"],", "+ datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], )", "self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project, region ) if not current_running_job: return self._update_job_state(current_running_job,", "job_name, project, region): request = ( self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) )", "= \"Failed to {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise", "def _check_job_running(self, job_name, project, region): request = ( self._client.projects() .locations() .jobs() .list(projectId=project, location=region,", "shouldn't retry if getattr(e, \"resp\", None): if e.resp.status < 500: msg = \"Failed", "None): if e.resp.status < 500: msg = \"Failed to {} job '{}': {}\".format(", "strategy == \"cancel\" else \"drained\" msg = \"Successfully {} job '{}' :smile_cat:\".format(verb, job_name)", "= response.get(\"jobs\", []) if job_results: for result in job_results: if result[\"name\"] == job_name:", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "job[\"name\"] ) ) retries += 1 time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self, job,", "( \"Failed to get current status for job '{}'. 
Error: {}.\\n\" \"Trying again", "Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue if resp[\"currentState\"]", "after '{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1) def stop(self, job_name, project,", "if 4xx error - probably shouldn't retry if getattr(e, \"resp\", None): if e.resp.status", ") if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy", ".jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response = request.execute() except Exception as e:", "msg = \"Failed to {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg)", "self._check_job_running( job_name, project, region ) if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "_set_dataflow_client(self, api_version): if not api_version: api_version = \"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def", "you may not use this file except in compliance with the License. #", "e: logging.warning( \"Could not find running job '{}' in project '{}': {}\".format( job_name,", "api_version = \"v1b3\" self._client = discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project, region): request", "to get current status for job '{}'. Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"],", "e ) logging.error(msg) raise SystemExit(1) logging.info( \"Failed to {} job '{}'. Trying again", "import emoji from googleapiclient import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class", "= \"Waiting for job '{}' to reach terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5)", "'{}'. Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue if", ".get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now() < timeout: try: resp =", "use this file except in compliance with the License. # You may obtain", "time import emoji from googleapiclient import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"}", "= self._check_job_running( job_name, project, region ) if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job)", "Copyright 2019-2020 Spotify AB # # Licensed under the Apache License, Version 2.0", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "return result def _update_job_state(self, job, req_state=None, retries=None): if retries is None: retries =", "after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else:", "# generic catch if 4xx error - probably shouldn't retry if getattr(e, \"resp\",", "e: msg = ( \"Failed to get current status for job '{}'. 
Error:", "job, timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations() .jobs()", "e) ) logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg =", "msg = \"Waiting for job '{}' to reach terminal state...\".format( job[\"name\"] ) logging.info(msg)", "2.0 (the \"License\"); # you may not use this file except in compliance", "JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] = _req_state request = ( self._client.projects()", "get current status for job '{}'. Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e)", "= ( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try:", "if retries is None: retries = 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\")", "{}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in", "result in job_results: if result[\"name\"] == job_name: return result def _update_job_state(self, job, req_state=None,", "for the specific language governing permissions and # limitations under the License. #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "is not _req_state: job[\"requestedState\"] = _req_state request = ( self._client.projects() .locations() .jobs() .update(", "could not {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1)", "probably shouldn't retry if getattr(e, \"resp\", None): if e.resp.status < 500: msg =", "# # Unless required by applicable law or agreed to in writing, software", "'{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) if retries > 2:", "response = request.execute() except Exception as e: logging.warning( \"Could not find running job", "express or implied. # See the License for the specific language governing permissions", "request = ( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while", "None: retries = 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state:", "retries reached: could not {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg)", ") logging.info(msg) time.sleep(5) msg = \"Job '{}' did not reach terminal state after", "logging.error(msg) raise SystemExit(1) logging.info( \"Failed to {} job '{}'. Trying again after 30s...\".format(", "_req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] = _req_state request", "if retries > 2: msg = \"Max retries reached: could not {} job", "either express or implied. 
# See the License for the specific language governing", "5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg", "AB # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "return job_results = response.get(\"jobs\", []) if job_results: for result in job_results: if result[\"name\"]", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] = _req_state request = (", "time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg = \"Waiting for job", "api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not api_version: api_version = \"v1b3\" self._client =", "2: msg = \"Max retries reached: could not {} job '{}': {}\".format( req_state,", "logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg = \"Waiting for", "retries) def _watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = (", "logging import time import emoji from googleapiclient import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\",", "SystemExit(1) if retries > 2: msg = \"Max retries reached: could not {}", "the License. # You may obtain a copy of the License at #", "logging.error(msg) raise SystemExit(1) def stop(self, job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job =", "for job '{}'. Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5)", "error - probably shouldn't retry if getattr(e, \"resp\", None): if e.resp.status < 500:", "self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy == \"cancel\" else \"drained\" msg", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "job_results = response.get(\"jobs\", []) if job_results: for result in job_results: if result[\"name\"] ==", "region): request = ( self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response", "the License. 
# import datetime import logging import time import emoji from googleapiclient", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "job[\"name\"] ) logging.info(msg) time.sleep(5) msg = \"Job '{}' did not reach terminal state", "logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name) ) return job_results = response.get(\"jobs\", []) if", "timeout ) logging.error(msg) raise SystemExit(1) def stop(self, job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version)", "\"cancel\" else \"drained\" msg = \"Successfully {} job '{}' :smile_cat:\".format(verb, job_name) logging.info(emoji.emojize(msg, use_aliases=True))", "'{}' did not reach terminal state after '{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg)", "region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project, region ) if not", "e ) ) logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name) ) return job_results =", "as e: # generic catch if 4xx error - probably shouldn't retry if", "is None: retries = 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not", "JOB_STATE_MAP.values(): return else: msg = \"Waiting for job '{}' to reach terminal state...\".format(", "body=job, ) ) try: request.execute() except Exception as e: # generic catch if", "current_running_job = self._check_job_running( job_name, project, region ) if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy)", ".locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response = request.execute() except Exception as", "projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now() < timeout: try: resp = request.execute() except", "= \"cancelled\" if strategy == \"cancel\" else \"drained\" msg = \"Successfully {} job", "state after '{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1) def stop(self, job_name,", "to attempt deploying '{}'\".format(job_name) ) return job_results = response.get(\"jobs\", []) if job_results: for", "continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg = \"Waiting for job '{}'", "retries += 1 time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self, job, timeout=600): timeout =", "with the License. # You may obtain a copy of the License at", "generic catch if 4xx error - probably shouldn't retry if getattr(e, \"resp\", None):", "\"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not", "job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1) def stop(self, job_name, project, region, strategy, api_version=None):", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "import datetime import logging import time import emoji from googleapiclient import discovery JOB_STATE_MAP", "job '{}'. 
Trying again after 30s...\".format( req_state, job[\"name\"] ) ) retries += 1", "in project '{}': {}\".format( job_name, project, e ) ) logging.warning( \"Continuing to attempt", "msg = \"Job '{}' did not reach terminal state after '{}' secs.\".format( job[\"name\"],", "'{}': {}\".format( job_name, project, e ) ) logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name)", "def _set_dataflow_client(self, api_version): if not api_version: api_version = \"v1b3\" self._client = discovery.build(\"dataflow\", api_version)", "{} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) logging.info( \"Failed", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "location=region, filter=\"ACTIVE\",) ) try: response = request.execute() except Exception as e: logging.warning( \"Could", "SystemExit(1) logging.info( \"Failed to {} job '{}'. Trying again after 30s...\".format( req_state, job[\"name\"]", "discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project, region): request = ( self._client.projects() .locations() .jobs()", "msg = \"Max retries reached: could not {} job '{}': {}\".format( req_state, job[\"name\"],", "self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try: request.execute() except", "permissions and # limitations under the License. # import datetime import logging import", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "request.execute() except Exception as e: logging.warning( \"Could not find running job '{}' in", "\"resp\", None): if e.resp.status < 500: msg = \"Failed to {} job '{}':", "self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now() < timeout:", "except Exception as e: logging.warning( \"Could not find running job '{}' in project", ") ) retries += 1 time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self, job, timeout=600):", "'{}' to reach terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg = \"Job '{}'", "self._update_job_state(job, req_state, retries) def _watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request", "SystemExit(1) def stop(self, job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name,", "_watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations()", "catch if 4xx error - probably shouldn't retry if getattr(e, \"resp\", None): if", "\"Failed to {} job '{}': {}\".format( req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1)", "request.execute() except Exception as e: # generic catch if 4xx error - probably", "\"Could not find running job '{}' in project '{}': {}\".format( job_name, project, e", "datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) )", "to reach terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg = \"Job '{}' did", "reach terminal state 
after '{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise SystemExit(1) def", ".locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now() < timeout: try:", "jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now() < timeout: try: resp = request.execute()", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None):", "for job '{}' to reach terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "project '{}': {}\".format( job_name, project, e ) ) logging.warning( \"Continuing to attempt deploying", "did not reach terminal state after '{}' secs.\".format( job[\"name\"], timeout ) logging.error(msg) raise", "_req_state: job[\"requestedState\"] = _req_state request = ( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"],", ".jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try: request.execute() except Exception as", "\"cancelled\" if strategy == \"cancel\" else \"drained\" msg = \"Successfully {} job '{}'", "job '{}' in project '{}': {}\".format( job_name, project, e ) ) logging.warning( \"Continuing", "( self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try: response = request.execute() except", "logging.error(msg) raise SystemExit(1) if retries > 2: msg = \"Max retries reached: could", "See the License for the specific language governing permissions and # limitations under", "location=job[\"location\"], body=job, ) ) try: request.execute() except Exception as e: # generic catch", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "# Copyright 2019-2020 Spotify AB # # Licensed under the Apache License, Version", "request = ( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) )", "except Exception as e: # generic catch if 4xx error - probably shouldn't", "'{}'\".format(job_name) ) return job_results = response.get(\"jobs\", []) if job_results: for result in job_results:", "job_results: for result in job_results: if result[\"name\"] == job_name: return result def _update_job_state(self,", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "datetime.datetime.now() + datetime.timedelta(seconds=timeout) request = ( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"],", "_update_job_state(self, job, req_state=None, retries=None): if retries is None: retries = 0 _req_state =", "verb = \"cancelled\" if strategy == \"cancel\" else \"drained\" msg = \"Successfully {}", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "req_state=strategy) self._watch_job_state(current_running_job) verb = \"cancelled\" if strategy == 
\"cancel\" else \"drained\" msg =", "try: request.execute() except Exception as e: # generic catch if 4xx error -", "projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try: request.execute() except Exception as e: # generic", "job_name, project, region ) if not current_running_job: return self._update_job_state(current_running_job, req_state=strategy) self._watch_job_state(current_running_job) verb =", "0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] = _req_state", "api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project, region ) if not current_running_job: return", "to {} job '{}'. Trying again after 30s...\".format( req_state, job[\"name\"] ) ) retries", "try: response = request.execute() except Exception as e: logging.warning( \"Could not find running", "the specific language governing permissions and # limitations under the License. # import", "e: # generic catch if 4xx error - probably shouldn't retry if getattr(e,", ") logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return else: msg = \"Waiting", "location=job[\"location\"], ) ) while datetime.datetime.now() < timeout: try: resp = request.execute() except Exception", "result[\"name\"] == job_name: return result def _update_job_state(self, job, req_state=None, retries=None): if retries is", "msg = ( \"Failed to get current status for job '{}'. Error: {}.\\n\"", "Version 2.0 (the \"License\"); # you may not use this file except in", "again after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values(): return", "except in compliance with the License. # You may obtain a copy of", "except Exception as e: msg = ( \"Failed to get current status for", ") while datetime.datetime.now() < timeout: try: resp = request.execute() except Exception as e:", "\"Continuing to attempt deploying '{}'\".format(job_name) ) return job_results = response.get(\"jobs\", []) if job_results:", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "if e.resp.status < 500: msg = \"Failed to {} job '{}': {}\".format( req_state,", "+= 1 time.sleep(30) self._update_job_state(job, req_state, retries) def _watch_job_state(self, job, timeout=600): timeout = datetime.datetime.now()", "request.execute() except Exception as e: msg = ( \"Failed to get current status", "getattr(e, \"resp\", None): if e.resp.status < 500: msg = \"Failed to {} job", "reach terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg = \"Job '{}' did not", "'{}'. Trying again after 30s...\".format( req_state, job[\"name\"] ) ) retries += 1 time.sleep(30)", "'{}' in project '{}': {}\".format( job_name, project, e ) ) logging.warning( \"Continuing to", "specific language governing permissions and # limitations under the License. # import datetime", "raise SystemExit(1) if retries > 2: msg = \"Max retries reached: could not", "= {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self,", "and # limitations under the License. 
# import datetime import logging import time", "if job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] = _req_state request = ( self._client.projects() .locations()", "from googleapiclient import discovery JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def", "project, region): request = ( self._client.projects() .locations() .jobs() .list(projectId=project, location=region, filter=\"ACTIVE\",) ) try:", "terminal state...\".format( job[\"name\"] ) logging.info(msg) time.sleep(5) msg = \"Job '{}' did not reach", "req_state, job[\"name\"], e ) logging.error(msg) raise SystemExit(1) if retries > 2: msg =", "2019-2020 Spotify AB # # Licensed under the Apache License, Version 2.0 (the", "def _update_job_state(self, job, req_state=None, retries=None): if retries is None: retries = 0 _req_state", "logging.info( \"Failed to {} job '{}'. Trying again after 30s...\".format( req_state, job[\"name\"] )", "= discovery.build(\"dataflow\", api_version) def _check_job_running(self, job_name, project, region): request = ( self._client.projects() .locations()", "retries is None: retries = 0 _req_state = JOB_STATE_MAP.get(req_state, JOB_STATE_MAP[\"cancel\"]) if job.get(\"requestedState\") is", "project, e ) ) logging.warning( \"Continuing to attempt deploying '{}'\".format(job_name) ) return job_results", "{} job '{}'. Trying again after 30s...\".format( req_state, job[\"name\"] ) ) retries +=", "class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not api_version: api_version", "JOB_STATE_MAP = {\"cancel\": \"JOB_STATE_CANCELLED\", \"drain\": \"JOB_STATE_DRAINED\"} class StopJob(object): def __init__(self, api_version=None): self._set_dataflow_client(api_version) def", "api_version) def _check_job_running(self, job_name, project, region): request = ( self._client.projects() .locations() .jobs() .list(projectId=project,", "< timeout: try: resp = request.execute() except Exception as e: msg = (", "job[\"name\"], e ) logging.error(msg) raise SystemExit(1) if retries > 2: msg = \"Max", "= ( self._client.projects() .locations() .jobs() .get( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], ) ) while datetime.datetime.now()", "= request.execute() except Exception as e: logging.warning( \"Could not find running job '{}'", "( self._client.projects() .locations() .jobs() .update( jobId=job[\"id\"], projectId=job[\"projectId\"], location=job[\"location\"], body=job, ) ) try: request.execute()", "datetime.datetime.now() < timeout: try: resp = request.execute() except Exception as e: msg =", "status for job '{}'. Error: {}.\\n\" \"Trying again after 5s...\".format(job[\"name\"], e) ) logging.info(msg)", "if job_results: for result in job_results: if result[\"name\"] == job_name: return result def", "\"Failed to {} job '{}'. 
Trying again after 30s...\".format( req_state, job[\"name\"] ) )", "logging.warning( \"Could not find running job '{}' in project '{}': {}\".format( job_name, project,", "\"Trying again after 5s...\".format(job[\"name\"], e) ) logging.info(msg) time.sleep(5) continue if resp[\"currentState\"] in JOB_STATE_MAP.values():", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "__init__(self, api_version=None): self._set_dataflow_client(api_version) def _set_dataflow_client(self, api_version): if not api_version: api_version = \"v1b3\" self._client", "stop(self, job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project, region", "e.resp.status < 500: msg = \"Failed to {} job '{}': {}\".format( req_state, job[\"name\"],", "job.get(\"requestedState\") is not _req_state: job[\"requestedState\"] = _req_state request = ( self._client.projects() .locations() .jobs()", "[]) if job_results: for result in job_results: if result[\"name\"] == job_name: return result", "def stop(self, job_name, project, region, strategy, api_version=None): self._set_dataflow_client(api_version) current_running_job = self._check_job_running( job_name, project,", "import logging import time import emoji from googleapiclient import discovery JOB_STATE_MAP = {\"cancel\":" ]
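# ---------------------------------------------------------------------------
# A minimal usage sketch for the StopJob class above -- illustrative only,
# not part of the original module. "my-gcp-project", "europe-west1" and
# "my-streaming-job" are placeholder values, and the call assumes Google
# Application Default Credentials with permission to update Dataflow jobs.
# Note that any strategy string missing from JOB_STATE_MAP falls back to
# "cancel" inside _update_job_state().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    stopper = StopJob()
    stopper.stop(
        job_name="my-streaming-job",  # placeholder job name
        project="my-gcp-project",     # placeholder GCP project id
        region="europe-west1",        # placeholder Dataflow region
        strategy="drain",             # "drain" finishes in-flight work; "cancel" stops now
    )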
[ "f_ini.axes.imshow(s.data) f_ini.axes.set_axis_off() f_ini.axes.set_title('Left click to add or remove atoms') f_ini.show() def onclick(event): if", "+ '.tif',dpi=600,bbox_inches='tight') #Plot both A-site and B-site on the image f_AB = PlotCanvas()", "30, 20)) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.label_13 = QtWidgets.QLabel(VecMap) self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16)) self.label_13.setObjectName(\"label_13\") self.checkBox_5 =", "QtWidgets.QRadioButton(VecMap) self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20)) self.radioButton.setChecked(True) self.radioButton.setObjectName(\"radioButton\") self.radioButton_2 = QtWidgets.QRadioButton(VecMap) self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95,", "else: A_positions.pop(atom_nearby) replot(f_ini) def get_xy_pos_lists(atom_lst): return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1] def replot(f): x_pos, y_pos =", "10, 111, 20)) self.checkBox_4.setObjectName(\"checkBox_4\") self.line_3 = QtWidgets.QFrame(VecMap) self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21)) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)", "#find the file extension and remove it. '/' for parent path path =", "= (p1[0]*p2[1] - p2[0]*p1[1]) return A, B, -C def intersection(L1, L2): #A function", "(2019).</a>\" \"<br>\" \\ \"THE SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY", "displacement directions') f_vec_ang_dist.show() except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate the displacement", "Create canvas for drawing try: global f_sep f_sep = SeparationCanvas() for i in", "global s, my_path, title, scale, units, disp, disp_O, image, disp_atom openfile_name = QFileDialog.getOpenFileName(self,'Select", "ang_lst[0] if ang < 0: ang = ang + 360 for i in", "onclick(event): if event.inaxes != f_ini.axes: return if event.button == 1: # Left mouse", "href=\\\"http://doi.org/10.5281/zenodo.3396791\\\">hyperspy/hyperspy: HyperSpy v1.5.2 (2019).</a>\" \\ \"<br>\" \"<NAME>. et al. <a href=\\\"https://doi.org/10.1186/s40679-017-0042-5\\\">Adv. Struct. Chem.", "\"_{}_vec_map.tif\".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True) print('The vector map has been saved to ' + my_path + title", "1 else: ABF = 0 if self.checkBox_4.isChecked(): img_110 = 1 else: img_110 =", "{}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5])) disp_data.write('\\n') print('Atomic displacement data saved to", "in disp] global f_vec_ang_dist f_vec_ang_dist = PlotCanvas() f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions') f_vec_ang_dist.axes.hist(disp_angles, bins=50)", "import * from PyQt5.QtCore import * import matplotlib matplotlib.use('Qt5Agg') from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg", "self.pushButton_13.setObjectName(\"pushButton_13\") self.label_7 = QtWidgets.QLabel(VecMap) self.label_7.setGeometry(QtCore.QRect(20, 650, 41, 16)) self.label_7.setObjectName(\"label_7\") self.lineEdit_2 = QtWidgets.QLineEdit(VecMap) self.lineEdit_2.setGeometry(QtCore.QRect(60,", "factors') self.create_main_frame() def create_main_frame(self): self.main_frame = QWidget() # Create the mpl Figure and", "image overlayed. 
f_B_site = PlotCanvas() f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms') f_B_site.axes.imshow(image) f_B_site.axes.scatter(ap_B[:,0],", "#A, B are lists of atom coordinates; Ua is the estimated lattice paramter", "a line function from two points A = (p1[1] - p2[1]) B =", "disclaimerButtonClick(): msg = QMessageBox() msg.setText('Thanks for using VecMap') msg.setWindowTitle('Thank you!') returnValue = msg.exec()", "f_sep = SeparationCanvas() for i in range(9): s_factor = sep - 4 +", "y_pos = get_xy_pos_lists(A_positions) dp.set_xdata(x_pos) dp.set_ydata(y_pos) f.fig.canvas.draw() f.fig.canvas.flush_events() xy_positions = get_xy_pos_lists(A_positions) dp, = f_ini.axes.plot(xy_positions[0],", "Neighbor.pop(closest_node(n_0, Neighbor)[0]) n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0]) n_3 = Neighbor.pop() o_0 = (n_0[0] + n_1[0])", "#calculate the ideal atomic positions for O in a un-distorted perovskite structure #only", "Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale Uc = math.sqrt(z1[0]**2 + z1[1]**2) *", "[int(a) for a in ang_lst] color_lst = str(self.lineEdit_5.text()).split() #====Plot==== disp_color = set_arrow_color(disp, ang_lst,", "- 4 + i f_sep.axes[i].set_aspect('equal') f_sep.axes[i].set_axis_off() if s_factor < 1: continue ini_position =", "'+ my_path) except NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical)", "b, c, d #Find the diagonal of a M = [b,c,d] diag_idx =", "msg.setText(\"I will make this app freely available for the society.<br>\"\\ \"If you like", "Dr. <NAME>. Address your questions and suggestions to <EMAIL>. Please see the \"Disclaimer\"", "'/' for parent path path = file[:idx] + '/' return path def find_atom(img,", "tol=1.2): # Define a function to find the neighboring atoms of P(x,y) from", "msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate the displacement first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() print('') #=========", "vec_ang = math.degrees(math.atan(dy/dx)) + 180 elif dx < 0 and dy < 0:", "else: zone_axis = sublattice_A.zones_axis_average_distances[2] #Calculate lattice parameter z0 = sublattice_A.zones_axis_average_distances[0] z1 = sublattice_A.zones_axis_average_distances[1]", "sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002) #Initial positions of O print('='*50) print('Subtracting sublattice A and B from the", "- ang_lst_mod[-1]) // 2 + ang_lst_mod[-1]) for vec in vec_data_color: ang = vec[5]", "s=2, color='g') f_all.axes.set_axis_off() f_all.show() f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif',dpi=600,bbox_inches='tight') if", "def show_about(self): msg = QMessageBox() # msg.setIcon(QMessageBox.Information) msg.setText(\"VecMap v0.1.1\"\\ \"<br>\"\\ \"Designed by Dr.", "the closest node in an array closest_index = distance.cdist([node], nodes).argmin() return closest_index,nodes[closest_index] def", "print('{} has been loaded!'.format(file)) my_path = getDirectory(file) #Set the working path file_path =", "vs. B-site vs. O atoms') f_all.axes.imshow(image) f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2,", "print('Refining atom positions for A-site atoms...') print('This may take time...') sublattice_A = find_atom(s.data,", "#Set ABF toggle from the checkbox ABF = 1 else: ABF = 0", "sublattice_B.atom_positions ##Refined atoms positions for B-site. NumPy array. 
print('Refining B-site atoms done!') #lattice_list.append(sublattice_B)", "460, 91, 51)) self.pushButton_7.setObjectName(\"pushButton_7\") self.line_4 = QtWidgets.QFrame(VecMap) self.line_4.setGeometry(QtCore.QRect(20, 730, 371, 21)) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)", "get_atom_positions(s, separation=s_factor) f_sep.axes[i].imshow(s.data) f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r') f_sep.axes[i].set_title('Separation = {}'.format(s_factor)) f_sep.show() except NameError:", "\"<br>\"\\ \"2. Ma, T. et al. <a href=\\\"https://doi.org/10.1063/1.5115039\\\">Appl. Phys. Lett. 115, 122902 (2019).</a>\"", "2, (n_0[1] + n_1[1]) / 2 ideal_O_positions.append(o_0) o_1 = (n_0[0] + n_2[0]) /", "self.label_13.setText(_translate(\"VecMap\", \"Scale:\")) self.checkBox_5.setText(_translate(\"VecMap\", \"Scale bar\")) #===== Open file and set up global variables", "v1.5.2 (2019).</a>\" \\ \"<br>\" \"<NAME>. et al. <a href=\\\"https://doi.org/10.1186/s40679-017-0042-5\\\">Adv. Struct. Chem. Imaging 3,", "range(1,10)] self.fig.set_tight_layout(True) # Create the navigation toolbar, tied to the canvas # self.mpl_toolbar", "#Calculate O map and save if O_map == 1: ap_2 = ap_O.tolist() ideal_O_pos", "= [] if not img_110: #calculate image [001] for atom in A: Neighbor", "1: print('All figures have been saved to '+ my_path) except NameError: #Pop up", "from the csv file saved previously global s, my_path, title, scale, units, disp,", "for using VecMap') msg.setWindowTitle('Thank you!') returnValue = msg.exec() msg.buttonClicked.connect(disclaimerButtonClick) returnValue = msg.exec() #============", "20)) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.label_13 = QtWidgets.QLabel(VecMap) self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16)) self.label_13.setObjectName(\"label_13\") self.checkBox_5 = QtWidgets.QCheckBox(VecMap)", "self.main_frame = QWidget() # Create the mpl Figure and FigCanvas objects. 
# 10x10", "x: (x[0] ** 2 + x[1] ** 2) ** 0.5) return N def", "x,y else: return False def math_center(a, b, c, d): #Define a function to", "map module ============================================= #========= Connected to self.pushButton_14 =========================================== def show_O_vec_map(self): O_len = int(self.lineEdit_3.text())", "vec in vec_data_color: ang = vec[5] - ang_lst[0] if ang < 0: ang", "= QtWidgets.QLabel(VecMap) self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51)) self.label_18.setTextFormat(QtCore.Qt.AutoText) self.label_18.setScaledContents(False) self.label_18.setWordWrap(True) self.label_18.setObjectName(\"label_18\") self.pushButton_7 = QtWidgets.QPushButton(VecMap)", "+ 180 elif dx < 0 and dy < 0: vec_ang = math.degrees(math.atan(dy/dx))", "for refining') f_ini.axes.imshow(s.data) f_ini.axes.set_axis_off() f_ini.axes.set_title('Left click to add or remove atoms') f_ini.show() def", "#img: an array of image data; ini_pos: initial positions; atom_name: a string for", "130, 191, 16)) self.label_12.setObjectName(\"label_12\") self.label_14 = QtWidgets.QLabel(VecMap) self.label_14.setGeometry(QtCore.QRect(20, 510, 381, 16)) self.label_14.setObjectName(\"label_14\") self.lineEdit_4", "angle distribution.</p></body></html>\")) self.pushButton_7.setText(_translate(\"VecMap\", \"Load from csv\")) self.pushButton_8.setText(_translate(\"VecMap\", \"Disclaimer\")) self.label_19.setText(_translate(\"VecMap\", \"VecMap 0.1.1 Released: 06/13/2020", "appropriate separation factor' #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True) sep = int(self.lineEdit.text()) sep_range = list(range(sep - 4, sep", "100 self.fig = Figure((10.0, 10.0), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) # Add a", "this app, show your appreciation by <a href=\\\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ&currency_code=USD&source=url\\\">donating me!</a>\"\\ \"<br>\"\\ \"Your support is", "directions. This is used to determine the coloring pattern. For single color rendering,", "disp_O, disp_atom # Read cal_site from the radio button # 0 to calculate", "======================================= def load_from_csv(self): # Load displacement data from the csv file saved previously", "version of VecMap --- a convenient tool to calculate atomic displacements in perovskite", "B site in relative to A site if self.radioButton.isChecked(): cal_site = 0 if", "+ my_path + title + '-disp.csv.') except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please", "Neighbor_positions = [] if not img_110: #calculate image [001] for atom in A:", "#Unit cell parameter estimated from the image. 
#========================================================================= #The main scripts start from", "returnValue = msg.exec() #=========== Define figure canvas =================================================== class PlotCanvas(QMainWindow): def __init__(self, parent=None):", "self.label_14.setText(_translate(\"VecMap\", \"List of angles (degrees) of vectors that will be colored differently:\")) self.lineEdit_4.setText(_translate(\"VecMap\",", "openfile_name = QFileDialog.getOpenFileName(self,'Select Image','','DigitalMicrograph (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg", "using 2D gaussian fit...') print('This may take time...') image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)", "font.setWeight(75) self.pushButton_12.setFont(font) self.pushButton_12.setObjectName(\"pushButton_12\") self.radioButton = QtWidgets.QRadioButton(VecMap) self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20)) self.radioButton.setChecked(True) self.radioButton.setObjectName(\"radioButton\") self.radioButton_2", "20)) self.checkBox_2.setObjectName(\"checkBox_2\") self.checkBox_3 = QtWidgets.QCheckBox(VecMap) self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20)) self.checkBox_3.setObjectName(\"checkBox_3\") self.pushButton_4 = QtWidgets.QPushButton(VecMap)", "left-click.</p></body></html>\")) self.label_6.setText(_translate(\"VecMap\", \"<html><head/><body><p>Try a few separation factors around the given number to determine", "here for more information!\")) self.pushButton_9.setText(_translate(\"VecMap\", \"About\")) self.pushButton_10.setText(_translate(\"VecMap\", \"Acknoledgments\")) self.pushButton_11.setText(_translate(\"VecMap\", \"Contact\")) self.pushButton_12.setText(_translate(\"VecMap\", \"Donate me!\"))", "= 1 cal_110 = img_110 #If the input image is [110], turn this", "620, 251, 22)) self.lineEdit_5.setObjectName(\"lineEdit_5\") self.pushButton_5 = QtWidgets.QPushButton(VecMap) self.pushButton_5.setGeometry(QtCore.QRect(280, 550, 101, 91)) self.pushButton_5.setObjectName(\"pushButton_5\") self.pushButton_6", "of tuples ideal_positions = [] Neighbor_positions = [] if not img_110: #calculate image", "self.checkBox.setObjectName(\"checkBox\") self.line = QtWidgets.QFrame(VecMap) self.line.setGeometry(QtCore.QRect(20, 90, 371, 21)) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName(\"line\") self.label =", "self.pushButton_14.setObjectName(\"pushButton_14\") self.lineEdit_3 = QtWidgets.QLineEdit(VecMap) self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20)) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.label_13 = QtWidgets.QLabel(VecMap) self.label_13.setGeometry(QtCore.QRect(110,", "time...') image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False) #Subtract both A and B from the original image #Refine O", "data[3], data[4], data[5])) disp_data.write('\\n') #Save the neigboring atoms as well with open(my_path +", "a_len = int(self.lineEdit_2.text()) if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar = 0 try:", "4: n_0 = Neighbor.pop(0) n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0]) n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0]) n_3 =", "self.pushButton_5.setText(_translate(\"VecMap\", \"Vector angle\\n\" \"distrubution\")) self.pushButton_6.setText(_translate(\"VecMap\", \"Show \\n\" \"map\")) self.label_18.setText(_translate(\"VecMap\", 
\"<html><head/><body><p>Generate a vector map.", "= QtWidgets.QLabel(VecMap) self.label.setGeometry(QtCore.QRect(20, 10, 121, 16)) self.label.setObjectName(\"label\") self.label_2 = QtWidgets.QLabel(VecMap) self.label_2.setGeometry(QtCore.QRect(130, 40, 251,", "scale) #Save the displacement data with open(my_path + title + '-{}-disp.csv'.format(disp_atom),'w') as disp_data:", "are permitted. Any redistribution must remain \"\\ \"the above copyright. When a scientific", "QtWidgets.QApplication(sys.argv) VecMap = QtWidgets.QWidget() ui = Ui_VecMap() ui.setupUi(VecMap) VecMap.show() sys.exit(app.exec_()) if __name__ ==", "= QtWidgets.QLabel(VecMap) self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16)) self.label_20.setObjectName(\"label_20\") self.pushButton_9 = QtWidgets.QPushButton(VecMap) self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120,", "parameters (average) from the image:') print('a = {:.3f} {}'.format(Ua, units)) print('c = {:.3f}", "650, 30, 20)) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.pushButton_14 = QtWidgets.QPushButton(VecMap) self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41)) self.pushButton_14.setObjectName(\"pushButton_14\") self.lineEdit_3", "parent=None): QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Plot') self.create_main_frame() def create_main_frame(self): self.main_frame = QWidget() # Create", "color_lst): color_lst = color_lst vec_data_color = copy.deepcopy(vec_data) #Make a copy so it does", "self.pushButton_8 ======================================= def disclaimer(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"<b>Disclaimer</b><br>\" \\ \"This app was", "find_atom(img, ini_pos, atom_name, atom_color='r'): #Refine atom positions for a sublattice #img: an array", "sep = int(self.lineEdit.text()) sep_range = list(range(sep - 4, sep + 5)) # Create", "A from the image using 2D gaussian fit...') print('This may take time...') image_without_A", "Generate vector map module ============================================= #========= Connected to self.pushButton_6 =========================================== def show_vec_map(self): a_len", "np.asarray(ini_position)[:,1], s=5, color='r') f_sep.axes[i].set_title('Separation = {}'.format(s_factor)) f_sep.show() except NameError: #Pop up an error", "scale) with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom),'w') as disp_data: disp_data.write('x (px), y (px),", "ideal_O_positions.append(o_0) o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2", "dp.set_ydata(y_pos) f.fig.canvas.draw() f.fig.canvas.flush_events() xy_positions = get_xy_pos_lists(A_positions) dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r',", "window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please load the image file first!\") msg.setWindowTitle(\"Hey guys\")", "# Read cal_site from the radio button # 0 to calculate A site", "closest_node(atom,A)[1] vec_len = distance.euclidean(arrow_end,atom) if vec_len > 0.14 / scale: continue dx =", "self.label.setObjectName(\"label\") self.label_2 = QtWidgets.QLabel(VecMap) self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51)) self.label_2.setTextFormat(QtCore.Qt.AutoText) self.label_2.setScaledContents(False) self.label_2.setWordWrap(True) self.label_2.setObjectName(\"label_2\") self.lineEdit", "to find the intersection point of two lines D = L1[0] * L2[1]", "self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20)) self.checkBox_4.setObjectName(\"checkBox_4\") 
self.line_3 = QtWidgets.QFrame(VecMap)
self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
Please see the "Disclaimer" before use! Hope you get good results and publications from it!
def readImage(file):
    #Load raw image file for process.
    #Require Hyperspy package
    s = load(file)
    return s

def getDirectory(file, s='.'):
    #Make the working directory and return the path.
    for idx in range(-1, -len(file), -1):
        if file[idx] == s:
            #find the file extension and remove it. '/' for parent path
            path = file[:idx] + '/'
            return path
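# --- Hedged usage example for the two helpers above (the file name is
# illustrative): readImage returns a calibrated HyperSpy Signal2D, while
# getDirectory only derives the folder name; the directory itself is
# created later in openfile().
# s = readImage('BTO_HAADF.dm4')
# my_path = getDirectory('BTO_HAADF.dm4')   # -> 'BTO_HAADF/'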
print('Refining B-site atoms done!')
#atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True)
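# --- Hedged note: the commented-out line above would persist the refined
# Atom_Lattice to HDF5; as far as I know it could be restored later with
# atomap.api.load_atom_lattice_from_hdf5(my_path + 'atom_position.hdf5').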
"Only check Refine Oxygen if O columns are visible.</p></body></html>"))
self.checkBox_4.setText(_translate("VecMap", "[011] Zone"))
# P: a given atom (x, y); A: a list of atoms; Ua: a threshold in px, 0.707*a for [001] and 0.5*a for [110]
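# --- Hedged sketch of the neighbour search the comment above describes:
# keep every atom of A inside radius Ua of P, sorted by distance to P
# (a simplified stand-in; the original also applies a tolerance factor).
from scipy.spatial import distance

def neighbors_within(P, A, Ua):
    x, y = P
    # squared-distance test avoids a sqrt per candidate atom
    N = [a for a in A if (a[0] - x) ** 2 + (a[1] - y) ** 2 < Ua ** 2]
    # sort so that N[0] is the nearest neighbour of P
    return sorted(N, key=lambda a: distance.euclidean(a, (x, y)))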
"Step 3. Refine atom positions"))
self.checkBox_2.setText(_translate("VecMap", "Refine Oxygen"))
ang_lst = [int(a) for a in ang_lst]
color_lst = str(self.lineEdit_5.text()).split()
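# --- Hedged sketch: a simplified stand-in for the set_arrow_color logic used
# below. Each displacement record carries its angle at index 5 (matching the
# csv column order); the record gets the color of the nearest reference angle.
import copy

def color_by_nearest_angle(vec_data, ang_lst, color_lst):
    colored = copy.deepcopy(vec_data)  # keep the caller's list untouched
    for vec in colored:
        ang = vec[5] % 360
        # circular distance from the vector angle to each reference angle
        diffs = [min(abs(ang - a) % 360, 360 - abs(ang - a) % 360) for a in ang_lst]
        vec.append(color_lst[diffs.index(min(diffs))])
    return colored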
"Adding/removing atoms by left-click.</p></body></html>"))
self.label_6.setText(_translate("VecMap", "<html><head/><body><p>Try a few separation factors around the given number to determine the best separation factor.</p></body></html>"))
#lattice_list = []
#lattice_list.append(sublattice_A)
print('='*50)
print('Finding the initial positions for B-site atoms...')
sublattice_A.construct_zone_axes()
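# --- Hedged sketch of the Atomap step this leads into: B-site columns sit
# between the A-site columns, so their initial guesses come from the
# "missing atom" positions along an A-sublattice zone axis (the zone index
# here is illustrative).
# zone = sublattice_A.zones_axis_average_distances[0]
# B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone)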
U_avg = (Ua + Uc) / 2 #Average of the two estimated lattice parameters
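# --- Hedged sketch: how Ua and Uc entering the average above are obtained
# from Atomap's averaged zone-axis vectors (z0, z1 in px) and the pixel
# size (nm/px).
import math

def lattice_param_nm(zone_vector, scale):
    # length of the average atom-to-atom vector, converted from px to nm
    return math.hypot(zone_vector[0], zone_vector[1]) * scale

# Ua = lattice_param_nm(z0, scale)
# Uc = lattice_param_nm(z1, scale)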
print('The vector map has been saved to ' + my_path + title + "_{}_vec_map.tif! Enjoy!".format(disp_atom))
except NameError:
    msg = QMessageBox()
    msg.setIcon(QMessageBox.Critical)
    msg.setText("Please calculate the displacement first!")
    msg.setWindowTitle("Hey guys")
    returnValue = msg.exec()
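# --- Hedged sketch: the error-dialog pattern repeated throughout this app,
# factored into a helper (show_error is a name invented for illustration).
from PyQt5.QtWidgets import QMessageBox

def show_error(text, title="Hey guys"):
    msg = QMessageBox()
    msg.setIcon(QMessageBox.Critical)
    msg.setText(text)
    msg.setWindowTitle(title)
    return msg.exec()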
Initialize atom positions\"))", "first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #==== Find separation module ======================================================== #==== Connected", "#========= Connected to self.pushButton_14 =========================================== def show_O_vec_map(self): O_len = int(self.lineEdit_3.text()) if self.checkBox_5.isChecked(): s_bar", "-len(file), -1): if file[idx] == s: #find the file extension and remove it.", "f_O_site.axes.set_axis_off() f_O_site.show() f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot all the", "'Use Arrow keys to find an appropriate separation factor' #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True) sep = int(self.lineEdit.text())", "import numpy as np import matplotlib.pyplot as plt import math import copy from", "O_map == 1: ap_2 = ap_O.tolist() ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale) disp_O", "def refine_atom_position(self): #Global variables: global ap_A, ap_B, ap_O, Ua, Uc, find_O #Read checkboxes", "B from the image using 2D gaussian fit...') print('This may take time...') image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False)", "+ n_2[0]) / 2, (n_0[1] + n_2[1]) / 2 ideal_O_positions.append(o_1) o_2 = (n_1[0]", "= (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2 ideal_O_positions.append(o_2) o_3", "parent path path = file[:idx] + '/' return path def find_atom(img, ini_pos, atom_name,", "A_positions, f_ini A_positions = A_positions_ini.tolist() f_ini = PlotCanvas() f_ini.setWindowTitle('Initial atom positions for refining')", "scale) disp_O = find_displacement(ap_2, ideal_O_pos, scale) with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom),'w') as", "= distance.euclidean(arrow_end,atom) if vec_len > 0.14 / scale: continue dx = arrow_end[0]-atom[0] dy", "lin_data = lin.strip().split(', ') disp_data.append([float(data) for data in lin_data]) return disp_data #====Application entry==================================", "530, 181, 16)) self.label_15.setObjectName(\"label_15\") self.label_16 = QtWidgets.QLabel(VecMap) self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16)) self.label_16.setObjectName(\"label_16\") self.label_17", "+ z1[1]**2) * scale print('='*50) print('Estimated lattice parameters (average) from the image:') print('a", "Generate O vector map module ============================================= #========= Connected to self.pushButton_14 =========================================== def show_O_vec_map(self):", "+ '/' return path def find_atom(img, ini_pos, atom_name, atom_color='r'): #Refine atom positions for", "from matplotlib_scalebar.scalebar import ScaleBar #====Helper functions, do not change==== def readImage(file): #Load raw", "A site in relative to B site; 1 to calculate B site in", "atom_color: a string for color #img_110: For [110] image sublattice = Sublattice(ini_pos, image=img,", "QtWidgets.QPushButton(VecMap) self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41)) self.pushButton_14.setObjectName(\"pushButton_14\") self.lineEdit_3 = QtWidgets.QLineEdit(VecMap) self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20))", "initial positions for O AB_positions = ap_A.tolist() + ap_B.tolist() sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A", "math_center(a, b, c, d): #Define a function to find the mathematical center of", "image [001] for atom in A: Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 
    #===== Open file module: set up global variables such as path etc. ==================
    #===== Connected to self.pushButton =================================================
    def openfile(self):
        openfile_name = QFileDialog.getOpenFileName(self, 'Select Image', '',
                        'DigitalMicrograph (*.dm3 , *.dm4);;Image files (*.tif , *.tiff)')
        global file, my_path, file_path, title, scale, units, s, image, ABF, img_110
        file = openfile_name[0]
        if self.checkBox.isChecked():
            #Set ABF toggle from the checkbox
            ABF = 1
        else:
            ABF = 0
        if self.checkBox_4.isChecked():
            #Set the [011] zone toggle from the checkbox
            img_110 = 1
        else:
            img_110 = 0
        print('{} has been loaded!'.format(file))
        my_path = getDirectory(file)          #Set the working path
        file_path = getDirectory(file, '/')   #Set the parent path
        if not os.path.exists(my_path):
            os.makedirs(my_path)
        s = readImage(file)
        title = s.metadata.General.title
        scale = s.axes_manager[0].scale   #Read the scale data from the image
        units = s.axes_manager[0].units   #Read units
        s.save(my_path + 'Original image.hspy', overwrite=True) #Save a backup file in hspy format
        image = s.data
        if ABF == 1:
            s.data = np.divide(1, s.data) #Inverse the ABF contrast to make an ADF-like image
        # Draw an image
        global f_original_img
        f_original_img = PlotCanvas()
        f_original_img.setWindowTitle(file)
        f_original_img.axes.imshow(image)
        f_original_img.axes.set_axis_off()
        f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title))
        f_original_img.show()

    #==== Initialize atom position module ===============================================
    #==== Connected to self.pushButton_2 ================================================
    def ini_atom_position(self):
        sep = int(self.lineEdit.text())
        try:
            A_positions_ini = get_atom_positions(s, separation=sep)
            global A_positions, f_ini
            A_positions = A_positions_ini.tolist()
            f_ini = PlotCanvas()
            f_ini.setWindowTitle('Initial atom positions for refining')
            f_ini.axes.imshow(s.data)
            f_ini.axes.set_axis_off()
            f_ini.axes.set_title('Left click to add or remove atoms')
            f_ini.show()

            def onclick(event):
                if event.inaxes != f_ini.axes:
                    return
                if event.button == 1: # Left mouse button
                    x = float(event.xdata)
                    y = float(event.ydata)
                    atom_nearby = closest_node((x, y), A_positions)[0]
                    if distance.euclidean((x, y), A_positions[atom_nearby]) > 5:
                        A_positions.append([x, y])
                    else:
                        A_positions.pop(atom_nearby)
                    replot(f_ini)

            def get_xy_pos_lists(atom_lst):
                return np.asarray(atom_lst)[:, 0], np.asarray(atom_lst)[:, 1]

            def replot(f):
                x_pos, y_pos = get_xy_pos_lists(A_positions)
                dp.set_xdata(x_pos)
                dp.set_ydata(y_pos)
                f.fig.canvas.draw()
                f.fig.canvas.flush_events()

            xy_positions = get_xy_pos_lists(A_positions)
            dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='')
            cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load an image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #==== Find separation module ========================================================
    #==== Connected to self.pushButton_3 ================================================
    def find_separation(self):
        #Use Arrow keys to find an appropriate separation factor
        #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True)
        sep = int(self.lineEdit.text())
        try:
            #Set up a 3x3 canvas for drawing
            global f_sep
            f_sep = SeparationCanvas()
            for i in range(9):
                s_factor = sep - 4 + i
                f_sep.axes[i].set_aspect('equal')
                f_sep.axes[i].set_axis_off()
                if s_factor < 1:
                    continue
                ini_position = get_atom_positions(s, separation=s_factor)
                f_sep.axes[i].imshow(s.data)
                f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r')
                f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
            f_sep.show()
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load an image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
    #==== Refine atom position module ===================================================
    #==== Connected to self.pushButton_4 ================================================
    def refine_atom_position(self):
        #Global variables:
        global ap_A, ap_B, ap_O, Ua, Uc, find_O
        #Read checkboxes
        if self.checkBox_2.isChecked():
            find_O = 1
        else:
            find_O = 0
        if self.checkBox_3.isChecked():
            plotpos = 1
        else:
            plotpos = 0
        try:
            #Refine atom positions
            print('='*50)
            print('Refining atom positions for A-site atoms...')
            print('This may take time...')
            sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
            print('Refining A-site atoms done!')
            ap_A = sublattice_A.atom_positions #Refined atom positions for A-site. NumPy array.
            #lattice_list = []
            #lattice_list.append(sublattice_A)
            print('='*50)
            print('Finding the initial positions for B-site atoms...')
            sublattice_A.construct_zone_axes()
            #Find the zone axis for the initial position of B: typically 3 for [001] and 1 for [110]
            if img_110 == 1:
                zone_axis = sublattice_A.zones_axis_average_distances[1]
            else:
                zone_axis = sublattice_A.zones_axis_average_distances[2]
            #Calculate lattice parameters from the two shortest zone vectors
            z0 = sublattice_A.zones_axis_average_distances[0]
            z1 = sublattice_A.zones_axis_average_distances[1]
            Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale
            Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale
            print('='*50)
            print('Estimated lattice parameters (average) from the image:')
            print('a = {:.3f} {}'.format(Ua, units))
            print('c = {:.3f} {}'.format(Uc, units))
            B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)
            #Remove A-site atoms from the image
            print('='*50)
            print('Subtracting sublattice A from the image using 2D gaussian fit...')
            print('This may take time...')
            image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)
            #Refine B-site atoms
            print('='*50)
            print('Refining atom positions for sublattice B...')
            print('Almost there...')
            sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue')
            ap_B = sublattice_B.atom_positions #Refined atom positions for B-site. NumPy array.
            print('Refining B-site atoms done!')
            #lattice_list.append(sublattice_B)
            #Find the position of O atoms
            if find_O == 1:
                #Find initial positions for O
                AB_positions = ap_A.tolist() + ap_B.tolist()
                sublattice_AB = Sublattice(AB_positions, image=s.data, color='y', name='Sublattice A + B')
                sublattice_AB.construct_zone_axes()
                zone_axis_002 = sublattice_AB.zones_axis_average_distances[2] #Only work for [001] currently
                O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002)
                #Remove both A and B from the original image
                print('='*50)
                print('Subtracting sublattice B from the image using 2D gaussian fit...')
                print('This may take time...')
                image_without_AB = remove_atoms_from_image_using_2d_gaussian(sublattice_B.image, sublattice_B, show_progressbar=False)
                #Refine O positions
                print('='*50)
                print('Refining atom positions for O...')
                sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g')
                ap_O = sublattice_O.atom_positions #Refined atom positions for O. NumPy array.
                print('Refining O atoms done!')
                #lattice_list.append(sublattice_O)
            print('Refining atoms done!')
            #Construct an atom lattice and save it with the original image as an hdf5 file. This file can be called later.
            #atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True)
            #=======================
            #Plot and save figures
            #=======================
            if plotpos == 1:
                global f_A_site, f_B_site, f_AB
                #Plot A-site atom positions with the original image overlaid.
                f_A_site = PlotCanvas()
                f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms')
                f_A_site.axes.imshow(image)
                f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_A_site.axes.set_axis_off()
                f_A_site.show()
                f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot B-site atom positions with the original image overlaid.
                f_B_site = PlotCanvas()
                f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
                f_B_site.axes.imshow(image)
                f_B_site.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_B_site.axes.set_axis_off()
                f_B_site.show()
                f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot A-site and B-site on the image
                f_AB = PlotCanvas()
                f_AB.setWindowTitle('VecMap0.1: A-site atoms vs. B-site atoms')
                f_AB.axes.imshow(image)
                f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_AB.axes.set_axis_off()
                f_AB.show()
                f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot O atoms if available
                if find_O == 1:
                    global f_O_site, f_all
                    f_O_site = PlotCanvas()
                    f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms')
                    f_O_site.axes.imshow(image)
                    f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_O_site.axes.set_axis_off()
                    f_O_site.show()
                    f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif', dpi=600, bbox_inches='tight')
                    #Plot all the atoms on the image
                    f_all = PlotCanvas()
                    f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. O atoms')
                    f_all.axes.imshow(image)
                    f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                    f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                    f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_all.axes.set_axis_off()
                    f_all.show()
                    f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif', dpi=600, bbox_inches='tight')
            if plotpos == 1:
                print('All figures have been saved to ' + my_path)
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please initialize the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
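#==== Illustrative sketch (added; not part of the original source) ==================
# The refinement chain inside find_atom()/refine_atom_position(), demonstrated on
# Atomap's built-in test signal so it runs without experimental data.
import atomap.api as am

s = am.dummy_data.get_simple_cubic_signal()
positions = am.get_atom_positions(s, separation=7)
sublattice = am.Sublattice(positions, image=s.data)
sublattice.find_nearest_neighbors()
sublattice.refine_atom_positions_using_center_of_mass()
sublattice.refine_atom_positions_using_2d_gaussian()
print(sublattice.atom_positions[:5])   # refined (x, y) positions, analogous to ap_A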
    #==================== Calculate displacement module =================================
    #==================== Connected to self.pushButton_13 ===============================
    def cal_disp(self):
        try:
            #Global variables
            global U_avg, disp, disp_O, disp_atom
            #Read cal_site from the radio buttons: 0 to calculate A site relative to
            #B site; 1 to calculate B site relative to A site
            if self.radioButton.isChecked():
                cal_site = 0
            if self.radioButton_2.isChecked():
                cal_site = 1
            cal_110 = img_110 #If the input image is [110]; the O map is not supported for [110] yet.
            O_map = find_O    #If enabled, will calculate the displacement of O atoms in relation to sublattice B.
            U_avg = (Ua + Uc)/2 #Unit cell parameter estimated from the image.
            #Calculation starts from here
            if cal_site == 0: #Calculate A site
                disp_atom = 'A-site'
                rel_atom = 'B-site'
                ap_0 = ap_A.tolist()
                ap_1 = ap_B.tolist()
            else:
                disp_atom = 'B-site'
                rel_atom = 'A-site'
                ap_0 = ap_B.tolist()
                ap_1 = ap_A.tolist()
            print('='*50)
            print('====Calculate {} relative to {}===='.format(disp_atom, rel_atom))
            ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale)
            disp = find_displacement(ap_0, ideal_pos, scale)
            #Save the displacement data
            with open(my_path + title + '-{}-disp.csv'.format(disp_atom), 'w') as disp_data:
                disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                for data in disp:
                    disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                    disp_data.write('\n')
            print('Atomic displacement data saved to ' + my_path + title + '-disp.csv.')
            #Save the neighboring atoms as well
            with open(my_path + 'neighboring atoms.csv', 'w') as neighbor_data:
                for data in neighbor_pos:
                    n = len(data)
                    for idx in range(n):
                        neighbor_data.write('{0}, {1}, '.format(*data[idx]))
                    neighbor_data.write('\n')
            #Calculate the O map and save it
            if O_map == 1:
                try:
                    ap_2 = ap_O.tolist()
                    ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale)
                    disp_O = find_displacement(ap_2, ideal_O_pos, scale)
                    with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom), 'w') as disp_data:
                        disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                        for data in disp_O:
                            disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                            disp_data.write('\n')
                except NameError:
                    O_map = 0
                    print('No O displacement data was found! Will do {} atom displacement only!'.format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please refine the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #======== Display angle distribution of the vectors module ===========================
    #======== Connected to self.pushButton_5 ============================================
    def vec_ang_dist(self):
        try:
            global f_vec_ang_dist
            f_vec_ang_dist = PlotCanvas()
            f_vec_ang_dist.setWindowTitle('VecMap0.1: Angle distribution of the vectors')
            #histogram of the vector angles (column 5 of the displacement data)
            f_vec_ang_dist.axes.hist([data[5] for data in disp], bins=90, range=(0, 360))
            f_vec_ang_dist.axes.set_xlabel('Vector angle (degrees)')
            f_vec_ang_dist.axes.set_title('Use the cursor on the peak(s) to see the\n displacement directions')
            f_vec_ang_dist.show()
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
<NAME>\"))", "Contact button ==================================================== #============ Connected to self.pushButton_11 ======================================= def show_contact(self): msg = QMessageBox()", "0 and dx >= 0: vec_ang = math.degrees(math.atan(dy/dx)) elif dy >= 0 and", "inches, 100 dots-per-inch # self.dpi = 100 self.fig = Figure((10.0, 10.0), dpi=self.dpi) self.canvas", "atom positions for sublattice B...') print('Almost there...') sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms',", "= QVBoxLayout() vbox.addWidget(self.mpl_toolbar) vbox.addWidget(self.canvas) self.main_frame.setLayout(vbox) self.setCentralWidget(self.main_frame) #==================== Find separation canvas ========================================= class SeparationCanvas(QMainWindow):", "given atom (x,y); A: a list of atoms; Ua: A threashold in px,", "center = intersection(L1, L2) return center def find_ideal_pos(A, B, Ua, scale, img_110=False): #calculate", "self.label_21 = QtWidgets.QLabel(VecMap) self.label_21.setGeometry(QtCore.QRect(20, 460, 171, 16)) self.label_21.setObjectName(\"label_21\") self.pushButton_13 = QtWidgets.QPushButton(VecMap) self.pushButton_13.setGeometry(QtCore.QRect(200, 460,", "= find_displacement(ap_0, ideal_pos, scale) #Save the displacement data with open(my_path + title +", "msg.setIcon(QMessageBox.Critical) msg.setText(\"Please refine the atom positions first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #========", "#Save neighbors for plotting return ideal_positions, Neighbor_positions for atom in A: Neighbor =", "251, 51)) self.label_10.setTextFormat(QtCore.Qt.AutoText) self.label_10.setScaledContents(False) self.label_10.setWordWrap(True) self.label_10.setObjectName(\"label_10\") self.checkBox_4 = QtWidgets.QCheckBox(VecMap) self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20))", "line(a,M[diag_idx]) del M[diag_idx] L2 = line(M[0],M[1]) center = intersection(L1, L2) return center def", "A #A_com, A are lists of atom coordinates; Ua is the estimated lattice", "f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g') f_all.axes.set_axis_off() f_all.show() f_all.fig.savefig(my_path + title + '_A_B_O atoms' +", "======================================= def donate(self): msg = QMessageBox() msg.setText(\"I will make this app freely available", "angles!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #========= Generate O vector map module =============================================", "= load(file) return s def getDirectory(file, s='.'): #Make the working directory and return", "of colors should match the list of angles!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec()", "+ ap_B.tolist() sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A + B') sublattice_AB.construct_zone_axes() zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]#Only work", "self.pushButton_11.setObjectName(\"pushButton_11\") self.pushButton_12 = QtWidgets.QPushButton(VecMap) self.pushButton_12.setGeometry(QtCore.QRect(280, 780, 101, 58)) font = QtGui.QFont() font.setBold(True) font.setWeight(75)", "objects. 
# 10x10 inches, 100 dots-per-inch # self.dpi = 100 self.fig = Figure((10.0,", "msg.setText('Thanks for using VecMap') msg.setWindowTitle('Thank you!') returnValue = msg.exec() msg.buttonClicked.connect(disclaimerButtonClick) returnValue = msg.exec()", "ADF-like image # Draw an image global f_original_img f_original_img = PlotCanvas() f_original_img.setWindowTitle(file) f_original_img.axes.imshow(image)", "self.line_2.setGeometry(QtCore.QRect(20, 280, 371, 21)) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName(\"line_2\") self.label_9 = QtWidgets.QLabel(VecMap) self.label_9.setGeometry(QtCore.QRect(20, 300, 191,", "for idx in range(n): neighbor_data.write('{0}, {1}, '.format(*data[idx])) neighbor_data.write('\\n') #Calculate O map and save", "to self.pushButton_13 =============================== def cal_disp(self): try: #Global variables global U_avg, disp, disp_O, disp_atom", "scale data from the image units = s.axes_manager[0].units #Read units s.save(my_path + 'Original", "= f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='') cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick) except NameError:", "1024)) self.pushButton = QtWidgets.QPushButton(VecMap) self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41)) self.pushButton.setObjectName(\"pushButton\") self.checkBox = QtWidgets.QCheckBox(VecMap) self.checkBox.setGeometry(QtCore.QRect(150,", "Disclaimer button ==================================================== #============ Connected to self.pushButton_8 ======================================= def disclaimer(self): msg = QMessageBox()", "= lin.strip().split(', ') disp_data.append([float(data) for data in lin_data]) return disp_data #====Application entry================================== def", "f_all f_O_site = PlotCanvas() f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms') f_O_site.axes.imshow(image) f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1],", "range(9): s_factor = sep - 4 + i f_sep.axes[i].set_aspect('equal') f_sep.axes[i].set_axis_off() if s_factor <", "in ang_lst] ang_bond = [] for idx in range(len(ang_lst_mod)-1): ang_bond.append((ang_lst_mod[idx + 1] -", "show_O_vec_map(self): O_len = int(self.lineEdit_3.text()) if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar = 0", "O map and save if O_map == 1: ap_2 = ap_O.tolist() ideal_O_pos =", "#lattice_list.append(sublattice_A) print('='*50) print('Finding the initial positions for B-site atoms...') sublattice_A.construct_zone_axes() #Find the zone", "units)) print('c = {:.3f} {}'.format(Uc, units)) B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis) #Reomve A-site atoms from", "has been loaded!'.format(file)) my_path = getDirectory(file) #Set the working path file_path = getDirectory(file,", "more information and<br> source code from my <a href=\\\"http://www-personal.umich.edu/~taoma/VectorMap.html\\\">website</a>.\") msg.setWindowTitle(\"VecMap0.1: About\") returnValue =", "(p1[0]*p2[1] - p2[0]*p1[1]) return A, B, -C def intersection(L1, L2): #A function to", "16)) self.label.setObjectName(\"label\") self.label_2 = QtWidgets.QLabel(VecMap) self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51)) self.label_2.setTextFormat(QtCore.Qt.AutoText) self.label_2.setScaledContents(False) self.label_2.setWordWrap(True) self.label_2.setObjectName(\"label_2\")", "positions for A-site atoms...') print('This may take time...') sublattice_A = find_atom(s.data, A_positions, 'A-site", 
"head_length=a_len/3) #Add a scale bar if s_bar == 1: scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2)", "O columns are visible.</p></body></html>\")) self.checkBox_4.setText(_translate(\"VecMap\", \"[011] Zone\")) self.label_11.setText(_translate(\"VecMap\", \"Step 4. Generate a vector", "atoms...') print('This may take time...') sublattice_A = find_atom(s.data, A_positions, 'A-site atoms') print('Refining A-site", "\"List of angles (degrees) of vectors that will be colored differently:\")) self.lineEdit_4.setText(_translate(\"VecMap\", \"45\"))", "+ title + \"_{}_vec_map.tif! Enjoy!\".format(disp_atom)) except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate", "self.line = QtWidgets.QFrame(VecMap) self.line.setGeometry(QtCore.QRect(20, 90, 371, 21)) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName(\"line\") self.label = QtWidgets.QLabel(VecMap)", "f_A_site.axes.imshow(image) f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_A_site.axes.set_axis_off() f_A_site.show() f_A_site.fig.savefig(my_path + title + '_A-site atoms'", "ap_B[:,1], s=2, color='b') f_AB.axes.set_axis_off() f_AB.show() f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif',dpi=600,bbox_inches='tight')", "= msg.exec() #=========== Define figure canvas =================================================== class PlotCanvas(QMainWindow): def __init__(self, parent=None): QMainWindow.__init__(self,", "dx >= 0: vec_ang = math.degrees(math.atan(dy/dx)) elif dy >= 0 and dx <", "the vector angle distribution.</p></body></html>\")) self.pushButton_7.setText(_translate(\"VecMap\", \"Load from csv\")) self.pushButton_8.setText(_translate(\"VecMap\", \"Disclaimer\")) self.label_19.setText(_translate(\"VecMap\", \"VecMap 0.1.1", "retranslateUi(self, VecMap): _translate = QtCore.QCoreApplication.translate VecMap.setWindowTitle(_translate(\"VecMap\", \"VecMap0.1\")) #VecMap.setWindowIcon(QtGui.QIcon('icon.png')) self.pushButton.setText(_translate(\"VecMap\", \"Load Image\")) self.checkBox.setText(_translate(\"VecMap\", \"ABF/BF", "map has been saved to ' + my_path + title + \"_O_vec_map_by_{}.tif! Enjoy!\".format(disp_atom))", "def show_O_vec_map(self): O_len = int(self.lineEdit_3.text()) if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar =", "ang < 0: ang = ang + 360 for i in range(len(ang_bond)-1): if", "in lines[1:]: lin_data = lin.strip().split(', ') disp_data.append([float(data) for data in lin_data]) return disp_data", "string for color #img_110: For [110] image sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name)", "B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis) #Reomve A-site atoms from the image print('='*50) print('Subtracting sublattice A", "before use! Hope you get good results and publications from it! Version 0.1.1", "msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #==== Refine atom position module =================================================== #==== Connected", "810, 120, 28)) self.pushButton_11.setObjectName(\"pushButton_11\") self.pushButton_12 = QtWidgets.QPushButton(VecMap) self.pushButton_12.setGeometry(QtCore.QRect(280, 780, 101, 58)) font =", "B. 
    #============ Disclaimer button ====================================================
    #============ Connected to self.pushButton_8 =======================================
    def disclaimer(self):
        msg = QMessageBox()
        msg.setText("Redistribution and use in source, "
                    "with or without modification, are permitted. Any redistribution must remain "
                    "the above copyright. When a scientific publication is reached through the "
                    "app, please add the following reference: <br>"
                    "1. Ma, T. et al. <a href=\"https://doi.org/10.1103/PhysRevLett.123.217602\">Phys. Rev. Lett. 123, 217602 (2019).</a>"
                    "<br>"
                    "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.<br>")
        msg.setWindowTitle("VecMap0.1: Disclaimer")
        def disclaimerButtonClick():
            msg = QMessageBox()
            msg.setText('Thanks for using VecMap')
            msg.setWindowTitle('Thank you!')
            returnValue = msg.exec()
        msg.buttonClicked.connect(disclaimerButtonClick)
        returnValue = msg.exec()

    #============ About button =========================================================
    #============ Connected to self.pushButton_9 =======================================
    def show_about(self):
        msg = QMessageBox()
        # msg.setIcon(QMessageBox.Information)
        msg.setText("VecMap v0.1.1"
                    "<br>"
                    "Designed by Dr. <NAME>"
                    "<br>"
                    "06/13/2020"
                    "<br>"
                    "Find more information and<br> source code from my <a href=\"http://www-personal.umich.edu/~taoma/VectorMap.html\">website</a>.")
        msg.setWindowTitle("VecMap0.1: About")
        returnValue = msg.exec()

    #============ Acknowledgments button ===============================================
    #============ Connected to self.pushButton_10 ======================================
    def acknowledgments(self):
        msg = QMessageBox()
        msg.setText("This app was written with Python 3. The author "
                    "acknowledges the HyperSpy and Atomap packages used in the program. Please "
                    "consider citing/adding acknowledgement for Hyperspy "
                    "and Atomap packages in your publication:"
                    "<br>"
                    "<NAME> la et al."
                    "<br>"
                    "<NAME>. et al. <a href=\"https://doi.org/10.1186/s40679-017-0042-5\">Adv. Struct. Chem. Imaging 3, 9 (2017).</a>")
        msg.setWindowTitle("VecMap0.1: Acknowledgments")
        returnValue = msg.exec()

    #============ Contact button =======================================================
    #============ Connected to self.pushButton_11 ======================================
    def show_contact(self):
        msg = QMessageBox()
        msg.setText("<a href=\"mailto:<EMAIL>\"><EMAIL></a>")
        msg.setWindowTitle("VecMap0.1: Contact")
        returnValue = msg.exec()

    #============ Donate me button =====================================================
    #============ Connected to self.pushButton_12 ======================================
    def donate(self):
        msg = QMessageBox()
        msg.setText("I will make this app freely available for the society.<br>"
                    "If you like this app, show your appreciation by "
                    "<a href=\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ&currency_code=USD&source=url\">donating me!</a>"
                    "<br>"
                    "Your support is my motivation!<br>")
        msg.setWindowTitle("VecMap0.1: Donate me!")
        returnValue = msg.exec()
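#==== Illustrative sketch (added; not part of the original source) ==================
# All five information buttons share the QMessageBox pattern, reduced here to its
# core; the message text is a placeholder.
import sys
from PyQt5.QtWidgets import QApplication, QMessageBox

app = QApplication(sys.argv)
msg = QMessageBox()
msg.setText('Demo message<br>QMessageBox renders simple HTML, including <a href="https://example.com">links</a>')
msg.setWindowTitle('VecMap0.1: Demo')
returnValue = msg.exec()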
#=========== Define figure canvas ===================================================
class PlotCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 5x4 inches, 100 dots-per-inch
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Since we have only one plot, we could use add_axes
        # instead of add_subplot, but then the subplot
        # configuration tool in the navigation toolbar wouldn't
        # work.
        self.axes = self.fig.add_subplot(111)
        # Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

#==================== Find separation canvas =========================================
class SeparationCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Find separation')
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 10x10 inches, 100 dots-per-inch
        self.dpi = 100
        self.fig = Figure((10.0, 10.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        #A 3x3 grid of axes, one per candidate separation factor
        self.axes = [self.fig.add_subplot(3, 3, n) for n in range(1, 10)]
        self.fig.set_tight_layout(True)
        # Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

#==================== Modules and helper functions ===================================
from hyperspy.io import load
from scipy.spatial import distance
from matplotlib_scalebar.scalebar import ScaleBar
#Atomap supplies the sublattice tools used below; the exact import paths are assumed
#here, since they were not preserved in the source.
from atomap.api import Sublattice, get_atom_positions
from atomap.tools import remove_atoms_from_image_using_2d_gaussian

#====Helper functions, do not change====
def readImage(file):
    #Load raw image file for process.
    #Require Hyperspy package
    s = load(file)
    return s

def getDirectory(file, s='.'):
    #Make the working directory and return the path.
    for idx in range(-1, -len(file), -1):
        if file[idx] == s:
            #find the file extension and remove it; s='/' gives the parent path
            path = file[:idx] + '/'
            return path

def find_atom(img, ini_pos, atom_name, atom_color='r'):
    #Refine atom positions for a sublattice
    #img: an array of image data; ini_pos: initial positions;
    #atom_name: a string for name; atom_color: a string for color
    sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name)
    sublattice.find_nearest_neighbors()
    sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False)
    sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False)
    return sublattice #Return the refined Sublattice object

def find_neighboring_atoms(P, A, Ua, tol=1.2):
    #Define a function to find the neighboring atoms of a given atom P = (x, y)
    #A: a list of atoms; Ua: a threshold in px, 0.707*a for [001] and 0.5*a for [110]
    x, y = P
    #A list to store the neighboring atoms
    N = [atom for atom in A if (atom[0] - x) ** 2 + (atom[1] - y) ** 2 <= (Ua * tol) ** 2]
    N = sorted(N, key=lambda x: (x[0] ** 2 + x[1] ** 2) ** 0.5)
    return N

def closest_node(node, nodes):
    #A function to find the closest node in an array
    closest_index = distance.cdist([node], nodes).argmin()
    return closest_index, nodes[closest_index]

def line(p1, p2):
    #Find a line function from two points
    A = (p1[1] - p2[1])
    B = (p2[0] - p1[0])
    C = (p1[0] * p2[1] - p2[0] * p1[1])
    return A, B, -C

def intersection(L1, L2):
    #A function to find the intersection point of two lines
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    x = Dx / D
    y = Dy / D
    return x, y

def math_center(a, b, c, d):
    #Define a function to find the mathematical center of four points, a, b, c, d
    #Find the diagonal of a
    M = [b, c, d]
    diag_idx = distance.cdist([a], M).argmax()
    L1 = line(a, M[diag_idx])
    del M[diag_idx]
    L2 = line(M[0], M[1])
    center = intersection(L1, L2)
    return center
Will do {} atom displacement only!'.format(disp_atom))", "self.checkBox_2.isChecked(): find_O = 1 else: find_O = 0 if self.checkBox_3.isChecked(): plotpos = 1", "except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"No O displacement data exist!\") msg.setWindowTitle(\"Hey guys\")", "file: my_path = getDirectory(file,'/') s = readImage(my_path + 'Original image.hspy') title = s.metadata.General.title", "self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16)) self.label_20.setObjectName(\"label_20\") self.pushButton_9 = QtWidgets.QPushButton(VecMap) self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120, 28)) self.pushButton_9.setObjectName(\"pushButton_9\")", "atoms', atom_color='blue') ap_B = sublattice_B.atom_positions ##Refined atoms positions for B-site. NumPy array. print('Refining", "16)) self.label_15.setObjectName(\"label_15\") self.label_16 = QtWidgets.QLabel(VecMap) self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16)) self.label_16.setObjectName(\"label_16\") self.label_17 = QtWidgets.QLabel(VecMap)", "f_AB #Plot A-site atom positions with the original image overlayed. f_A_site = PlotCanvas()", "try: #Global variables global U_avg, disp, disp_O, disp_atom # Read cal_site from the", "the mpl Figure and FigCanvas objects. # 10x10 inches, 100 dots-per-inch # self.dpi", "un-distorted perovskite structure #only support [001] images ideal_O_positions = [] for atom in", "disp_atom openfile_name = QFileDialog.getOpenFileName(self,'Select the displacement data','','CSV (*.csv);;All Files (*)') file = openfile_name[0]", "A-site atoms from the image print('='*50) print('Subtracting sublattice A from the image using", "SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND.<br>\") msg.setWindowTitle(\"VecMap0.1: Disclaimer\") def", "as FigureCanvas from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar from matplotlib.figure import Figure class", "Define figure canvas =================================================== class PlotCanvas(QMainWindow): def __init__(self, parent=None): QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Plot')", "51)) self.pushButton_13.setObjectName(\"pushButton_13\") self.label_7 = QtWidgets.QLabel(VecMap) self.label_7.setGeometry(QtCore.QRect(20, 650, 41, 16)) self.label_7.setObjectName(\"label_7\") self.lineEdit_2 = QtWidgets.QLineEdit(VecMap)", "# self.dpi = 100 self.fig = Figure((10.0, 10.0), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame)", "self.pushButton_10.setObjectName(\"pushButton_10\") self.pushButton_11 = QtWidgets.QPushButton(VecMap) self.pushButton_11.setGeometry(QtCore.QRect(150, 810, 120, 28)) self.pushButton_11.setObjectName(\"pushButton_11\") self.pushButton_12 = QtWidgets.QPushButton(VecMap) self.pushButton_12.setGeometry(QtCore.QRect(280,", "atom position module =================================================== #==== Connected to self.pushButton_4 ================================================ def refine_atom_position(self): #Global variables:", "as disp_data: disp_data.write('x (px), y (px), x disp (px), y disp (px), disp", "Uc, find_O #Read checkboxes if self.checkBox_2.isChecked(): find_O = 1 else: find_O = 0", "#Set the parent path if not os.path.exists(my_path): os.makedirs(my_path) s = readImage(file) title =", "for Hyperspy \"\\ \"and Atomap packages in your publication:\"\\ \"<br>\" \"<NAME> la et", "= NavigationToolbar(self.canvas, self.main_frame) vbox = QVBoxLayout() 
#==== Helper functions, do not change ====
def readImage(file):
    #Load a raw image file for processing. Requires the HyperSpy package.
    s = load(file)
    return s

def getDirectory(file, s='.'):
    #Return a working path derived from the file name: s='.' strips the
    #file extension; s='/' returns the parent path.
    for idx in range(-1, -len(file), -1):
        if file[idx] == s:
            return file[:idx] + '/'

def find_atom(image_data, ini_pos, atom_name, atom_color='r'):
    #Refine atom positions with Atomap and return a Sublattice object.
    #image_data: an array of image data; ini_pos: initial positions;
    #atom_name: a string for the name; atom_color: a string for the color.
    sublattice = Sublattice(ini_pos, image=image_data, color=atom_color, name=atom_name)
    sublattice.find_nearest_neighbors()
    sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False)
    sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False)
    return sublattice

def find_neighboring_atoms(P, A, Ua, tol=1.2):
    #Find the atoms in list A that neighbor a given atom P(x, y).
    #Ua: search radius in pixels (the estimated lattice parameter already
    #converted from nm); tol widens the radius to tolerate lattice distortion.
    x, y = P
    N = [a for a in A if (a[0]-x)**2 + (a[1]-y)**2 < (Ua*tol)**2]
    #Store the neighboring atoms, sorted nearest-first with respect to P.
    N = sorted(N, key=lambda a: (a[0]-x)**2 + (a[1]-y)**2)
    return N

def closest_node(node, nodes):
    #A function to find the closest node in an array.
    closest_index = distance.cdist([node], nodes).argmin()
    return closest_index, nodes[closest_index]
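# A minimal sketch (not called by the GUI) of how the two search helpers
# behave; the coordinates and the 9.0 px radius below are made up purely
# for illustration. Note that find_neighboring_atoms also returns P itself
# when P is a member of the candidate list, as it is here.
def _demo_neighbor_search():
    atoms = [(0.0, 0.0), (10.0, 0.0), (0.0, 10.0), (7.0, 7.0)]
    idx, nearest = closest_node((6.0, 6.0), atoms)
    print(idx, nearest)  # -> 3 (7.0, 7.0)
    print(find_neighboring_atoms((0.0, 0.0), atoms, Ua=9.0, tol=1.2))
    # -> every atom within 9.0*1.2 px of (0, 0), sorted nearest-first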
def line(p1, p2):
    #Find the line through two points, in the form A*x + B*y = C.
    A = (p1[1] - p2[1])
    B = (p2[0] - p1[0])
    C = (p1[0]*p2[1] - p2[0]*p1[1])
    return A, B, -C

def intersection(L1, L2):
    #A function to find the intersection point of two lines (Cramer's rule);
    #returns False if the lines are parallel.
    D  = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        x = Dx / D
        y = Dy / D
        return x, y
    else:
        return False

def math_center(a, b, c, d):
    #Define the center of four points a, b, c, d as the intersection of the
    #two diagonals of the quadrilateral they form.
    #Find the diagonal through a: its partner is the point farthest from a.
    M = [b, c, d]
    diag_idx = distance.cdist([a], M).argmax()
    L1 = line(a, M[diag_idx])
    del M[diag_idx]
    L2 = line(M[0], M[1])
    center = intersection(L1, L2)
    return center
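# Sanity sketch (not called anywhere): the center of a unit square, with its
# corners fed in arbitrary order, should come back as (0.5, 0.5).
def _demo_math_center():
    corners = [(0, 0), (1, 1), (1, 0), (0, 1)]
    print(math_center(*corners))  # -> (0.5, 0.5)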
def find_ideal_pos(A, B, Ua, scale, img_110=False):
    #Calculate the ideal atomic positions for A in an undistorted perovskite
    #structure from the neighboring B atoms.
    #A, B: lists of atom coordinates in pixels; Ua: the estimated lattice
    #parameter in nm; scale: the image pixel size in nm/px.
    #Returns a list of ideal-position tuples and the neighbor sets used.
    ideal_positions = []
    Neighbor_positions = []
    if img_110:
        #[011] zone: each A column sits midway between two B columns.
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.5)
            if len(Neighbor) == 2:
                ap_center = ((Neighbor[0][0] + Neighbor[1][0]) / 2,
                             (Neighbor[0][1] + Neighbor[1][1]) / 2)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor)
    else:
        #[001] zone: each A column is the center of four B columns.
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
            if len(Neighbor) == 4:
                ap_center = math_center(*Neighbor)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor)  #Save neighbors for plotting
    return ideal_positions, Neighbor_positions

def find_ideal_O_pos(A, B, Ua, scale):
    #Calculate the ideal atomic positions for O in an undistorted perovskite
    #structure: the midpoints of the four B--B edges around each A column.
    #Only [001] images are supported.
    ideal_O_positions = []
    for atom in A:
        Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
        if len(Neighbor) == 4:
            n_0 = Neighbor.pop(0)
            n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_2 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_3 = Neighbor.pop()
            o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2
            ideal_O_positions.append(o_0)
            o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2
            ideal_O_positions.append(o_1)
            o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2
            ideal_O_positions.append(o_2)
            o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2
            ideal_O_positions.append(o_3)
    #Midpoints of edges shared between adjacent cells are exact duplicates;
    #remove them while preserving order.
    ideal_O_positions = list(dict.fromkeys(ideal_O_positions))
    return ideal_O_positions
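# Toy sketch (not called anywhere) of the O-position geometry: for a single
# undistorted cell with one A column centered in a square of four B columns,
# the ideal O columns are the four B--B edge midpoints. All values below are
# made up; with scale=1.0 the lattice parameter is given directly in pixels.
def _demo_ideal_O():
    A = [(5.0, 5.0)]
    B = [(0.0, 0.0), (10.0, 0.0), (0.0, 10.0), (10.0, 10.0)]
    print(find_ideal_O_pos(A, B, Ua=10.0, scale=1.0))
    # -> the four edge midpoints (5,0), (0,5), (10,5), (5,10), in some order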
def find_displacement(A, A_com, scale):
    #Find the atomic displacement of each ideal position in A_com relative to
    #the closest refined position in A.
    #Returns a list of [x, y, dx, dy, displacement, angle (deg)] per atom.
    disp = []
    for atom in A_com:
        arrow_end = closest_node(atom, A)[1]
        vec_len = distance.euclidean(arrow_end, atom)
        if vec_len > 0.14 / scale:
            continue  #Skip unphysically large vectors (> 0.14 nm): bad match
        dx = arrow_end[0] - atom[0]
        dy = arrow_end[1] - atom[1]
        #atan2 handles all four quadrants (and dx == 0) in one call; the
        #modulo maps the result onto 0-360 degrees.
        vec_ang = math.degrees(math.atan2(dy, dx)) % 360
        disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len, vec_ang])
    return disp

def set_arrow_color(vec_data, ang_lst, color_lst):
    #Assign a color to each displacement vector according to which angular
    #sector (defined by the reference angles in ang_lst) its direction hits.
    vec_data_color = copy.deepcopy(vec_data)  #Copy so the original list is untouched
    if len(ang_lst) == 1:
        for vec in vec_data_color:
            vec.append(color_lst[0])  #Single-color rendering
        return vec_data_color
    #Sector boundaries sit halfway between consecutive reference angles,
    #measured relative to the first angle.
    ang_lst_mod = [a - ang_lst[0] for a in ang_lst]
    ang_bond = []
    for idx in range(len(ang_lst_mod) - 1):
        ang_bond.append((ang_lst_mod[idx + 1] - ang_lst_mod[idx]) // 2 + ang_lst_mod[idx])
    ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1])
    for vec in vec_data_color:
        ang = vec[5] - ang_lst[0]
        if ang < 0:
            ang = ang + 360
        for i in range(len(ang_bond) - 1):
            if round(ang) in range(ang_bond[i], ang_bond[i + 1]):
                vec.append(color_lst[i + 1])
    #Anything outside the inner sectors wraps around to the first color.
    for vec in vec_data_color:
        if len(vec) == 6:
            vec.append(color_lst[0])
    return vec_data_color
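# Quick check (not called anywhere) that the single atan2 call used in
# find_displacement reproduces the quadrant-by-quadrant branches it replaced:
# one representative (dx, dy) per quadrant, angles in degrees on 0-360.
def _demo_angle():
    for dx, dy in [(1, 1), (-1, 1), (-1, -1), (1, -1)]:
        print(dx, dy, math.degrees(math.atan2(dy, dx)) % 360)
    # -> 45.0, 135.0, 225.0, 315.0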
def load_disp_data_from_csv(file):
    #Read a displacement CSV written by cal_disp back into a list of
    #[x, y, dx, dy, disp, angle] float lists.
    with open(file, 'r') as disp:
        disp_data = []
        lines = disp.readlines()
        print('Displacement data:\n')
        print(lines[0])  #Header line
        for lin in lines[1:]:
            lin_data = lin.strip().split(', ')
            disp_data.append([float(data) for data in lin_data])
    return disp_data
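# The CSV written by cal_disp and read back here has a one-line header
# followed by comma-space separated floats. A hypothetical two-row example
# (all values made up):
#
#   x (px), y (px), x disp (px), y disp (px), disp (pm), angle (deg)
#   102.3, 88.1, 0.42, -0.17, 18.5, 338.0
#   130.7, 88.4, 0.39, -0.21, 18.1, 331.7
#
# load_disp_data_from_csv skips the header and returns one
# [x, y, dx, dy, disp, angle] float list per atom column.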
#=========== Define figure canvas ===========================================
class PlotCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects:
        # 5x4 inches, 100 dots-per-inch.
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Since we have only one plot, a single subplot will do.
        self.axes = self.fig.add_subplot(111)
        # Create the navigation toolbar, tied to the canvas.
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
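# Minimal usage sketch (not executed at import; a running QApplication is
# required before instantiating any widget): every figure window in the app
# follows the same pattern -- create a canvas, draw on its axes, call show().
def _demo_plot_canvas():
    f = PlotCanvas()
    f.setWindowTitle('VecMap0.1: Example')
    f.axes.plot([0, 1], [0, 1])
    f.show()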
#==================== Find separation canvas ================================
class SeparationCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Find separation factors')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects:
        # 10x10 inches, 100 dots-per-inch.
        self.dpi = 100
        self.fig = Figure((10.0, 10.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Add a 3x3 axes layout, one panel per candidate separation factor.
        self.axes = [self.fig.add_subplot(3, 3, n) for n in range(1, 10)]
        self.fig.set_tight_layout(True)
        # Create the navigation toolbar, tied to the canvas.
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
#==================== Main GUI window =======================================
class Ui_VecMap(object):
    def setupUi(self, VecMap):
        VecMap.setObjectName("VecMap")
        VecMap.resize(402, 876)
        VecMap.setMinimumSize(QtCore.QSize(402, 836))
        VecMap.setMaximumSize(QtCore.QSize(1024, 1024))
        #Step 1: load image
        self.pushButton = QtWidgets.QPushButton(VecMap)
        self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41))
        self.checkBox = QtWidgets.QCheckBox(VecMap)      #ABF/BF image toggle
        self.checkBox_4 = QtWidgets.QCheckBox(VecMap)    #[011] zone toggle
        self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20))
        self.label = QtWidgets.QLabel(VecMap)
        self.label.setGeometry(QtCore.QRect(20, 10, 121, 16))
        self.label_2 = QtWidgets.QLabel(VecMap)
        self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51))
        self.label_2.setWordWrap(True)
        #Step 2: initialize atom positions
        self.lineEdit = QtWidgets.QLineEdit(VecMap)      #Separation factor
        self.lineEdit.setGeometry(QtCore.QRect(130, 130, 30, 20))
        self.label_3 = QtWidgets.QLabel(VecMap)
        self.label_4 = QtWidgets.QLabel(VecMap)
        self.label_5 = QtWidgets.QLabel(VecMap)
        self.label_5.setWordWrap(True)
        self.label_6 = QtWidgets.QLabel(VecMap)
        self.label_6.setWordWrap(True)
        self.pushButton_2 = QtWidgets.QPushButton(VecMap)
        self.pushButton_3 = QtWidgets.QPushButton(VecMap)
        self.pushButton_3.setGeometry(QtCore.QRect(20, 230, 91, 41))
        #Step 3: refine atom positions
        self.label_9 = QtWidgets.QLabel(VecMap)
        self.label_10 = QtWidgets.QLabel(VecMap)
        self.label_10.setWordWrap(True)
        self.checkBox_2 = QtWidgets.QCheckBox(VecMap)    #Refine Oxygen
        self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20))
        self.checkBox_3 = QtWidgets.QCheckBox(VecMap)    #Save result plots
        self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20))
        self.pushButton_4 = QtWidgets.QPushButton(VecMap)
        self.pushButton_4.setGeometry(QtCore.QRect(20, 370, 91, 41))
        #Step 4: calculate displacement and draw the maps
        self.label_21 = QtWidgets.QLabel(VecMap)
        self.radioButton = QtWidgets.QRadioButton(VecMap)    #A-site
        self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20))
        self.radioButton.setChecked(True)
        self.radioButton_2 = QtWidgets.QRadioButton(VecMap)  #B-site
        self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20))
        self.pushButton_13 = QtWidgets.QPushButton(VecMap)   #Calculate
        self.pushButton_7 = QtWidgets.QPushButton(VecMap)    #Load from csv
        self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51))
        self.label_14 = QtWidgets.QLabel(VecMap)
        self.lineEdit_4 = QtWidgets.QLineEdit(VecMap)    #List of angles
        self.lineEdit_4.setGeometry(QtCore.QRect(20, 550, 251, 22))
        self.label_15 = QtWidgets.QLabel(VecMap)
        self.label_16 = QtWidgets.QLabel(VecMap)
        self.label_17 = QtWidgets.QLabel(VecMap)
        self.lineEdit_5 = QtWidgets.QLineEdit(VecMap)    #List of colors
        self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251, 22))
        self.label_7 = QtWidgets.QLabel(VecMap)          #Arrow scale label
        self.lineEdit_2 = QtWidgets.QLineEdit(VecMap)    #Arrow scale (cations)
        self.lineEdit_2.setGeometry(QtCore.QRect(60, 650, 30, 20))
        self.lineEdit_3 = QtWidgets.QLineEdit(VecMap)    #Arrow scale (O)
        self.checkBox_5 = QtWidgets.QCheckBox(VecMap)    #Scale bar toggle
        self.checkBox_5.setChecked(True)
        self.pushButton_5 = QtWidgets.QPushButton(VecMap)    #Vector angle distribution
        self.pushButton_6 = QtWidgets.QPushButton(VecMap)    #Show vector map
        self.pushButton_14 = QtWidgets.QPushButton(VecMap)   #Oxygen map
        #Footer buttons
        self.pushButton_8 = QtWidgets.QPushButton(VecMap)    #Disclaimer
        self.pushButton_9 = QtWidgets.QPushButton(VecMap)    #About
        self.pushButton_10 = QtWidgets.QPushButton(VecMap)   #Acknowledgments
        self.pushButton_11 = QtWidgets.QPushButton(VecMap)   #Contact
        self.pushButton_12 = QtWidgets.QPushButton(VecMap)   #Donate
        self.retranslateUi(VecMap)
        QtCore.QMetaObject.connectSlotsByName(VecMap)
        #======= Connect all the functions =======
        self.pushButton.clicked.connect(self.openfile)
        self.pushButton_2.clicked.connect(self.ini_atom_position)
        self.pushButton_3.clicked.connect(self.find_separation)
        self.pushButton_4.clicked.connect(self.refine_atom_position)
        self.pushButton_13.clicked.connect(self.cal_disp)
        self.pushButton_5.clicked.connect(self.vec_ang_dist)
        self.pushButton_6.clicked.connect(self.show_vec_map)
        self.pushButton_14.clicked.connect(self.show_O_vec_map)
        self.pushButton_7.clicked.connect(self.load_disp)
        self.pushButton_8.clicked.connect(self.disclaimer)
        self.pushButton_9.clicked.connect(self.about)
        self.pushButton_10.clicked.connect(self.acknowledgments)
        self.pushButton_11.clicked.connect(self.show_contact)
        self.pushButton_12.clicked.connect(self.donate)

    def retranslateUi(self, VecMap):
        _translate = QtCore.QCoreApplication.translate
        VecMap.setWindowTitle(_translate("VecMap", "VecMap0.1"))
        self.pushButton.setText(_translate("VecMap", "Load Image"))
        self.checkBox.setText(_translate("VecMap", "ABF/BF image"))
        self.checkBox_4.setText(_translate("VecMap", "[011] Zone"))
        self.label.setText(_translate("VecMap", "Step 1. Load image"))
        self.label_2.setText(_translate("VecMap", "<html><head/><body><p>Load a HR-STEM image with a perovskite structure. Supports [001] and [011] zone axes. A filtered image is preferred.</p></body></html>"))
        self.lineEdit.setText(_translate("VecMap", "8"))
        self.label_3.setText(_translate("VecMap", "Step 2. Initialize atom positions"))
        self.label_4.setText(_translate("VecMap", "Separation factor"))
        self.pushButton_2.setText(_translate("VecMap", "Initialize"))
        self.pushButton_3.setText(_translate("VecMap", "Find \nseparation"))
        self.label_5.setText(_translate("VecMap", "<html><head/><body><p>Input an appropriate separation factor to initialize the atom positions for refining. Add/remove atoms by left-clicking.</p></body></html>"))
        self.label_6.setText(_translate("VecMap", "<html><head/><body><p>Try a few separation factors around the given number to determine the best separation factor.</p></body></html>"))
        self.label_9.setText(_translate("VecMap", "Step 3. Refine atom positions"))
        self.checkBox_2.setText(_translate("VecMap", "Refine Oxygen"))
        self.checkBox_3.setText(_translate("VecMap", "Save result plots"))
        self.pushButton_4.setText(_translate("VecMap", "Refine"))
        self.label_10.setText(_translate("VecMap", "<html><head/><body><p>Refine atom positions. Check [001] or [011] zone. Only check Refine Oxygen if O columns are visible.</p></body></html>"))
        self.label_21.setText(_translate("VecMap", "Select which site to calculate"))
        self.pushButton_13.setText(_translate("VecMap", "Calculate"))
        self.radioButton.setText(_translate("VecMap", "A-site"))
        self.radioButton_2.setText(_translate("VecMap", "B-site"))
        self.label_7.setText(_translate("VecMap", "Scale:"))
        self.lineEdit_2.setText(_translate("VecMap", "10"))
        self.lineEdit_3.setText(_translate("VecMap", "6"))
        self.label_14.setText(_translate("VecMap", "List of angles; vectors in different sectors will be colored differently:"))
        self.lineEdit_4.setText(_translate("VecMap", "45"))
        self.label_15.setText(_translate("VecMap", "e.g., 45 135 225 315"))
        self.label_16.setText(_translate("VecMap", "List of colors (should match the angles):"))
        self.label_17.setText(_translate("VecMap", "e.g., yellow blue red green"))
        self.lineEdit_5.setText(_translate("VecMap", "yellow"))
        self.checkBox_5.setText(_translate("VecMap", "Scale bar"))
        self.pushButton_5.setText(_translate("VecMap", "Vector angle\ndistribution"))
        self.pushButton_6.setText(_translate("VecMap", "Show \nvector map"))
        self.pushButton_14.setText(_translate("VecMap", "Oxygen\n map"))
        self.pushButton_7.setText(_translate("VecMap", "Load from csv"))
        self.pushButton_8.setText(_translate("VecMap", "Disclaimer"))
        self.pushButton_9.setText(_translate("VecMap", "About"))
        self.pushButton_10.setText(_translate("VecMap", "Acknowledgments"))
        self.pushButton_11.setText(_translate("VecMap", "Contact"))
        self.pushButton_12.setText(_translate("VecMap", "Donate me!"))
    #===== Open file and set up global variables such as path etc. ==========
    #===== Connected to self.pushButton ======================================
    def openfile(self):
        openfile_name = QFileDialog.getOpenFileName(self, 'Select Image', '',
                        'DigitalMicrograph (*.dm3 , *.dm4);;All Files (*)')
        file = openfile_name[0]
        global ABF, img_110, my_path, file_path, s, title, scale, units, image
        if self.checkBox.isChecked():    #ABF/BF toggle from the checkbox
            ABF = 1
        else:
            ABF = 0
        if self.checkBox_4.isChecked():  #[011] zone toggle
            img_110 = 1
        else:
            img_110 = 0
        if file:
            print('{} has been loaded!'.format(file))
            my_path = getDirectory(file)         #Set the working path
            file_path = getDirectory(file, '/')  #Set the parent path
            if not os.path.exists(my_path):
                os.makedirs(my_path)
            s = readImage(file)
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale  #Read the pixel size from the image
            units = s.axes_manager[0].units  #Read the units
            s.save(my_path + 'Original image.hspy', overwrite=True)  #Backup in hspy format
            image = s.data
            if ABF == 1:
                s.data = np.divide(1, s.data)  #Invert the ABF contrast to make an ADF-like image

    #==== Find separation module =============================================
    #==== Connected to self.pushButton_3 =====================================
    def find_separation(self):
        sep = int(self.lineEdit.text())
        try:
            global f_sep
            f_sep = SeparationCanvas()
            for i in range(9):
                s_factor = sep - 4 + i
                f_sep.axes[i].set_aspect('equal')
                f_sep.axes[i].set_axis_off()
                if s_factor < 1:
                    continue
                ini_position = get_atom_positions(s, separation=s_factor)
                f_sep.axes[i].imshow(s.data)
                f_sep.axes[i].scatter(np.asarray(ini_position)[:,0],
                                      np.asarray(ini_position)[:,1], s=5, color='r')
                f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
            f_sep.show()
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #==== Initialize atom position module ====================================
    #==== Connected to self.pushButton_2 =====================================
    def ini_atom_position(self):
        sep = int(self.lineEdit.text())
        try:
            A_positions_ini = get_atom_positions(s, separation=sep)
            global A_positions, f_ini, dp
            A_positions = A_positions_ini.tolist()
            f_ini = PlotCanvas()
            f_ini.setWindowTitle('Initial atom positions for refining')
            f_ini.axes.imshow(s.data)
            f_ini.axes.set_axis_off()
            f_ini.axes.set_title('Left click to add or remove atoms')
            f_ini.show()

            def onclick(event):
                #Left click on empty area: add an atom; on an atom: remove it.
                x, y = event.xdata, event.ydata
                atom_nearby = closest_node((x, y), A_positions)[0]
                if distance.euclidean((x, y), A_positions[atom_nearby]) > 5:
                    A_positions.append([x, y])
                else:
                    A_positions.pop(atom_nearby)
                replot(f_ini)

            def get_xy_pos_lists(atom_lst):
                return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1]

            def replot(f):
                x_pos, y_pos = get_xy_pos_lists(A_positions)
                dp.set_xdata(x_pos)
                dp.set_ydata(y_pos)
                f.fig.canvas.draw()

            xy_positions = get_xy_pos_lists(A_positions)
            dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1],
                                  marker='o', ms=5, color='r', ls='')
            cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
# lattice_list = []
# lattice_list.append(sublattice_A)
print('=' * 50)
print('Finding the initial positions for B-site atoms...')
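#---- A condensed sketch of how the B-site starting positions are derived
#---- (assuming atomap's Sublattice API, which this file already uses; the
#---- function name and zone_index parameter are mine):
def initial_b_positions(sublattice_a, zone_index=1):
    # Build the zone-axis data, then take the atoms "missing" along one
    # zone vector of the refined A sublattice as B-site starting positions.
    # The right zones_axis_average_distances index depends on the zone axis
    # ([001] vs [110]); the app selects it from the [011] checkbox.
    sublattice_a.construct_zone_axes()
    zone = sublattice_a.zones_axis_average_distances[zone_index]
    return sublattice_a.find_missing_atoms_from_zone_vector(zone)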
"Filtered image is preferred.</p><p><br/></p></body></html>"))
self.lineEdit.setText(_translate("VecMap", "8"))
self.label_3.setText(_translate("VecMap", "Step 2.
        # 10x10 inches at 100 dots-per-inch
        self.dpi = 100
Version 0.1.1 06/13/2020
''')
    print('=' * 50)
    import sys
    app = QtWidgets.QApplication(sys.argv)
    VecMap = QtWidgets.QWidget()
              "Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
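#---- The try/except NameError + QMessageBox pattern above guards every
#---- step against being run out of order. A small helper capturing it
#---- (hypothetical; the original repeats these lines inline each time):
from PyQt5.QtWidgets import QMessageBox

def show_error(text, title="Hey guys"):
    # Pop up a critical message box and block until it is dismissed.
    msg = QMessageBox()
    msg.setIcon(QMessageBox.Critical)
    msg.setText(text)
    msg.setWindowTitle(title)
    return msg.exec()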
    for idx in range(len(file) - 1, -1, -1):  # scan backwards so the last separator wins
        self.axes = self.fig.add_subplot(111)  # Create the single axes all plot calls draw on
            "Imaging 3, 9 (2017).</a>")
        msg.setWindowTitle("VecMap0.1: Acknowledgments")
        returnValue = msg.exec()

    #============ Contact button ====================================================
def get_xy_pos_lists(atom_lst):
    return np.asarray(atom_lst)[:, 0], np.asarray(atom_lst)[:, 1]
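#---- A distilled, self-contained version of the click-editing logic in
#---- onclick() above (function name and threshold default are mine):
#---- a left-click adds a marker, unless an existing one lies within
#---- `threshold` pixels, in which case that marker is removed instead.
import math

def toggle_atom(click_xy, positions, threshold=5.0):
    if not positions:
        positions.append(list(click_xy))
        return
    # Index of the existing marker nearest to the click.
    idx = min(range(len(positions)), key=lambda i: math.dist(click_xy, positions[i]))
    if math.dist(click_xy, positions[idx]) > threshold:
        positions.append(list(click_xy))   # far from everything: add an atom
    else:
        positions.pop(idx)                 # clicked on an atom: remove it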
# P: a given atom position (x, y); A: the list of candidate atom positions
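#---- A compact sketch of the neighbor search this comment describes
#---- (my wording; the original builds the same list comprehension with
#---- radius = Ua / scale * tolerance, i.e. the lattice parameter in pixels):
import math

def find_neighbors(p, atoms, radius):
    # Keep every atom strictly inside `radius` of p, sorted by distance from p.
    x, y = p
    near = [a for a in atoms if (a[0] - x) ** 2 + (a[1] - y) ** 2 < radius ** 2]
    return sorted(near, key=lambda a: math.hypot(a[0] - x, a[1] - y))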
<NAME>"))
self.label_20.setText(_translate("VecMap", "Check here for more information!"))
self.pushButton_9.setText(_translate("VecMap", "About"))
self.pushButton_10.setText(_translate("VecMap", "Acknowledgments"))
The author " \
            "acknowledges the HyperSpy
line(M[0], M[1])
    center = intersection(L1, L2)
    return center
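#---- The two helpers used by math_center above, restated with Cramer's
#---- rule spelled out (the names line_coeffs/line_intersection are mine;
#---- the originals are line() and intersection()):
def line_coeffs(p1, p2):
    # Line through p1 and p2 written as A*x + B*y = C.
    a = p1[1] - p2[1]
    b = p2[0] - p1[0]
    c = p1[0] * p2[1] - p2[0] * p1[1]
    return a, b, -c

def line_intersection(l1, l2):
    # Cramer's rule for the 2x2 system; None signals parallel lines.
    d = l1[0] * l2[1] - l1[1] * l2[0]
    if d == 0:
        return None
    x = (l1[2] * l2[1] - l1[1] * l2[2]) / d
    y = (l1[0] * l2[2] - l1[2] * l2[0]) / d
    return x, y

# Example: the diagonals of the unit square cross at (0.5, 0.5):
# line_intersection(line_coeffs((0, 0), (1, 1)), line_coeffs((0, 1), (1, 0)))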
= get_xy_pos_lists(A_positions)
    dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='')
    cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)  # assumed wiring: connect the click editor
# '/' for parent path
            path = file[:idx] + '/'
            return path
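#---- A hypothetical os.path-based equivalent of getDirectory(), closely
#---- matching the loop above (splitext/dirname both key off the last
#---- separator, like the backwards scan):
import os

def get_directory(path_str, sep='.'):
    # sep='.' strips the extension to name the working directory;
    # sep='/' returns the parent directory instead.
    if sep == '/':
        return os.path.dirname(path_str) + '/'
    return os.path.splitext(path_str)[0] + '/'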
f_B_site = PlotCanvas()
f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
f_B_site.axes.imshow(image)
f_B_site.axes.scatter(ap_B[:, 0], ap_B[:, 1], s=2, color='b')
Will do {} atom displacement only!'.format(disp_atom))
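#---- The displacement angles written to the csv are built from dx/dy with
#---- quadrant-by-quadrant corrections around math.atan(dy/dx); math.atan2
#---- collapses the four cases (and the dx == 0 pole) into one call.
#---- A sketch (the function name is mine):
import math

def displacement_angle(dx, dy):
    # Angle of the displacement vector in degrees, mapped to [0, 360).
    return math.degrees(math.atan2(dy, dx)) % 360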
Redistribution and use
Only check Refine Oxygen for [001] images; the O map is not supported for [110] yet.
Set the coloring pattern by", "self.label_9.setObjectName(\"label_9\") self.checkBox_2 = QtWidgets.QCheckBox(VecMap) self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20)) self.checkBox_2.setObjectName(\"checkBox_2\") self.checkBox_3 = QtWidgets.QCheckBox(VecMap) self.checkBox_3.setGeometry(QtCore.QRect(150,", "rendering, just leave it as [0]. ang_lst = [int(a) for a in ang_lst]", "Refine atom position module =================================================== #==== Connected to self.pushButton_4 ================================================ def refine_atom_position(self): #Global", "Imaging 3, 9 (2017).</a>\") msg.setWindowTitle(\"VecMap0.1: Acknowledgments\") returnValue = msg.exec() #============ Contact button ====================================================", "(Ua * tol) **2] #A list to store the neighboring atoms N =", "650, 111, 20)) self.checkBox_5.setChecked(True) self.checkBox_5.setObjectName(\"checkBox_5\") self.retranslateUi(VecMap) QtCore.QMetaObject.connectSlotsByName(VecMap) #=======Connect all the functions============================================= self.pushButton.clicked.connect(self.openfile) self.pushButton_2.clicked.connect(self.ini_atom_position)", "a few separation factors around the given number to determine the best separation", "os.path.exists(my_path): os.makedirs(my_path) s = readImage(file) title = s.metadata.General.title scale = s.axes_manager[0].scale #Read scale", "91, 41)) self.pushButton_3.setObjectName(\"pushButton_3\") self.label_5 = QtWidgets.QLabel(VecMap) self.label_5.setGeometry(QtCore.QRect(130, 160, 251, 51)) self.label_5.setTextFormat(QtCore.Qt.AutoText) self.label_5.setScaledContents(False) self.label_5.setWordWrap(True)", "cal_disp(self): try: #Global variables global U_avg, disp, disp_O, disp_atom # Read cal_site from", "======================================= def show_about(self): msg = QMessageBox() # msg.setIcon(QMessageBox.Information) msg.setText(\"VecMap v0.1.1\"\\ \"<br>\"\\ \"Designed by", "480, 95, 20)) self.radioButton_2.setObjectName(\"radioButton_2\") self.label_21 = QtWidgets.QLabel(VecMap) self.label_21.setGeometry(QtCore.QRect(20, 460, 171, 16)) self.label_21.setObjectName(\"label_21\") self.pushButton_13", "app freely available for the society.<br>\"\\ \"If you like this app, show your", "point of two lines D = L1[0] * L2[1] - L1[1] * L2[0]", "# instead of add_subplot, but then the subplot # configuration tool in the", "return path def find_atom(img, ini_pos, atom_name, atom_color='r'): #Refine atom positions for a sublattice", "y disp (px), disp (nm), angle (deg)\\n') for data in disp_O: disp_data.write('{}, {},", "questions and report bugs to:\"\\ \"<br>\" \"<a href=\\\"mailto:<EMAIL>\\\"><EMAIL></a>\") msg.setWindowTitle(\"VecMap0.1: Contact\") returnValue = msg.exec()", "canvas =================================================== class PlotCanvas(QMainWindow): def __init__(self, parent=None): QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Plot') self.create_main_frame() def", "from it! 
Version 0.1.1 06/13/2020 ''') print('='*50) import sys app = QtWidgets.QApplication(sys.argv) VecMap", "i in range(len(ang_bond)-1): if round(ang) in range(ang_bond[i], ang_bond[i+1]): vec.append(color_lst[i+1]) for vec in vec_data_color:", "arrow_end = closest_node(atom,A)[1] vec_len = distance.euclidean(arrow_end,atom) if vec_len > 0.14 / scale: continue", "Connected to self.pushButton_4 ================================================ def refine_atom_position(self): #Global variables: global ap_A, ap_B, ap_O, Ua,", "return s def getDirectory(file, s='.'): #Make the working directory and return the path.", "not supported for [110] yet. O_map = find_O #If enabled, will calculate the", "#calculate the ideal atomic positions for A in a un-distorted perovskite structure #A,", "ang_lst_mod[-1]) for vec in vec_data_color: ang = vec[5] - ang_lst[0] if ang <", "= QtWidgets.QCheckBox(VecMap) self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20)) self.checkBox_3.setObjectName(\"checkBox_3\") self.pushButton_4 = QtWidgets.QPushButton(VecMap) self.pushButton_4.setGeometry(QtCore.QRect(20, 370, 91,", "if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar = 0 try: # Read from", "am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list) #Save the refined positions and original image as hdf5", "QtWidgets.QPushButton(VecMap) self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41)) self.pushButton.setObjectName(\"pushButton\") self.checkBox = QtWidgets.QCheckBox(VecMap) self.checkBox.setGeometry(QtCore.QRect(150, 10, 111, 20))", "217602 (2019).</a>\"\\ \"<br>\"\\ \"2. Ma, T. et al. <a href=\\\"https://doi.org/10.1063/1.5115039\\\">Appl. Phys. Lett. 115,", "= sublattice_B.atom_positions ##Refined atoms positions for B-site. NumPy array. print('Refining B-site atoms done!')", "image.hspy') title = s.metadata.General.title scale = s.axes_manager[0].scale units = s.axes_manager[0].units image = s.data", "atoms of P(x,y) from a list of atoms A. # P:a given atom", "VecMap.setObjectName(\"VecMap\") VecMap.resize(402, 876) VecMap.setMinimumSize(QtCore.QSize(402, 836)) VecMap.setMaximumSize(QtCore.QSize(1024, 1024)) self.pushButton = QtWidgets.QPushButton(VecMap) self.pushButton.setGeometry(QtCore.QRect(20, 40, 91,", "my_path + title + '-disp_O_by_' + disp_atom + '.csv' if os.path.isfile(file_O_disp): disp_O =", "- ang_lst[0] for a in ang_lst] ang_bond = [] for idx in range(len(ang_lst_mod)-1):", "perovskite structure. Support [001] and [011] zone axes. 
Filtered image is preferred.</p><p><br/></p></body></html>\")) self.lineEdit.setText(_translate(\"VecMap\",", "disp_O = load_disp_data_from_csv(file_O_disp) find_O = 1 print('Found O displacement data!') else: find_O =", "{}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5])) disp_data.write('\\n') print('Atomic displacement data", "f_vec_ang_dist f_vec_ang_dist = PlotCanvas() f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions') f_vec_ang_dist.axes.hist(disp_angles, bins=50) f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)')", "o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2 ideal_O_positions.append(o_1)", "95, 20)) self.radioButton.setChecked(True) self.radioButton.setObjectName(\"radioButton\") self.radioButton_2 = QtWidgets.QRadioButton(VecMap) self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20)) self.radioButton_2.setObjectName(\"radioButton_2\") self.label_21", "10, 111, 20)) self.checkBox.setObjectName(\"checkBox\") self.line = QtWidgets.QFrame(VecMap) self.line.setGeometry(QtCore.QRect(20, 90, 371, 21)) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken)", "without modification, are permitted. Any redistribution must remain \"\\ \"the above copyright. When", "= ap_A.tolist() ap_1 = ap_B.tolist() else: disp_atom = 'B-site' rel_atom = 'A-site' ap_0", "list of displacement directions. This is used to determine the coloring pattern. For", "scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2) f_vec_map_O.axes.add_artist(scalebar) f_vec_map_O.show() f_vec_map_O.fig.savefig(my_path + title + \"_O_vec_map_by_{}.tif\".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True) print('The O", "28)) self.pushButton_9.setObjectName(\"pushButton_9\") self.pushButton_10 = QtWidgets.QPushButton(VecMap) self.pushButton_10.setGeometry(QtCore.QRect(20, 810, 120, 28)) self.pushButton_10.setObjectName(\"pushButton_10\") self.pushButton_11 = QtWidgets.QPushButton(VecMap)", "else: return False def math_center(a, b, c, d): #Define a function to find", "16)) self.label_13.setObjectName(\"label_13\") self.checkBox_5 = QtWidgets.QCheckBox(VecMap) self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20)) self.checkBox_5.setChecked(True) self.checkBox_5.setObjectName(\"checkBox_5\") self.retranslateUi(VecMap) QtCore.QMetaObject.connectSlotsByName(VecMap)", "A site if self.radioButton.isChecked(): cal_site = 0 if self.radioButton_2.isChecked(): cal_site = 1 cal_110", "/ 2, (n_2[1] + n_3[1]) / 2 ideal_O_positions.append(o_3) ideal_O_positions = list(dict.fromkeys(ideal_O_positions)) return ideal_O_positions", "#Save the displacement data with open(my_path + title + '-{}-disp.csv'.format(disp_atom),'w') as disp_data: disp_data.write('x", "msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() except IndexError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"The list", "f_sep f_sep = SeparationCanvas() for i in range(9): s_factor = sep - 4", "Donate me!\") returnValue = msg.exec() #=========== Define figure canvas =================================================== class PlotCanvas(QMainWindow): def", "ang_bond[i+1]): vec.append(color_lst[i+1]) for vec in vec_data_color: if len(vec) == 6: vec.append(color_lst[0]) return vec_data_color", "251, 22)) self.lineEdit_5.setObjectName(\"lineEdit_5\") self.pushButton_5 = QtWidgets.QPushButton(VecMap) self.pushButton_5.setGeometry(QtCore.QRect(280, 550, 101, 91)) self.pushButton_5.setObjectName(\"pushButton_5\") 
self.pushButton_6 =", "= (p2[0] - p1[0]) C = (p1[0]*p2[1] - p2[0]*p1[1]) return A, B, -C", "QtWidgets.QLabel(VecMap) self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16)) self.label_19.setObjectName(\"label_19\") self.label_20 = QtWidgets.QLabel(VecMap) self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16))", "angle\\n\" \"distrubution\")) self.pushButton_6.setText(_translate(\"VecMap\", \"Show \\n\" \"map\")) self.label_18.setText(_translate(\"VecMap\", \"<html><head/><body><p>Generate a vector map. Set the", "msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() print('') #========= Generate vector map module ============================================= #=========", "returnValue = msg.exec() #==== Refine atom position module =================================================== #==== Connected to self.pushButton_4", "atom positions for a sublattice #img: an array of image data; ini_pos: initial", "displacement only!'.format(disp_atom)) #============ Disclaimer button ==================================================== #============ Connected to self.pushButton_8 ======================================= def disclaimer(self):", "units s.save(my_path + 'Original image.hspy', overwrite=True) #Save a backup file in hspy format", "= sublattice_O.atom_positions #Refined atoms positions for O. NumPy array. print('Refining O atoms done!')", "lines D = L1[0] * L2[1] - L1[1] * L2[0] Dx = L1[2]", "A: Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.707) if len(Neighbor) == 4: n_0", "find_O #If enabled, will calculate the displacement of O atoms in relation to", "disp_O: f_vec_map_O.axes.arrow(vec[0],vec[1],vec[2]*O_len,vec[3]*O_len,color='red',linewidth=1,head_width=O_len/3,head_length=O_len/3) #Add a scale bar if s_bar == 1: scalebar = ScaleBar(scale,'nm',location='lower", "[b,c,d] diag_idx = distance.cdist([a],M).argmax() L1 = line(a,M[diag_idx]) del M[diag_idx] L2 = line(M[0],M[1]) center", "atoms done!') ap_A = sublattice_A.atom_positions #Refined atoms positions for A-site. NumPy array. #lattice_list", "displacement first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() except IndexError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical)", "================================================ def refine_atom_position(self): #Global variables: global ap_A, ap_B, ap_O, Ua, Uc, find_O #Read", "self.label_19 = QtWidgets.QLabel(VecMap) self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16)) self.label_19.setObjectName(\"label_19\") self.label_20 = QtWidgets.QLabel(VecMap) self.label_20.setGeometry(QtCore.QRect(20, 750,", "here if cal_site == 0:#Calculate A site disp_atom = 'A-site' rel_atom = 'B-site'", "171, 16)) self.label_21.setObjectName(\"label_21\") self.pushButton_13 = QtWidgets.QPushButton(VecMap) self.pushButton_13.setGeometry(QtCore.QRect(200, 460, 81, 51)) self.pushButton_13.setObjectName(\"pushButton_13\") self.label_7 =", "find an appropriate separation factor' #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True) sep = int(self.lineEdit.text()) sep_range = list(range(sep -", "neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale) disp = find_displacement(ap_0, ideal_pos, scale) #Save the", "D != 0: x = Dx / D y = Dy / D", "A-site vs. B-site vs. 
    #===== Open file and set up global variables such as path etc. ======================
    #===== Connected to self.pushButton =================================================
    def openfile(self):
        openfile_name = QFileDialog.getOpenFileName(self, 'Select Image', '',
                        'DigitalMicrograph files (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg , *.jpeg , *.png ,*.bmp);;All Files (*)')
        global file, my_path, file_path, title, scale, units, s, image, ABF, img_110
        file = openfile_name[0]
        if self.checkBox.isChecked():
            #Set ABF toggle from the checkbox
            ABF = 1
        else:
            ABF = 0
        if self.checkBox_4.isChecked():
            img_110 = 1
        else:
            img_110 = 0
        if file:
            print('{} has been loaded!'.format(file))
            my_path = getDirectory(file)         #Set the working path
            file_path = getDirectory(file, '/')  #Set the parent path
            if not os.path.exists(my_path):
                os.makedirs(my_path)
            s = readImage(file)
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale  #Read scale data from the image
            units = s.axes_manager[0].units
            s.save(my_path + 'Original image.hspy', overwrite=True)  #Save a backup file in hspy format
            image = s.data
            if ABF == 1:
                s.data = np.divide(1, s.data)  #Invert the ABF contrast so atoms appear bright
            #Draw an image
            global f_original_img
            f_original_img = PlotCanvas()
            f_original_img.setWindowTitle(file)
            f_original_img.axes.imshow(image)
            f_original_img.axes.set_axis_off()
            f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title))
            f_original_img.show()

    #==== Initialize atom position module ===============================================
    #==== Connected to self.pushButton_2 ================================================
    def ini_atom_position(self):
        global A_positions, f_ini
        try:
            sep = int(self.lineEdit.text())
            A_positions_ini = get_atom_positions(s, separation=sep)
            A_positions = A_positions_ini.tolist()
            f_ini = PlotCanvas()
            f_ini.setWindowTitle('Initial atom positions for refining')
            f_ini.axes.imshow(s.data)
            f_ini.axes.set_axis_off()
            f_ini.axes.set_title('Left click to add or remove atoms')
            f_ini.show()

            def onclick(event):
                if event.inaxes != f_ini.axes:
                    return
                if event.button == 1:  # Left mouse button
                    x = np.float(event.xdata)
                    y = np.float(event.ydata)
                    atom_nearby = closest_node((x, y), A_positions)[0]
                    #Remove the atom if the click lands on an existing position,
                    #otherwise add a new atom at the clicked position
                    if distance.euclidean((x, y), A_positions[atom_nearby]) < sep:
                        del A_positions[atom_nearby]
                    else:
                        A_positions.append([x, y])
                    replot(f_ini)

            def get_xy_pos_lists(atom_lst):
                return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1]

            def replot(f):
                x_pos, y_pos = get_xy_pos_lists(A_positions)
                dp.set_xdata(x_pos)
                dp.set_ydata(y_pos)
                f.fig.canvas.draw()

            xy_positions = get_xy_pos_lists(A_positions)
            dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='')
            cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load an image first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #==== Connected to self.pushButton_3 ================================================
    def find_separation(self):
        #sep_range = (int(self.lineEdit_2.text()), int(self.lineEdit_3.text()))
        #s_peaks = am.get_feature_separation(s, separation_range=sep_range)  #Range might fail to
        #find an appropriate separation factor
        #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True)
        sep = int(self.lineEdit.text())
        sep_range = list(range(sep - 4, sep + 5))
        # Create canvas for drawing
        try:
            global f_sep
            f_sep = SeparationCanvas()
            for i in range(9):
                s_factor = sep - 4 + i
                ini_position = get_atom_positions(s, separation=s_factor)
                f_sep.axes[i].imshow(s.data)
                f_sep.axes[i].set_axis_off()
                f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r')
                f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
            f_sep.show()
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load an image first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
    #==== Refine atom position module ===================================================
    #==== Connected to self.pushButton_4 ================================================
    def refine_atom_position(self):
        #Global variables:
        global ap_A, ap_B, ap_O, Ua, Uc, find_O
        #Read the toggles
        if self.checkBox_2.isChecked():
            find_O = 1
        else:
            find_O = 0
        if self.checkBox_3.isChecked():
            plotpos = 1
        else:
            plotpos = 0
        try:
            #Refine the A-site atom positions
            print('='*50)
            print('Refining atom positions for A-site atoms...')
            print('This may take time...')
            sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
            print('Refining A-site atoms done!')
            ap_A = sublattice_A.atom_positions  #Refined atoms positions for A-site. NumPy array.
            #lattice_list = []
            #lattice_list.append(sublattice_A)
            print('='*50)
            print('Finding the initial positions for sublattice B...')
            sublattice_A.construct_zone_axes()
            if img_110 == 1:
                zone_axis = sublattice_A.zones_axis_average_distances[1]
            else:
                zone_axis = sublattice_A.zones_axis_average_distances[2]
            #Calculate lattice parameters
            z0 = sublattice_A.zones_axis_average_distances[0]
            z1 = sublattice_A.zones_axis_average_distances[1]
            Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale
            Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale
            print('Lattice parameters estimated from the image:')
            print('a = {:.3f} {}'.format(Ua, units))
            print('c = {:.3f} {}'.format(Uc, units))
            B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)
            print('Subtracting sublattice A from the image using 2D gaussian fit...')
            print('This may take time...')
            image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)
            #Refine B-site atoms
            print('='*50)
            print('Refining atom positions for sublattice B...')
            sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue')
            ap_B = sublattice_B.atom_positions  #Refined atoms positions for B-site. NumPy array.
            print('Refining B-site atoms done!')
            #lattice_list.append(sublattice_B)
            #Find the position of O atoms
            if find_O == 1:
                AB_positions = ap_A.tolist() + ap_B.tolist()
                sublattice_AB = Sublattice(AB_positions, image=s.data, color='y', name='Sublattice A + B')
                sublattice_AB.construct_zone_axes()
                zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]  #Only work for [001] currently
                O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002)  #Initial positions of O
                print('='*50)
                print('Subtracting sublattice A and B from the image using 2D gaussian fit...')
                image_without_AB = remove_atoms_from_image_using_2d_gaussian(sublattice_AB.image, sublattice_AB, show_progressbar=False)
                print('Refining atom positions for sublattice O...')
                sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g')
                ap_O = sublattice_O.atom_positions  #Refined atoms positions for O. NumPy array.
                print('Refining O atoms done!')
                #lattice_list.append(sublattice_O)
            print('Refining atoms done!')
            #Construct atom position results with sublattice
            #atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list)
            #Save the refined positions and original image as hdf5 file. This file can be called later.
            #atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True)
            #=======================
            #Plot and save figures
            #=======================
            if plotpos == 1:
                print('='*50)
                print('Saving result plots...')
                global f_A_site, f_B_site, f_AB
                #Plot A-site atom positions with the original image overlayed.
                f_A_site = PlotCanvas()
                f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms')
                f_A_site.axes.imshow(image)
                f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_A_site.axes.set_axis_off()
                f_A_site.show()
                f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot B-site atom positions with the original image overlayed.
                f_B_site = PlotCanvas()
                f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
                f_B_site.axes.imshow(image)
                f_B_site.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_B_site.axes.set_axis_off()
                f_B_site.show()
                f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot both A-site and B-site positions
                f_AB = PlotCanvas()
                f_AB.setWindowTitle('VecMap0.1: Refined positions of A- and B-site atoms')
                f_AB.axes.imshow(image)
                f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_AB.axes.set_axis_off()
                f_AB.show()
                f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot O positions if available
                if find_O == 1:
                    global f_O_site, f_all
                    f_O_site = PlotCanvas()
                    f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms')
                    f_O_site.axes.imshow(image)
                    f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_O_site.axes.set_axis_off()
                    f_O_site.show()
                    f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif', dpi=600, bbox_inches='tight')
                    #Plot all the atoms on the image
                    f_all = PlotCanvas()
                    f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. O atoms')
                    f_all.axes.imshow(image)
                    f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                    f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                    f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_all.axes.set_axis_off()
                    f_all.show()
                    f_all.fig.savefig(my_path + title + '_all atoms' + '.tif', dpi=600, bbox_inches='tight')
                print('All figures have been saved to ' + my_path)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please initialize the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        print('')
    #==== Calculate displacement module =================================================
    #==== Connected to self.pushButton_13 ===============================================
    def cal_disp(self):
        try:
            #Global variables
            global U_avg, disp, disp_O, disp_atom
            # Read cal_site from the radio button:
            # 0 to calculate A site in relative to B site, 1 for the other way around
            if self.radioButton.isChecked():
                cal_site = 0
            if self.radioButton_2.isChecked():
                cal_site = 1
            #If the input image is [110], turn off the O map; it is not supported for [110] yet.
            cal_110 = img_110
            O_map = find_O  #If enabled, will calculate the displacement of O atoms as well
            if cal_110 == 1:
                O_map = 0
            U_avg = (Ua + Uc)/2  #Unit cell parameter estimated from the image.
            #=========================================================================
            #The main scripts start from here
            if cal_site == 0:  #Calculate A site
                disp_atom = 'A-site'
                rel_atom = 'B-site'
                ap_0 = ap_A.tolist()
                ap_1 = ap_B.tolist()
            else:
                disp_atom = 'B-site'
                rel_atom = 'A-site'
                ap_0 = ap_B.tolist()
                ap_1 = ap_A.tolist()
            print('='*50)
            print('====Calculating {} displacements in relative to the {} sublattice===='.format(disp_atom, rel_atom))
            #Pass the zone flag so [011] images use the two-neighbor rule
            ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale, img_110=cal_110)
            disp = find_displacement(ap_0, ideal_pos, scale)
            #Save the displacement data
            with open(my_path + title + '-{}-disp.csv'.format(disp_atom), 'w') as disp_data:
                disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                for data in disp:
                    disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                    disp_data.write('\n')
            print('Atomic displacement data saved to ' + my_path + title + '-{}-disp.csv'.format(disp_atom))
            #Save the neigboring atoms as well
            with open(my_path + 'neighboring atoms.csv', 'w') as neighbor_data:
                for data in neighbor_pos:
                    n = len(data)  #four neighbors on [001], two on [011]
                    for idx in range(n):
                        neighbor_data.write('{0}, {1}, '.format(*data[idx]))
                    neighbor_data.write('\n')
            #Calculate O map and save
            if O_map == 1:
                ap_2 = ap_O.tolist()
                ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale)
                disp_O = find_displacement(ap_2, ideal_O_pos, scale)
                with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom), 'w') as disp_data:
                    disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                    for data in disp_O:
                        disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                        disp_data.write('\n')
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please refine the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        print('')

    #======== Display angle distribution of the vectors module ===========================
    #======== Connected to self.pushButton_5 =============================================
    def vec_ang_dist(self):
        try:
            disp_angles = [lst[5] for lst in disp]
            global f_vec_ang_dist
            f_vec_ang_dist = PlotCanvas()
            f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions')
            f_vec_ang_dist.axes.hist(disp_angles, bins=50)
            f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)')
            f_vec_ang_dist.axes.set_xticks(list(range(0,390,30)))
            f_vec_ang_dist.axes.set_ylabel('Frequency')
            f_vec_ang_dist.axes.set_title('Put your cursor on the peak(s) to see the\n angle values')
            f_vec_ang_dist.show()
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        print('')

    #========= Generate vector map module ===============================================
    #========= Connected to self.pushButton_6 ===========================================
    def show_vec_map(self):
        a_len = int(self.lineEdit_2.text())
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        try:
            # Read from lineEdits:
            #A list of displacement directions, used to determine the coloring pattern.
            #For single color rendering, just leave it as [0].
            ang_lst = str(self.lineEdit_4.text()).split()
            ang_lst = [int(a) for a in ang_lst]
            color_lst = str(self.lineEdit_5.text()).split()
            disp_color = set_arrow_color(disp, ang_lst, color_lst)
            global f_vec_map
            f_vec_map = PlotCanvas()
            f_vec_map.setWindowTitle('VecMap0.1: Vector Map')
            f_vec_map.axes.imshow(image)
            f_vec_map.axes.set_axis_off()
            for vec in disp_color:
                f_vec_map.axes.arrow(vec[0], vec[1], vec[2]*a_len, vec[3]*a_len, color=vec[6], linewidth=1, head_width=a_len/3, head_length=a_len/3)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale, 'nm', location='lower left', scale_loc='top', sep=2)
                f_vec_map.axes.add_artist(scalebar)
            f_vec_map.show()
            f_vec_map.fig.savefig(my_path + title + "_vec_map_by_{}.tif".format(disp_atom), dpi=1200, bbox_inches='tight', overwrite=True)
            print('The vector map has been saved to ' + my_path + title + "_vec_map_by_{}.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        except IndexError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("The list of colors should match the list of angles!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        print('')

    #========= Generate O vector map module =============================================
    #========= Connected to self.pushButton_14 ==========================================
    def show_O_vec_map(self):
        O_len = int(self.lineEdit_3.text())
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        try:
            global f_vec_map_O
            f_vec_map_O = PlotCanvas()
            f_vec_map_O.setWindowTitle('VecMap0.1: Vector Map of Oxygen atoms')
            f_vec_map_O.axes.imshow(image)
            f_vec_map_O.axes.set_axis_off()
            for vec in disp_O:
                f_vec_map_O.axes.arrow(vec[0], vec[1], vec[2]*O_len, vec[3]*O_len, color='red', linewidth=1, head_width=O_len/3, head_length=O_len/3)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale, 'nm', location='lower left', scale_loc='top', sep=2)
                f_vec_map_O.axes.add_artist(scalebar)
            f_vec_map_O.show()
            f_vec_map_O.fig.savefig(my_path + title + "_O_vec_map_by_{}.tif".format(disp_atom), dpi=1200, bbox_inches='tight', overwrite=True)
            print('The O vector map has been saved to ' + my_path + title + "_O_vec_map_by_{}.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("No O displacement data exist!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        print('')
    #============ Load displacement from csv module =====================================
    #============ Connected to self.pushButton_7 ========================================
    def load_from_csv(self):
        # Load displacement data from the csv file saved previously
        global s, my_path, title, scale, units, image, disp, disp_O, disp_atom, find_O
        openfile_name = QFileDialog.getOpenFileName(self, 'Select the displacement data', '', 'CSV (*.csv);;All Files (*)')
        file = openfile_name[0]
        if file:
            my_path = getDirectory(file, '/')
            s = readImage(my_path + 'Original image.hspy')
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale
            units = s.axes_manager[0].units
            image = s.data
            disp = load_disp_data_from_csv(file)
            # Look for the O data
            disp_atom = file[-15:-9]
            file_O_disp = my_path + title + '-disp_O_by_' + disp_atom + '.csv'
            if os.path.isfile(file_O_disp):
                disp_O = load_disp_data_from_csv(file_O_disp)
                find_O = 1
                print('Found O displacement data!')
            else:
                find_O = 0
                print('No O displacement data was found! Will do {} atom displacement only!'.format(disp_atom))

    #============ Disclaimer button =====================================================
    #============ Connected to self.pushButton_8 ========================================
    def disclaimer(self):
        def disclaimerButtonClick(i):
            msg = QMessageBox()
            msg.setText('Thanks for using VecMap')
            msg.setWindowTitle('Thank you!')
            returnValue = msg.exec()
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("<b>Disclaimer</b><br>" \
                    "This app was designed by Dr <NAME>. Redistribution and use in source, " \
                    "with or without modification, are permitted. Any redistribution must retain " \
                    "the above copyright. When publishing results obtained from this " \
                    "app, please add the following reference: <br>" \
                    "1. Ma, T. et al. <a href=\"https://doi.org/10.1103/PhysRevLett.123.217602\">Phys. Rev. Lett. 123, 217602 (2019).</a>" \
                    "<br>" \
                    "2. Ma, T. et al. <a href=\"https://doi.org/10.1063/1.5115039\">Appl. Phys. Lett. 115, 122902 (2019).</a>")
        msg.setWindowTitle("VecMap0.1: Disclaimer")
        msg.buttonClicked.connect(disclaimerButtonClick)
        returnValue = msg.exec()

    #============ About button ==========================================================
    #============ Connected to self.pushButton_9 ========================================
    def show_about(self):
        msg = QMessageBox()
        # msg.setIcon(QMessageBox.Information)
        msg.setText("VecMap v0.1.1" \
                    "<br>" \
                    "Designed by Dr. <NAME>" \
                    "<br>" \
                    "06/13/2020" \
                    "<br>" \
                    "First version release!<br>" \
                    "Get more information and<br> source code ...")
        msg.setWindowTitle("VecMap0.1: About")
        returnValue = msg.exec()

    #============ Acknowledgments button ================================================
    #============ Connected to self.pushButton_10 =======================================
    def acknowledgments(self):
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("This app is based on the Hyperspy and Atomap packages which " \
                    "are partially incorporated in the program. Please " \
                    "consider citing/adding acknowledgement for Hyperspy " \
                    "and Atomap packages in your publication:" \
                    "<br>" \
                    "<NAME> la et al. <a href=\"http://doi.org/10.5281/zenodo.3396791\">hyperspy/hyperspy: HyperSpy v1.5.2 (2019).</a>" \
                    "<br>" \
                    "<NAME>. et al. <a href=\"https://doi.org/10.1186/s40679-017-0042-5\">Adv. Struct. Chem. Imaging 3, 9 (2017).</a>")
        msg.setWindowTitle("VecMap0.1: Acknowledgments")
        returnValue = msg.exec()

    #============ Contact button ========================================================
    #============ Connected to self.pushButton_11 =======================================
    def show_contact(self):
        msg = QMessageBox()
        msg.setText("Send your questions and report bugs to:" \
                    "<br>" \
                    "<a href=\"mailto:<EMAIL>\"><EMAIL></a>")
        msg.setWindowTitle("VecMap0.1: Contact")
        returnValue = msg.exec()

    #============ Donate me button ======================================================
    #============ Connected to self.pushButton_12 =======================================
    def donate(self):
        msg = QMessageBox()
        msg.setText("I will make this app freely available for the society.<br>" \
                    "If you like this app, show your support ...")
        msg.setWindowTitle("VecMap0.1: Donate me!")
        returnValue = msg.exec()
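#A minimal end-to-end sketch, not part of the original GUI flow: it chains the
#same HyperSpy/Atomap calls that the Step 2-4 buttons trigger, but head-lessly.
#The function name _demo_headless_pipeline and the file name 'example.dm4' are
#illustrative assumptions; the helpers it calls (readImage, find_atom, etc.) are
#defined further down in this file.
def _demo_headless_pipeline(image_file='example.dm4', separation=8):
    s = readImage(image_file)  #hypothetical input image
    scale = s.axes_manager[0].scale
    #Step 2: initial A-site positions from a peak separation factor
    A_positions = get_atom_positions(s, separation=separation).tolist()
    #Step 3: refine A-sites, then locate and refine the B-site sublattice
    sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
    sublattice_A.construct_zone_axes()
    zone_axis = sublattice_A.zones_axis_average_distances[2]
    B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)
    image_without_A = remove_atoms_from_image_using_2d_gaussian(
        sublattice_A.image, sublattice_A, show_progressbar=False)
    sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue')
    #Step 4: A-site displacements relative to the B-site framework
    z0 = sublattice_A.zones_axis_average_distances[0]
    Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale
    ideal_pos, _ = find_ideal_pos(sublattice_A.atom_positions.tolist(),
                                  sublattice_B.atom_positions.tolist(), Ua, scale)
    return find_displacement(sublattice_A.atom_positions.tolist(), ideal_pos, scale)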
#=========== Define figure canvas ===================================================
class PlotCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 5x4 inches, 100 dots-per-inch
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Since we have only one plot, we can use add_axes
        # instead of add_subplot, but then the subplot
        # configuration tool in the navigation toolbar wouldn't work.
        self.axes = self.fig.add_subplot(111)
        # Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

#=========== Define the separation-test canvas ======================================
class SeparationCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Find separation factors')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        self.dpi = 100
        self.fig = Figure((10.0, 10.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Add a 3x3 axes layout (nine separation-factor panels)
        self.axes = [self.fig.add_subplot(3, 3, n) for n in range(1, 10)]
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
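#A small sketch, separate from the app itself, showing that PlotCanvas is an
#ordinary QMainWindow subclass and can be exercised on its own (handy when
#adjusting the canvas layout). _demo_plot_canvas is an illustrative name added
#here, not an original VecMap function.
def _demo_plot_canvas():
    import sys
    import numpy
    app = QtWidgets.QApplication(sys.argv)
    win = PlotCanvas()
    win.setWindowTitle('PlotCanvas demo')
    win.axes.imshow(numpy.random.random((64, 64)))  #stand-in for an HR-STEM frame
    win.axes.set_axis_off()
    win.show()
    sys.exit(app.exec_())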
#==================== Modules and helper functions ===================================
from hyperspy.api import load
from atomap.atom_finding_refining import get_atom_positions
from atomap.sublattice import Sublattice
from atomap.tools import remove_atoms_from_image_using_2d_gaussian
import atomap.api as am
import os
import numpy as np
import matplotlib.pyplot as plt
import math
import copy
from scipy.spatial import distance
from matplotlib_scalebar.scalebar import ScaleBar

#====Helper functions, do not change====
def readImage(file):
    #Load raw image file for process.
    #Require Hyperspy package
    s = load(file)
    return s

def getDirectory(file, s='.'):
    #Make the working directory and return the path.
    for idx in range(-1, -len(file), -1):
        if file[idx] == s:
            path = file[:idx] + '/'
            break
    return path

def find_atom(img, ini_pos, atom_name, atom_color='r'):
    #Refine atom positions for a sublattice.
    #img: an array of image data; ini_pos: initial positions;
    #atom_name: a string for name; atom_color: a string for the marker color
    sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name)
    sublattice.find_nearest_neighbors()
    sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False)
    sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False)
    return sublattice  #Return the refined sublattice object

def find_neighboring_atoms(P, A, Ua, tol=1.2):
    # Define a function to find neighboring atoms of P(x,y) from a list of atoms A.
    # P: a given atom; A: a list of atom positions; Ua: the expected neighbor
    # distance in pixels, i.e. 0.707*a for [001] and 0.5*a for [110]
    x, y = P
    N = [a for a in A if (a[0]-x)**2 + (a[1]-y)**2 <= (Ua * tol) **2]  #A list to store the neighboring atoms
    #Note: sorted by distance from the image origin, not from P
    N = sorted(N, key=lambda x: (x[0] ** 2 + x[1] ** 2) ** 0.5)
    return N

def closest_node(node, nodes):
    #A function to find the closest node in an array
    closest_index = distance.cdist([node], nodes).argmin()
    return closest_index, nodes[closest_index]
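#A toy self-check, added for illustration only: with A-sites on a unit grid and a
#B-site at a cell center, find_neighboring_atoms should return exactly the four
#surrounding A columns at a distance of 0.707*a, which is the len(Neighbor) == 4
#condition relied on by find_ideal_pos and find_ideal_O_pos further down.
def _demo_find_neighbors():
    a = 1.0  #lattice parameter in pixel units for this toy case
    A_sites = [(i, j) for i in range(4) for j in range(4)]
    B_site = (1.5, 1.5)  #projected body-center column
    neighbors = find_neighboring_atoms(B_site, A_sites, a * 0.707)
    print(len(neighbors), neighbors)  #-> 4 [(1, 1), (1, 2), (2, 1), (2, 2)]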
def line(p1, p2):
    #Find a line function from two points; returns (A, B, C) for Ax + By = C
    A = (p1[1] - p2[1])
    B = (p2[0] - p1[0])
    C = (p1[0]*p2[1] - p2[0]*p1[1])
    return A, B, -C

def intersection(L1, L2):
    #A function to find the intersection point of two lines by Cramer's rule
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        x = Dx / D
        y = Dy / D
        return x, y
    else:
        return False

def math_center(a, b, c, d):
    #Define a function to find the mathematical center of four points a, b, c, d.
    #Find the point diagonal to a (the farthest of the other three), then
    #intersect the two diagonals of the quadrilateral.
    M = [b, c, d]
    diag_idx = distance.cdist([a], M).argmax()
    L1 = line(a, M[diag_idx])
    del M[diag_idx]
    L2 = line(M[0], M[1])
    center = intersection(L1, L2)
    return center
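#A quick sanity check, added for illustration: for the four corners of a unit
#square given in any order, the diagonal-intersection construction above must
#return the center of the square.
def _demo_math_center():
    print(math_center((0, 0), (1, 1), (1, 0), (0, 1)))  #-> (0.5, 0.5)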
def find_ideal_pos(A, B, Ua, scale, img_110=False):
    #calculate the ideal atomic positions for A in an un-distorted perovskite structure.
    #A, B are lists of atom coordinates; Ua is the estimated lattice parameter in nm;
    #scale is the image pixel size. Returns lists of tuples.
    ideal_positions = []
    Neighbor_positions = []
    if not img_110:
        #[001] image: each A column sits at the center of four B columns
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
            if len(Neighbor) == 4:
                ap_center = math_center(*Neighbor)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor)  #Save neighbors for plotting
        return ideal_positions, Neighbor_positions
    #[011] image: each A column sits midway between two B columns
    for atom in A:
        Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.5)
        if len(Neighbor) == 2:
            ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2, (Neighbor[0][1]+Neighbor[1][1])/2)
            ideal_positions.append(ap_center)
            Neighbor_positions.append(Neighbor)
    return ideal_positions, Neighbor_positions

def find_ideal_O_pos(A, B, Ua, scale):
    #calculate the ideal atomic positions for O in an un-distorted perovskite structure
    #only support [001] images
    ideal_O_positions = []
    for atom in A:
        Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
        if len(Neighbor) == 4:
            #Sort the four neighbors into adjacent pairs, then take the
            #midpoints of the four edges as the ideal O sites
            n_0 = Neighbor.pop(0)
            n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_2 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_3 = Neighbor.pop()
            o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2
            ideal_O_positions.append(o_0)
            o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2
            ideal_O_positions.append(o_1)
            o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2
            ideal_O_positions.append(o_2)
            o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2
            ideal_O_positions.append(o_3)
    ideal_O_positions = list(dict.fromkeys(ideal_O_positions))  #Remove duplicated midpoints
    return ideal_O_positions

def find_displacement(A, A_com, scale):
    #find atomic displacement of A.
    #A holds the refined positions; A_com holds the computed (ideal) positions.
    #Each vector starts at an ideal site and points to the nearest refined atom.
    disp = []
    for atom in A_com:
        arrow_end = closest_node(atom, A)[1]
        vec_len = distance.euclidean(arrow_end, atom)
        if vec_len > 0.14 / scale:
            continue  #Skip unphysically large (mismatched) vectors
        dx = arrow_end[0] - atom[0]
        dy = arrow_end[1] - atom[1]
        #calculate the displacement vector angle according to dx, dy
        if dy >= 0 and dx < 0:
            vec_ang = math.degrees(math.atan(dy/dx)) + 180
        elif dx < 0 and dy < 0:
            vec_ang = math.degrees(math.atan(dy/dx)) + 180
        elif dx > 0 and dy < 0:
            vec_ang = 360 + math.degrees(math.atan(dy/dx))
        else:
            vec_ang = math.degrees(math.atan(dy/dx))
        disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len, vec_ang])
    return disp
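#The four-branch angle calculation in find_displacement folds (dx, dy) into a
#0-360 degree range but divides by dx, so it cannot handle dx == 0. The helper
#below is an added cross-check (not original code) using the equivalent, more
#compact math.atan2 form.
def _demo_vector_angle(dx, dy):
    ang = math.degrees(math.atan2(dy, dx))  #in (-180, 180]
    return ang + 360 if ang < 0 else ang    #folded into [0, 360)
#e.g. _demo_vector_angle(1, -1) -> 315.0, matching 360 + atan(dy/dx) above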
self.line_3.setObjectName(\"line_3\") self.label_11 = QtWidgets.QLabel(VecMap)", "f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_AB.axes.set_axis_off() f_AB.show() f_AB.fig.savefig(my_path + title", "#========================================================================= #The main scripts start from here if cal_site == 0:#Calculate A site", "#atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list) #Save the refined positions and original image", "self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) # Since we have only one plot, we can", "QtWidgets.QLineEdit(VecMap) self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251, 22)) self.lineEdit_5.setObjectName(\"lineEdit_5\") self.pushButton_5 = QtWidgets.QPushButton(VecMap) self.pushButton_5.setGeometry(QtCore.QRect(280, 550, 101, 91))", "as well with open(my_path + 'neighboring atoms.csv','w') as neighbor_data: for data in neighbor_pos:", "Read from lineEdits: ang_lst = str(self.lineEdit_4.text()).split() #A list of displacement directions. This is", "= int(self.lineEdit.text()) sep_range = list(range(sep - 4, sep + 5)) # Create canvas", "==================================================== #============ Connected to self.pushButton_10 ======================================= def acknowledgments(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"This", "111, 16)) self.label_4.setObjectName(\"label_4\") self.pushButton_2 = QtWidgets.QPushButton(VecMap) self.pushButton_2.setGeometry(QtCore.QRect(20, 170, 91, 41)) self.pushButton_2.setObjectName(\"pushButton_2\") self.pushButton_3 =", "file_O_disp = my_path + title + '-disp_O_by_' + disp_atom + '.csv' if os.path.isfile(file_O_disp):", "QtWidgets.QFrame(VecMap) self.line_2.setGeometry(QtCore.QRect(20, 280, 371, 21)) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName(\"line_2\") self.label_9 = QtWidgets.QLabel(VecMap) self.label_9.setGeometry(QtCore.QRect(20, 300,", "= QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate the displacement first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec()", "= QtWidgets.QPushButton(VecMap) self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120, 28)) self.pushButton_9.setObjectName(\"pushButton_9\") self.pushButton_10 = QtWidgets.QPushButton(VecMap) self.pushButton_10.setGeometry(QtCore.QRect(20, 810, 120,", "file = openfile_name[0] if self.checkBox.isChecked(): #Set ABF toggle from the checkbox ABF =", "Atomap packages in your publication:\"\\ \"<br>\" \"<NAME> la et al. <a href=\\\"http://doi.org/10.5281/zenodo.3396791\\\">hyperspy/hyperspy: HyperSpy", "open(my_path + title + '-{}-disp.csv'.format(disp_atom),'w') as disp_data: disp_data.write('x (px), y (px), x disp", "to self.pushButton_12 ======================================= def donate(self): msg = QMessageBox() msg.setText(\"I will make this app", "href=\\\"mailto:<EMAIL>\\\"><EMAIL></a>\") msg.setWindowTitle(\"VecMap0.1: Contact\") returnValue = msg.exec() #============ Donate me button ==================================================== #============ Connected", "image\")) self.label_2.setText(_translate(\"VecMap\", \"<html><head/><body><p>Load a HR-STEM image with a perovskite structure. 
Support [001] and", "scale * 0.707) if len(Neighbor) == 4: ap_center = math_center(*Neighbor) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor) #Save", "use! Hope you get good results and publications from it! Version 0.1.1 06/13/2020", "self.label_7.setGeometry(QtCore.QRect(20, 650, 41, 16)) self.label_7.setObjectName(\"label_7\") self.lineEdit_2 = QtWidgets.QLineEdit(VecMap) self.lineEdit_2.setGeometry(QtCore.QRect(60, 650, 30, 20)) self.lineEdit_2.setObjectName(\"lineEdit_2\")", "returnValue = msg.exec() #========= Generate O vector map module ============================================= #========= Connected to", "= str(self.lineEdit_4.text()).split() #A list of displacement directions. This is used to determine the", "101, 91)) self.pushButton_5.setObjectName(\"pushButton_5\") self.pushButton_6 = QtWidgets.QPushButton(VecMap) self.pushButton_6.setGeometry(QtCore.QRect(20, 680, 80, 41)) self.pushButton_6.setObjectName(\"pushButton_6\") self.label_18 =", "distance.euclidean(arrow_end,atom) if vec_len > 0.14 / scale: continue dx = arrow_end[0]-atom[0] dy =", "used to determine the coloring pattern. For single color rendering, just leave it", "for a sublattice #img: an array of image data; ini_pos: initial positions; atom_name:", "atoms as well with open(my_path + 'neighboring atoms.csv','w') as neighbor_data: for data in", "overwrite=True) #======================= #Plot and save figures #======================= if plotpos == 1: print('='*50) print('Saving", "f_vec_ang_dist = PlotCanvas() f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions') f_vec_ang_dist.axes.hist(disp_angles, bins=50) f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)') f_vec_ang_dist.axes.set_xticks(list(range(0,390,30)))", "= 100 self.fig = Figure((10.0, 10.0), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) # Add", "= sorted(N, key=lambda x: (x[0] ** 2 + x[1] ** 2) ** 0.5)", "\\ \"<br>\" \"<NAME>. et al. <a href=\\\"https://doi.org/10.1186/s40679-017-0042-5\\\">Adv. Struct. Chem. 
Imaging 3, 9 (2017).</a>\")", "251, 51)) self.label_5.setTextFormat(QtCore.Qt.AutoText) self.label_5.setScaledContents(False) self.label_5.setWordWrap(True) self.label_5.setObjectName(\"label_5\") self.label_6 = QtWidgets.QLabel(VecMap) self.label_6.setGeometry(QtCore.QRect(130, 230, 251, 51))", "node in an array closest_index = distance.cdist([node], nodes).argmin() return closest_index,nodes[closest_index] def line(p1, p2):", "self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName(\"line\") self.label = QtWidgets.QLabel(VecMap) self.label.setGeometry(QtCore.QRect(20, 10, 121, 16)) self.label.setObjectName(\"label\") self.label_2 = QtWidgets.QLabel(VecMap)", "if distance.euclidean((x,y), A_positions[atom_nearby]) > 5: A_positions.append([x, y]) else: A_positions.pop(atom_nearby) replot(f_ini) def get_xy_pos_lists(atom_lst): return", "* L2[0] Dx = L1[2] * L2[1] - L1[1] * L2[2] Dy =", "axis for the initial position of B: typically 3 for [001] and 1", "[self.fig.add_subplot(3,3,n) for n in range(1,10)] self.fig.set_tight_layout(True) # Create the navigation toolbar, tied to", "d): #Define a function to find the mathematical center of four points, a,", "self.lineEdit_5.setText(_translate(\"VecMap\", \"yellow\")) self.pushButton_5.setText(_translate(\"VecMap\", \"Vector angle\\n\" \"distrubution\")) self.pushButton_6.setText(_translate(\"VecMap\", \"Show \\n\" \"map\")) self.label_18.setText(_translate(\"VecMap\", \"<html><head/><body><p>Generate a", "QtWidgets.QCheckBox(VecMap) self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20)) self.checkBox_4.setObjectName(\"checkBox_4\") self.line_3 = QtWidgets.QFrame(VecMap) self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21))", "typically 3 for [001] and 1 for [110] if img_110 == 1: zone_axis", "the navigation toolbar, tied to the canvas # self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame) vbox", "= math.sqrt(z1[0]**2 + z1[1]**2) * scale print('='*50) print('Estimated lattice parameters (average) from the", "'.tif',dpi=600,bbox_inches='tight') #Plot both A-site and B-site on the image f_AB = PlotCanvas() f_AB.setWindowTitle('VecMap0.1:", "module ================================= #==================== Connected to self.pushButton_13 =============================== def cal_disp(self): try: #Global variables global", "def getDirectory(file, s='.'): #Make the working directory and return the path. 
for idx", "range(ang_bond[i], ang_bond[i+1]): vec.append(color_lst[i+1]) for vec in vec_data_color: if len(vec) == 6: vec.append(color_lst[0]) return", "KIND.<br>\") msg.setWindowTitle(\"VecMap0.1: Disclaimer\") def disclaimerButtonClick(): msg = QMessageBox() msg.setText('Thanks for using VecMap') msg.setWindowTitle('Thank", "* import matplotlib matplotlib.use('Qt5Agg') from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.backends.backend_qt5agg import", "guys\") returnValue = msg.exec() #======== Display angle distribution of the vectors module ===========================", "self.pushButton_6.setObjectName(\"pushButton_6\") self.label_18 = QtWidgets.QLabel(VecMap) self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51)) self.label_18.setTextFormat(QtCore.Qt.AutoText) self.label_18.setScaledContents(False) self.label_18.setWordWrap(True) self.label_18.setObjectName(\"label_18\") self.pushButton_7", "self.radioButton = QtWidgets.QRadioButton(VecMap) self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20)) self.radioButton.setChecked(True) self.radioButton.setObjectName(\"radioButton\") self.radioButton_2 = QtWidgets.QRadioButton(VecMap) self.radioButton_2.setGeometry(QtCore.QRect(90,", "- ang_lst_mod[idx]) // 2 + ang_lst_mod[idx]) ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1])", "import Sublattice from atomap.tools import remove_atoms_from_image_using_2d_gaussian import os import numpy as np import", "all the functions============================================= self.pushButton.clicked.connect(self.openfile) self.pushButton_2.clicked.connect(self.ini_atom_position) self.pushButton_3.clicked.connect(self.find_separation) self.pushButton_4.clicked.connect(self.refine_atom_position) self.pushButton_13.clicked.connect(self.cal_disp) self.pushButton_5.clicked.connect(self.vec_ang_dist) self.pushButton_6.clicked.connect(self.show_vec_map) self.pushButton_14.clicked.connect(self.show_O_vec_map) self.pushButton_7.clicked.connect(self.load_from_csv) self.pushButton_8.clicked.connect(self.disclaimer)", "f_vec_map_O = PlotCanvas() f_vec_map_O.setWindowTitle('VecMap0.1: Vector Map of Oxygen atoms') f_vec_map_O.axes.imshow(image) f_vec_map_O.axes.set_axis_off() for vec", "= QMessageBox() # msg.setIcon(QMessageBox.Information) msg.setText(\"VecMap v0.1.1\"\\ \"<br>\"\\ \"Designed by Dr. 
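# ---- Hedged sketch: the path-stripping idea getDirectory implements ------
# (an assumption based on the backwards loop above, not the app's exact
# code). Scanning from the end for the last occurrence of the separator s
# keeps everything before it, so '.' drops the file extension and '/'
# yields the parent folder:
def get_directory_sketch(file, s='.'):
    for idx in range(-1, -len(file), -1):  # walk from the end of the string
        if file[idx] == s:
            return file[:idx] + '/'        # keep a trailing separator
    return file

# get_directory_sketch('data/image.dm4')      -> 'data/image/'
# get_directory_sketch('data/image.dm4', '/') -> 'data/'
# os.path.splitext() / os.path.dirname() are the stdlib equivalents.
# --------------------------------------------------------------------------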
<NAME>\"\\ \"<br>\"\\ \"06/13/2020\"\\", "* L2[1] - L1[1] * L2[2] Dy = L1[0] * L2[2] - L1[2]", "estimated lattice paramter in nm; scale is the image pixel size disp =", "s = load(file) return s def getDirectory(file, s='.'): #Make the working directory and", "color_lst = color_lst vec_data_color = copy.deepcopy(vec_data) #Make a copy so it does not", "+ 1] - ang_lst_mod[idx]) // 2 + ang_lst_mod[idx]) ang_bond.append((360 - ang_lst_mod[-1]) // 2", "code from my <a href=\\\"http://www-personal.umich.edu/~taoma/VectorMap.html\\\">website</a>.\") msg.setWindowTitle(\"VecMap0.1: About\") returnValue = msg.exec() #============ Acknowledgments button", "inches, 100 dots-per-inch # self.dpi = 100 self.fig = Figure((5.0, 4.0), dpi=self.dpi) self.canvas", "self.pushButton.clicked.connect(self.openfile) self.pushButton_2.clicked.connect(self.ini_atom_position) self.pushButton_3.clicked.connect(self.find_separation) self.pushButton_4.clicked.connect(self.refine_atom_position) self.pushButton_13.clicked.connect(self.cal_disp) self.pushButton_5.clicked.connect(self.vec_ang_dist) self.pushButton_6.clicked.connect(self.show_vec_map) self.pushButton_14.clicked.connect(self.show_O_vec_map) self.pushButton_7.clicked.connect(self.load_from_csv) self.pushButton_8.clicked.connect(self.disclaimer) self.pushButton_9.clicked.connect(self.show_about) self.pushButton_10.clicked.connect(self.acknowledgments) self.pushButton_11.clicked.connect(self.show_contact)", "angle (deg)\\n') for data in disp: disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1],", "s = readImage(my_path + 'Original image.hspy') title = s.metadata.General.title scale = s.axes_manager[0].scale units", "disp_data: disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm),", "angle (deg)\\n') for data in disp_O: disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1],", "= L1[2] * L2[1] - L1[1] * L2[2] Dy = L1[0] * L2[2]", "9x9 axes layout # self.axes = [self.fig.add_subplot(3,3,n) for n in range(1,10)] self.fig.set_tight_layout(True) #", "utf-8 -*- #VecMap0.1 #The first versio of VecMap from PyQt5 import QtCore, QtGui,", "1: continue ini_position = get_atom_positions(s, separation=s_factor) f_sep.axes[i].imshow(s.data) f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r') f_sep.axes[i].set_title('Separation =", "acknowledgement for Hyperspy \"\\ \"and Atomap packages in your publication:\"\\ \"<br>\" \"<NAME> la", "0 if file: print('{} has been loaded!'.format(file)) my_path = getDirectory(file) #Set the working", "3 for [001] and 1 for [110] if img_110 == 1: zone_axis =", "\"Acknoledgments\")) self.pushButton_11.setText(_translate(\"VecMap\", \"Contact\")) self.pushButton_12.setText(_translate(\"VecMap\", \"Donate me!\")) self.radioButton.setText(_translate(\"VecMap\", \"A-site\")) self.radioButton_2.setText(_translate(\"VecMap\", \"B-site\")) self.label_21.setText(_translate(\"VecMap\", \"Select which", "(px), y disp (px), disp (nm), angle (deg)\\n') for data in disp_O: disp_data.write('{},", "msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #========= Generate O vector map module ============================================= #=========", "information and<br> source code from my <a href=\\\"http://www-personal.umich.edu/~taoma/VectorMap.html\\\">website</a>.\") msg.setWindowTitle(\"VecMap0.1: About\") returnValue = msg.exec()", "positions for A in a un-distorted perovskite structure #A, B are lists of", "/ scale * 0.707) if len(Neighbor) == 4: n_0 = 
Neighbor.pop(0) n_1 =", "self.checkBox_5.setObjectName(\"checkBox_5\") self.retranslateUi(VecMap) QtCore.QMetaObject.connectSlotsByName(VecMap) #=======Connect all the functions============================================= self.pushButton.clicked.connect(self.openfile) self.pushButton_2.clicked.connect(self.ini_atom_position) self.pushButton_3.clicked.connect(self.find_separation) self.pushButton_4.clicked.connect(self.refine_atom_position) self.pushButton_13.clicked.connect(self.cal_disp) self.pushButton_5.clicked.connect(self.vec_ang_dist)", "return vec_data_color ang_lst_mod = [a - ang_lst[0] for a in ang_lst] ang_bond =", "HyperSpy and Atomap packages which \"\\ \"are partially incorporated in the program. Please", "# Load displacement data from the csv file saved previously global s, my_path,", "1: #Find initial positions for O AB_positions = ap_A.tolist() + ap_B.tolist() sublattice_AB =", "global variables such as path etc. ====================== #===== Connected to self.pushButton ================================================= def", "self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16)) self.label_19.setObjectName(\"label_19\") self.label_20 = QtWidgets.QLabel(VecMap) self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16)) self.label_20.setObjectName(\"label_20\")", "#Plot and save figures #======================= if plotpos == 1: print('='*50) print('Saving result plots...')", "QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please load the image file first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec()", "111, 20)) self.checkBox_4.setObjectName(\"checkBox_4\") self.line_3 = QtWidgets.QFrame(VecMap) self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21)) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName(\"line_3\")", "the image file first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #==== Find separation module", "the list of angles!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #========= Generate O vector", "+ \"_O_vec_map_by_{}.tif! 
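# ---- Hedged sketch: the geometry behind the four ideal O positions -------
# (an assumption about the idea the n_0..n_3 bookkeeping implements). The
# two closest_node() picks pair n_0 with its nearest neighbours n_1 and
# n_2, leaving n_3 as the diagonal partner, so the four midpoints below are
# the edge centres of the square of B sites, i.e. the ideal O columns:
def edge_midpoints_sketch(n_0, n_1, n_2, n_3):
    def mid(p, q):
        return ((p[0] + q[0]) / 2, (p[1] + q[1]) / 2)
    return [mid(n_0, n_1), mid(n_0, n_2), mid(n_1, n_3), mid(n_2, n_3)]

# edge_midpoints_sketch((0, 0), (1, 0), (0, 1), (1, 1))
#   -> [(0.5, 0.0), (0.0, 0.5), (1.0, 0.5), (0.5, 1.0)]
# --------------------------------------------------------------------------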
Enjoy!\".format(disp_atom)) except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"No O displacement data", "NavigationToolbar(self.canvas, self.main_frame) vbox = QVBoxLayout() vbox.addWidget(self.mpl_toolbar) vbox.addWidget(self.canvas) self.main_frame.setLayout(vbox) self.setCentralWidget(self.main_frame) #==================== Find separation canvas", "find_neighboring_atoms(atom,B,Ua / scale * 0.5) if len(Neighbor) == 2: ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2,(Neighbor[0][1]+Neighbor[1][1])/2) ideal_positions.append(ap_center)", "dx, dy, scale*1000*vec_len, vec_ang]) return disp def set_arrow_color(vec_data, ang_lst, color_lst): color_lst = color_lst", "for color #img_110: For [110] image sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name) sublattice.find_nearest_neighbors()", "module =========================== #======== Connected to self.pushButton_5 ============================================= def vec_ang_dist(self): try: disp_angles = [lst[5]", "atoms') f_AB.axes.imshow(image) f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_AB.axes.set_axis_off() f_AB.show() f_AB.fig.savefig(my_path", "self.label_10 = QtWidgets.QLabel(VecMap) self.label_10.setGeometry(QtCore.QRect(130, 360, 251, 51)) self.label_10.setTextFormat(QtCore.Qt.AutoText) self.label_10.setScaledContents(False) self.label_10.setWordWrap(True) self.label_10.setObjectName(\"label_10\") self.checkBox_4 =", "positions print('='*50) print('Refining atom positions for A-site atoms...') print('This may take time...') sublattice_A", "self.label_9 = QtWidgets.QLabel(VecMap) self.label_9.setGeometry(QtCore.QRect(20, 300, 191, 16)) self.label_9.setObjectName(\"label_9\") self.checkBox_2 = QtWidgets.QCheckBox(VecMap) self.checkBox_2.setGeometry(QtCore.QRect(20, 330,", "acknowledgments(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"This program was written with Python 3. The", "range(-1, -len(file), -1): if file[idx] == s: #find the file extension and remove", "convenient tool to calculate atomic displacements in perovskite structures This app was designed", "#==== Find separation module ======================================================== #==== Connected to self.pushButton_3 ================================================ def find_separation(self): #sep_range", "VecMap --- a convenient tool to calculate atomic displacements in perovskite structures This", "nodes).argmin() return closest_index,nodes[closest_index] def line(p1, p2): #Find a line function from two points", "with sublattice A and B. 
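# ---- Hedged sketch: the recurring error-dialog pattern -------------------
# Every failure path above pops the same kind of modal QMessageBox; a small
# helper like this (illustrative only, not in the original) captures it:
from PyQt5.QtWidgets import QMessageBox

def show_error_sketch(text, title='Hey guys'):
    msg = QMessageBox()
    msg.setIcon(QMessageBox.Critical)  # red "critical" icon
    msg.setText(text)
    msg.setWindowTitle(title)
    return msg.exec()                  # modal: blocks until dismissed

# show_error_sketch('No O displacement data exist!')
# --------------------------------------------------------------------------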
#atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list) #Save the", "self.label_5.setWordWrap(True) self.label_5.setObjectName(\"label_5\") self.label_6 = QtWidgets.QLabel(VecMap) self.label_6.setGeometry(QtCore.QRect(130, 230, 251, 51)) self.label_6.setTextFormat(QtCore.Qt.AutoText) self.label_6.setScaledContents(False) self.label_6.setWordWrap(True) self.label_6.setObjectName(\"label_6\")", "getDirectory(file) #Set the working path file_path = getDirectory(file, '/') #Set the parent path", "self.pushButton_2.setObjectName(\"pushButton_2\") self.pushButton_3 = QtWidgets.QPushButton(VecMap) self.pushButton_3.setGeometry(QtCore.QRect(20, 230, 91, 41)) self.pushButton_3.setObjectName(\"pushButton_3\") self.label_5 = QtWidgets.QLabel(VecMap) self.label_5.setGeometry(QtCore.QRect(130,", "un-distorted perovskite structure #A, B are lists of atom coordinates; Ua is the", "Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.5) if len(Neighbor) == 2: ap_center =", "import os import numpy as np import matplotlib.pyplot as plt import math import", "self.label_9.setGeometry(QtCore.QRect(20, 300, 191, 16)) self.label_9.setObjectName(\"label_9\") self.checkBox_2 = QtWidgets.QCheckBox(VecMap) self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20)) self.checkBox_2.setObjectName(\"checkBox_2\")", "O print('='*50) print('Subtracting sublattice A and B from the image using 2D gaussian", "Please \"\\ \"consider citing/adding acknowledgement for Hyperspy \"\\ \"and Atomap packages in your", "600, 181, 16)) self.label_17.setObjectName(\"label_17\") self.lineEdit_5 = QtWidgets.QLineEdit(VecMap) self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251, 22)) self.lineEdit_5.setObjectName(\"lineEdit_5\") self.pushButton_5", "os.makedirs(my_path) s = readImage(file) title = s.metadata.General.title scale = s.axes_manager[0].scale #Read scale data", "if len(Neighbor) == 4: ap_center = math_center(*Neighbor) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor) #Save neighbors for plotting", "{}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5])) disp_data.write('\\n') #Save the", "A = (p1[1] - p2[1]) B = (p2[0] - p1[0]) C = (p1[0]*p2[1]", "well with open(my_path + 'neighboring atoms.csv','w') as neighbor_data: for data in neighbor_pos: n", "sys app = QtWidgets.QApplication(sys.argv) VecMap = QtWidgets.QWidget() ui = Ui_VecMap() ui.setupUi(VecMap) VecMap.show() sys.exit(app.exec_())", "A_positions.append([x, y]) else: A_positions.pop(atom_nearby) replot(f_ini) def get_xy_pos_lists(atom_lst): return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1] def replot(f): x_pos,", "ap_0 = ap_B.tolist() ap_1 = ap_A.tolist() print('='*50) print('====Calculate {} in relative to {}===='.format(disp_atom,", "bins=50) f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)') f_vec_ang_dist.axes.set_xticks(list(range(0,390,30))) f_vec_ang_dist.axes.set_ylabel('Frequency') f_vec_ang_dist.axes.set_title('Put your cursor on the peak(s) to", "from scipy.spatial import distance from matplotlib_scalebar.scalebar import ScaleBar #====Helper functions, do not change====", "questions and suggestions to <EMAIL>. Please see the \"Disclaimer\" before use! 
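# ---- Hedged sketch: what the commented-out Atom_Lattice save would do ----
# (an assumption mirroring the intent of the commented lines above).
# Atomap can bundle the raw image and the refined sublattices into a single
# HDF5 file, which is also how the positions could be reloaded later:
import atomap.api as am

def save_atom_lattice_sketch(image, lattice_list, path):
    atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions',
                                   sublattice_list=lattice_list)
    atom_lattice.save(path + 'atom_position.hdf5', overwrite=True)
    return atom_lattice

# Restore with am.load_atom_lattice_from_hdf5(path + 'atom_position.hdf5').
# --------------------------------------------------------------------------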
Hope you", "550, 251, 22)) self.lineEdit_4.setObjectName(\"lineEdit_4\") self.label_15 = QtWidgets.QLabel(VecMap) self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16)) self.label_15.setObjectName(\"label_15\") self.label_16", "* L2[2] - L1[2] * L2[0] if D != 0: x = Dx", "a in A if (a[0]-x)**2 + (a[1]-y)**2 < (Ua * tol) **2] #A", "QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"<b>Disclaimer</b><br>\" \\ \"This app was designed by Dr <NAME>. Redistribution and", "======================================= def acknowledgments(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"This program was written with Python", "\"<html><head/><body><p>Try a few separation factors around the given number to determine the best", "*.jpeg , *.png ,*.bmp);;All Files (*)') global file, my_path, file_path, title, scale, units,", "module ======================================================== #==== Connected to self.pushButton_3 ================================================ def find_separation(self): #sep_range = (int(self.lineEdit_2.text()), int(self.lineEdit_3.text()))", "overlayed. f_A_site = PlotCanvas() f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms') f_A_site.axes.imshow(image) f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1],", "ideal_positions = [] Neighbor_positions = [] if not img_110: #calculate image [001] for", "self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20)) self.checkBox_3.setObjectName(\"checkBox_3\") self.pushButton_4 = QtWidgets.QPushButton(VecMap) self.pushButton_4.setGeometry(QtCore.QRect(20, 370, 91, 41)) self.pushButton_4.setObjectName(\"pushButton_4\")", "#==== Connected to self.pushButton_2 ================================================ def ini_atom_position(self): sep = int(self.lineEdit.text()) try: A_positions_ini =", "1: for vec in vec_data_color: vec.append(color_lst[0]) #set yellow for single-color rendering return vec_data_color", "QWidget() # Create the mpl Figure and FigCanvas objects. 
# 5x4 inches, 100", "function to find the neighboring atoms of P(x,y) from a list of atoms", "#======================= #Plot and save figures #======================= if plotpos == 1: print('='*50) print('Saving result", "+ '_B-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot both A-site and B-site on the image", "color='r', ls='') cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick) except NameError: #Pop up an error window", "msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #==== Find separation module ======================================================== #==== Connected to", "for the society.<br>\"\\ \"If you like this app, show your appreciation by <a", "ang_lst] color_lst = str(self.lineEdit_5.text()).split() #====Plot==== disp_color = set_arrow_color(disp, ang_lst, color_lst) global f_vec_map f_vec_map", "= find_neighboring_atoms(atom,B,Ua / scale * 0.5) if len(Neighbor) == 2: ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2,(Neighbor[0][1]+Neighbor[1][1])/2)", "ideal_positions, Neighbor_positions for atom in A: Neighbor = find_neighboring_atoms(atom,B,Ua / scale * 0.5)", "available if find_O == 1: global f_O_site, f_all f_O_site = PlotCanvas() f_O_site.setWindowTitle('VecMap0.1: Refined", "Dy = L1[0] * L2[2] - L1[2] * L2[0] if D != 0:", "match the angles):\")) self.label_17.setText(_translate(\"VecMap\", \"e.g., yellow blue red green\")) self.lineEdit_5.setText(_translate(\"VecMap\", \"yellow\")) self.pushButton_5.setText(_translate(\"VecMap\", \"Vector", "plots\")) self.pushButton_4.setText(_translate(\"VecMap\", \"Refine\")) self.label_10.setText(_translate(\"VecMap\", \"<html><head/><body><p>Refine atom positions. Check [001] or [011] zone. Only", "path etc. ====================== #===== Connected to self.pushButton ================================================= def openfile(self): openfile_name = QFileDialog.getOpenFileName(self,'Select", "100 dots-per-inch # self.dpi = 100 self.fig = Figure((10.0, 10.0), dpi=self.dpi) self.canvas =", "an atomap sublattice object def find_neighboring_atoms(P, A, Ua, tol=1.2): # Define a function", "path file_path = getDirectory(file, '/') #Set the parent path if not os.path.exists(my_path): os.makedirs(my_path)", "QtWidgets.QLabel(VecMap) self.label_17.setGeometry(QtCore.QRect(20, 600, 181, 16)) self.label_17.setObjectName(\"label_17\") self.lineEdit_5 = QtWidgets.QLineEdit(VecMap) self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251, 22))", "if event.button == 1: # Left mouse button x = np.float(event.xdata) y =", "support is my motivation!<br>\") msg.setWindowTitle(\"VecMap0.1: Donate me!\") returnValue = msg.exec() #=========== Define figure", "n_3[1]) / 2 ideal_O_positions.append(o_2) o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] +", "s_bar = 0 try: global f_vec_map_O f_vec_map_O = PlotCanvas() f_vec_map_O.setWindowTitle('VecMap0.1: Vector Map of", "find_displacement(ap_0, ideal_pos, scale) #Save the displacement data with open(my_path + title + '-{}-disp.csv'.format(disp_atom),'w')", "if O_map == 1: ap_2 = ap_O.tolist() ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale)", "f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot all the atoms on", "\"Save result plots\")) self.pushButton_4.setText(_translate(\"VecMap\", \"Refine\")) self.label_10.setText(_translate(\"VecMap\", \"<html><head/><body><p>Refine atom positions. 
Check", "i in range(9): s_factor = sep - 4 + i f_sep.axes[i].set_aspect('equal') f_sep.axes[i].set_axis_off() if", "refine the atom positions first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #======== Display angle", "return x,y else: return False def math_center(a, b, c, d): #Define a function", "self.label_18.setScaledContents(False) self.label_18.setWordWrap(True) self.label_18.setObjectName(\"label_18\") self.pushButton_7 = QtWidgets.QPushButton(VecMap) self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51)) self.pushButton_7.setObjectName(\"pushButton_7\") self.line_4 =", "scale = s.axes_manager[0].scale #Read scale data from the image units = s.axes_manager[0].units #Read", "f_all.axes.set_axis_off() f_all.show() f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif',dpi=600,bbox_inches='tight') if plotpos ==", "/ scale * 0.707) if len(Neighbor) == 4: ap_center = math_center(*Neighbor) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor)", "the displacement first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() print('') #========= Generate vector map", "self.pushButton = QtWidgets.QPushButton(VecMap) self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41)) self.pushButton.setObjectName(\"pushButton\") self.checkBox = QtWidgets.QCheckBox(VecMap) self.checkBox.setGeometry(QtCore.QRect(150, 10,", "f_original_img = PlotCanvas() f_original_img.setWindowTitle(file) f_original_img.axes.imshow(image) f_original_img.axes.set_axis_off() f_original_img.axes.set_title('{} \\n has been successfully loaded!'.format(title)) f_original_img.show()", "f_vec_map_O.axes.set_axis_off() for vec in disp_O: f_vec_map_O.axes.arrow(vec[0],vec[1],vec[2]*O_len,vec[3]*O_len,color='red',linewidth=1,head_width=O_len/3,head_length=O_len/3) #Add a scale bar if s_bar ==", "[001] and 0.5*a for [110] x, y = P N = [a for", "QtWidgets.QLineEdit(VecMap) self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20)) self.lineEdit_3.setObjectName(\"lineEdit_3\") self.label_13 = QtWidgets.QLabel(VecMap) self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16))", "main scripts start from here if cal_site == 0:#Calculate A site disp_atom =", "in vec_data_color: vec.append(color_lst[0]) #set yellow for single-color rendering return vec_data_color ang_lst_mod = [a", "n = len(data) for idx in range(n): neighbor_data.write('{0}, {1}, '.format(*data[idx])) neighbor_data.write('\\n') #Calculate O", "coding: utf-8 -*- #VecMap0.1 #The first versio of VecMap from PyQt5 import QtCore,", "layout # self.axes = [self.fig.add_subplot(3,3,n) for n in range(1,10)] self.fig.set_tight_layout(True) # Create the", "QVBoxLayout() vbox.addWidget(self.mpl_toolbar) vbox.addWidget(self.canvas) self.main_frame.setLayout(vbox) self.setCentralWidget(self.main_frame) #==================== Find separation canvas ========================================= class SeparationCanvas(QMainWindow): def", "file[-15:-9] file_O_disp = my_path + title + '-disp_O_by_' + disp_atom + '.csv' if", "disp_O, image, disp_atom openfile_name = QFileDialog.getOpenFileName(self,'Select the displacement data','','CSV (*.csv);;All Files (*)') file", "vec.append(color_lst[0]) #set yellow for single-color rendering return vec_data_color ang_lst_mod = [a - ang_lst[0]", "= self.fig.add_subplot(111) # Create the navigation toolbar, tied to the canvas # self.mpl_toolbar", "(n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2 ideal_O_positions.append(o_1) o_2 =", "self.pushButton_2.clicked.connect(self.ini_atom_position) 
self.pushButton_3.clicked.connect(self.find_separation) self.pushButton_4.clicked.connect(self.refine_atom_position) self.pushButton_13.clicked.connect(self.cal_disp) self.pushButton_5.clicked.connect(self.vec_ang_dist) self.pushButton_6.clicked.connect(self.show_vec_map) self.pushButton_14.clicked.connect(self.show_O_vec_map) self.pushButton_7.clicked.connect(self.load_from_csv) self.pushButton_8.clicked.connect(self.disclaimer) self.pushButton_9.clicked.connect(self.show_about) self.pushButton_10.clicked.connect(self.acknowledgments) self.pushButton_11.clicked.connect(self.show_contact) self.pushButton_12.clicked.connect(self.donate)", "O displacement data exist!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #============ Load displacement from", "if len(Neighbor) == 4: n_0 = Neighbor.pop(0) n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0]) n_2 =", "' + my_path + title + \"_{}_vec_map.tif! Enjoy!\".format(disp_atom)) except NameError: msg = QMessageBox()", "angle distribution of the vectors module =========================== #======== Connected to self.pushButton_5 ============================================= def", "scale*1000*vec_len, vec_ang]) return disp def set_arrow_color(vec_data, ang_lst, color_lst): color_lst = color_lst vec_data_color =", "self.label_3.setObjectName(\"label_3\") self.label_4 = QtWidgets.QLabel(VecMap) self.label_4.setGeometry(QtCore.QRect(20, 130, 111, 16)) self.label_4.setObjectName(\"label_4\") self.pushButton_2 = QtWidgets.QPushButton(VecMap) self.pushButton_2.setGeometry(QtCore.QRect(20,", "+ '.tif',dpi=600,bbox_inches='tight') #Plot O atoms if available if find_O == 1: global f_O_site,", "= Neighbor.pop(0) n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0]) n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0]) n_3 = Neighbor.pop() o_0", "self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16)) self.label_15.setObjectName(\"label_15\") self.label_16 = QtWidgets.QLabel(VecMap) self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16)) self.label_16.setObjectName(\"label_16\")", "#Plot B-site atom positions with the original image overlayed. f_B_site = PlotCanvas() f_B_site.setWindowTitle('VecMap0.1:", "680, 80, 41)) self.pushButton_6.setObjectName(\"pushButton_6\") self.label_18 = QtWidgets.QLabel(VecMap) self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51)) self.label_18.setTextFormat(QtCore.Qt.AutoText) self.label_18.setScaledContents(False)", "factor to initialize the atom positions for refining. 
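# ---- Hedged sketch: the signal/slot wiring used above --------------------
# Each QPushButton exposes a clicked signal, and connect() binds it to a
# bound method (the slot) that Qt calls on every click. Illustrative only:
from PyQt5.QtWidgets import QWidget, QPushButton

class WiringDemoSketch(QWidget):
    def __init__(self):
        super().__init__()
        self.run_button = QPushButton('Run', self)
        self.run_button.clicked.connect(self.on_run)  # signal -> slot

    def on_run(self):
        print('Run clicked')  # stands in for openfile, refine_atom_position, ...
# --------------------------------------------------------------------------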
Adding/removing atoms by left-click.</p></body></html>\")) self.label_6.setText(_translate(\"VecMap\",", "if D != 0: x = Dx / D y = Dy /", "elif dy >= 0 and dx < 0: vec_ang = math.degrees(math.atan(dy/dx)) + 180", "to self.pushButton_4 ================================================ def refine_atom_position(self): #Global variables: global ap_A, ap_B, ap_O, Ua, Uc,", "'atom_position.hdf5', overwrite=True) #======================= #Plot and save figures #======================= if plotpos == 1: print('='*50)", "= ap_A.tolist() print('='*50) print('====Calculate {} in relative to {}===='.format(disp_atom, rel_atom)) ideal_pos, neighbor_pos =", "22)) self.lineEdit_4.setObjectName(\"lineEdit_4\") self.label_15 = QtWidgets.QLabel(VecMap) self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16)) self.label_15.setObjectName(\"label_15\") self.label_16 = QtWidgets.QLabel(VecMap)", "from PyQt5.QtWidgets import * from PyQt5.QtCore import * import matplotlib matplotlib.use('Qt5Agg') from matplotlib.backends.backend_qt5agg", "using 2D gaussian fit...') print('This may take time...') image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False) #Subtract both A and", "z1 = sublattice_A.zones_axis_average_distances[1] Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale Uc = math.sqrt(z1[0]**2", "the atom positions for refining. Adding/removing atoms by left-click.</p></body></html>\")) self.label_6.setText(_translate(\"VecMap\", \"<html><head/><body><p>Try a few", "working path file_path = getDirectory(file, '/') #Set the parent path if not os.path.exists(my_path):", "\"<NAME> la et al. <a href=\\\"http://doi.org/10.5281/zenodo.3396791\\\">hyperspy/hyperspy: HyperSpy v1.5.2 (2019).</a>\" \\ \"<br>\" \"<NAME>. et", "and FigCanvas objects. # 10x10 inches, 100 dots-per-inch # self.dpi = 100 self.fig", "figure canvas =================================================== class PlotCanvas(QMainWindow): def __init__(self, parent=None): QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Plot') self.create_main_frame()", "file and set up global variables such as path etc. ====================== #===== Connected", "ang_lst[0] for a in ang_lst] ang_bond = [] for idx in range(len(ang_lst_mod)-1): ang_bond.append((ang_lst_mod[idx", "displacement data was found! 
Will do {} atom displacement only!'.format(disp_atom)) #============ Disclaimer button", "color='b') f_B_site.axes.set_axis_off() f_B_site.show() f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot both", "def create_main_frame(self): self.main_frame = QWidget() # Create the mpl Figure and FigCanvas objects.", "#====Plot==== disp_color = set_arrow_color(disp, ang_lst, color_lst) global f_vec_map f_vec_map = PlotCanvas() f_vec_map.setWindowTitle('VecMap0.1: Vector", "list of angles!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #========= Generate O vector map", "Neighbor)[0]) n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0]) n_3 = Neighbor.pop() o_0 = (n_0[0] + n_1[0]) /", "the zone axis for the initial position of B: typically 3 for [001]", "= [int(a) for a in ang_lst] color_lst = str(self.lineEdit_5.text()).split() #====Plot==== disp_color = set_arrow_color(disp,", "191, 16)) self.label_9.setObjectName(\"label_9\") self.checkBox_2 = QtWidgets.QCheckBox(VecMap) self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20)) self.checkBox_2.setObjectName(\"checkBox_2\") self.checkBox_3 =", "N = sorted(N, key=lambda x: (x[0] ** 2 + x[1] ** 2) **", "and FigCanvas objects. # 5x4 inches, 100 dots-per-inch # self.dpi = 100 self.fig", "displacements in perovskite structures This app was designed by Dr. <NAME>. Address your", "sublattice_AB.zones_axis_average_distances[2]#Only work for [001] currently O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002) #Initial positions of O print('='*50)", "scale Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale print('='*50) print('Estimated lattice parameters (average)", "self.label_6.setObjectName(\"label_6\") self.line_2 = QtWidgets.QFrame(VecMap) self.line_2.setGeometry(QtCore.QRect(20, 280, 371, 21)) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName(\"line_2\") self.label_9 =", "= QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"This program was written with Python 3. 
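# ---- Hedged sketch: parsing the displacement CSVs written earlier --------
# (assumption: the files carry one header line, then rows of
# "x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)", as in
# the write code above). A loader then only needs to skip the header and
# parse six floats per row:
def load_disp_sketch(file):
    disp_data = []
    with open(file, 'r') as disp:
        for row in disp.readlines()[1:]:     # [1:] skips the header row
            fields = row.strip().split(',')
            if len(fields) == 6:
                disp_data.append([float(v) for v in fields])
    return disp_data
# --------------------------------------------------------------------------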
The author \"", "QtWidgets.QPushButton(VecMap) self.pushButton_4.setGeometry(QtCore.QRect(20, 370, 91, 41)) self.pushButton_4.setObjectName(\"pushButton_4\") self.label_10 = QtWidgets.QLabel(VecMap) self.label_10.setGeometry(QtCore.QRect(130, 360, 251, 51))", "#If enabled, will calculate the displacement of O atoms in relation to sublattice", "self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41)) self.pushButton_14.setObjectName(\"pushButton_14\") self.lineEdit_3 = QtWidgets.QLineEdit(VecMap) self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20)) self.lineEdit_3.setObjectName(\"lineEdit_3\")", "28)) self.pushButton_10.setObjectName(\"pushButton_10\") self.pushButton_11 = QtWidgets.QPushButton(VecMap) self.pushButton_11.setGeometry(QtCore.QRect(150, 810, 120, 28)) self.pushButton_11.setObjectName(\"pushButton_11\") self.pushButton_12 = QtWidgets.QPushButton(VecMap)", "\"e.g., something around 8-12\")) self.label_14.setText(_translate(\"VecMap\", \"List of angles (degrees) of vectors that will", "scale): #find atomic displacement of A #A_com, A are lists of atom coordinates;", "ap_A, ap_B, ap_O, Ua, Uc, find_O #Read checkboxes if self.checkBox_2.isChecked(): find_O = 1", "self.label_6.setTextFormat(QtCore.Qt.AutoText) self.label_6.setScaledContents(False) self.label_6.setWordWrap(True) self.label_6.setObjectName(\"label_6\") self.line_2 = QtWidgets.QFrame(VecMap) self.line_2.setGeometry(QtCore.QRect(20, 280, 371, 21)) self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)", "remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False) #Refine B-site atoms print('='*50) print('Refining atom positions for sublattice B...')", ">= 0 and dx >= 0: vec_ang = math.degrees(math.atan(dy/dx)) elif dy >= 0", "= PlotCanvas() f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms') f_O_site.axes.imshow(image) f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')", "\"VecMap0.1\")) #VecMap.setWindowIcon(QtGui.QIcon('icon.png')) self.pushButton.setText(_translate(\"VecMap\", \"Load Image\")) self.checkBox.setText(_translate(\"VecMap\", \"ABF/BF image\")) self.label.setText(_translate(\"VecMap\", \"Step 1. Load image\"))", "leave it as [0]. ang_lst = [int(a) for a in ang_lst] color_lst =", "Create the navigation toolbar, tied to the canvas # self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)", "# Add a 9x9 axes layout # self.axes = [self.fig.add_subplot(3,3,n) for n in", "for single-color rendering return vec_data_color ang_lst_mod = [a - ang_lst[0] for a in", "return vec_data_color def load_disp_data_from_csv(file): with open(file,'r') as disp: disp_data = [] lines =", "msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"No O displacement data exist!\") msg.setWindowTitle(\"Hey guys\") returnValue =", "y = Dy / D return x,y else: return False def math_center(a, b,", "ap_B.tolist() sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A + B') sublattice_AB.construct_zone_axes() zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]#Only work for", "my_path) except NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please", "vs. 
O atoms') f_all.axes.imshow(image) f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_all.axes.scatter(ap_O[:,0],", "ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1]) for vec in vec_data_color: ang =", "atom_name, atom_color='r'): #Refine atom positions for a sublattice #img: an array of image", "= PlotCanvas() f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. O atoms') f_all.axes.imshow(image) f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2,", "0 if self.radioButton_2.isChecked(): cal_site = 1 cal_110 = img_110 #If the input image", "to self.pushButton_9 ======================================= def show_about(self): msg = QMessageBox() # msg.setIcon(QMessageBox.Information) msg.setText(\"VecMap v0.1.1\"\\ \"<br>\"\\", "120, 28)) self.pushButton_11.setObjectName(\"pushButton_11\") self.pushButton_12 = QtWidgets.QPushButton(VecMap) self.pushButton_12.setGeometry(QtCore.QRect(280, 780, 101, 58)) font = QtGui.QFont()", "\"<br>\" \\ \"THE SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND.<br>\")", "<a href=\\\"http://www-personal.umich.edu/~taoma/VectorMap.html\\\">website</a>.\") msg.setWindowTitle(\"VecMap0.1: About\") returnValue = msg.exec() #============ Acknowledgments button ==================================================== #============ Connected", "to make a ADF-like image # Draw an image global f_original_img f_original_img =", "for [001] currently O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002) #Initial positions of O print('='*50) print('Subtracting sublattice", "disp (nm), angle (deg)\\n') for data in disp: disp_data.write('{}, {}, {}, {}, {},", "bar if s_bar == 1: scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2) f_vec_map_O.axes.add_artist(scalebar) f_vec_map_O.show() f_vec_map_O.fig.savefig(my_path +", "/ 2 ideal_O_positions.append(o_3) ideal_O_positions = list(dict.fromkeys(ideal_O_positions)) return ideal_O_positions def find_displacement(A, A_com, scale): #find", "* scale Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale print('='*50) print('Estimated lattice parameters", "B site; 1 to calculate B site in relative to A site if", "in disp_color: f_vec_map.axes.arrow(vec[0],vec[1],vec[2]*a_len,vec[3]*a_len,color=vec[6], linewidth=1, head_width=a_len/3, head_length=a_len/3) #Add a scale bar if s_bar ==", "if self.radioButton_2.isChecked(): cal_site = 1 cal_110 = img_110 #If the input image is", "ap_A.tolist() + ap_B.tolist() sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A + B') sublattice_AB.construct_zone_axes() zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]#Only", "len(Neighbor) == 2: ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2,(Neighbor[0][1]+Neighbor[1][1])/2) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor) return ideal_positions, Neighbor_positions def find_ideal_O_pos(A,", "= find_ideal_O_pos(ap_0, ap_1, U_avg, scale) disp_O = find_displacement(ap_2, ideal_O_pos, scale) with open(my_path +", "find_O == 1: global f_O_site, f_all f_O_site = PlotCanvas() f_O_site.setWindowTitle('VecMap0.1: Refined positions of", "find_ideal_pos(A, B, Ua, scale, img_110=False): #calculate the ideal atomic positions for A in", "91)) self.pushButton_5.setObjectName(\"pushButton_5\") self.pushButton_6 = QtWidgets.QPushButton(VecMap) self.pushButton_6.setGeometry(QtCore.QRect(20, 680, 80, 41)) self.pushButton_6.setObjectName(\"pushButton_6\") self.label_18 = 
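# ---- Hedged sketch: a self-contained version of these overlay figures ----
# Each results plot is imshow of the frame plus one scatter per refined
# (N, 2) position array; synthetic stand-ins are used here so it runs alone:
import numpy as np
import matplotlib.pyplot as plt

def overlay_sketch():
    rng = np.random.default_rng(0)
    frame = rng.random((64, 64))       # stand-in for the STEM image
    pos_a = rng.random((20, 2)) * 64   # stand-in for ap_A
    pos_b = rng.random((20, 2)) * 64   # stand-in for ap_B
    fig, ax = plt.subplots()
    ax.imshow(frame)
    ax.scatter(pos_a[:, 0], pos_a[:, 1], s=2, color='r')
    ax.scatter(pos_b[:, 0], pos_b[:, 1], s=2, color='b')
    ax.set_axis_off()
    fig.savefig('overlay_sketch.tif', dpi=600, bbox_inches='tight')
# --------------------------------------------------------------------------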
QtWidgets.QLabel(VecMap)", "(n_2[1] + n_3[1]) / 2 ideal_O_positions.append(o_3) ideal_O_positions = list(dict.fromkeys(ideal_O_positions)) return ideal_O_positions def find_displacement(A,", "array. print('Refining O atoms done!') #lattice_list.append(sublattice_O) print('Refining atoms done!') #Construct atom position results", "guys\") returnValue = msg.exec() except IndexError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"The list of", "pattern. For single color rendering, just leave it as [0]. ang_lst = [int(a)", "versio of VecMap from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import *", "191, 51)) self.label_18.setTextFormat(QtCore.Qt.AutoText) self.label_18.setScaledContents(False) self.label_18.setWordWrap(True) self.label_18.setObjectName(\"label_18\") self.pushButton_7 = QtWidgets.QPushButton(VecMap) self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51))", "= QtWidgets.QCheckBox(VecMap) self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20)) self.checkBox_5.setChecked(True) self.checkBox_5.setObjectName(\"checkBox_5\") self.retranslateUi(VecMap) QtCore.QMetaObject.connectSlotsByName(VecMap) #=======Connect all the", "def load_from_csv(self): # Load displacement data from the csv file saved previously global", "self.pushButton_10 ======================================= def acknowledgments(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"This program was written with", "PlotCanvas() f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions') f_vec_ang_dist.axes.hist(disp_angles, bins=50) f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)') f_vec_ang_dist.axes.set_xticks(list(range(0,390,30))) f_vec_ang_dist.axes.set_ylabel('Frequency') f_vec_ang_dist.axes.set_title('Put", "==================================================== #============ Connected to self.pushButton_9 ======================================= def show_about(self): msg = QMessageBox() # msg.setIcon(QMessageBox.Information)", "\\n\" \"map\")) self.label_18.setText(_translate(\"VecMap\", \"<html><head/><body><p>Generate a vector map. Set the coloring pattern by checking", "disp_O = find_displacement(ap_2, ideal_O_pos, scale) with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom),'w') as disp_data:", "sublattice O...') sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g') ap_O = sublattice_O.atom_positions #Refined", "np import matplotlib.pyplot as plt import math import copy from scipy.spatial import distance", "41)) self.pushButton_6.setObjectName(\"pushButton_6\") self.label_18 = QtWidgets.QLabel(VecMap) self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51)) self.label_18.setTextFormat(QtCore.Qt.AutoText) self.label_18.setScaledContents(False) self.label_18.setWordWrap(True) self.label_18.setObjectName(\"label_18\")", "motivation!<br>\") msg.setWindowTitle(\"VecMap0.1: Donate me!\") returnValue = msg.exec() #=========== Define figure canvas =================================================== class", "Find separation factors') self.create_main_frame() def create_main_frame(self): self.main_frame = QWidget() # Create the mpl", "in px, 0.707*a for [001] and 0.5*a for [110] x, y = P", "if file[idx] == s: #find the file extension and remove it. 
'/' for", "self.line_4.setObjectName(\"line_4\") self.pushButton_8 = QtWidgets.QPushButton(VecMap) self.pushButton_8.setGeometry(QtCore.QRect(20, 780, 120, 28)) self.pushButton_8.setObjectName(\"pushButton_8\") self.label_19 = QtWidgets.QLabel(VecMap) self.label_19.setGeometry(QtCore.QRect(60,", "Neighbor_positions.append(Neighbor) #Save neighbors for plotting return ideal_positions, Neighbor_positions for atom in A: Neighbor", "if s_factor < 1: continue ini_position = get_atom_positions(s, separation=s_factor) f_sep.axes[i].imshow(s.data) f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5,", "self.checkBox_5 = QtWidgets.QCheckBox(VecMap) self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20)) self.checkBox_5.setChecked(True) self.checkBox_5.setObjectName(\"checkBox_5\") self.retranslateUi(VecMap) QtCore.QMetaObject.connectSlotsByName(VecMap) #=======Connect all", "remove atoms') f_ini.show() def onclick(event): if event.inaxes != f_ini.axes: return if event.button ==", "saved previously global s, my_path, title, scale, units, disp, disp_O, image, disp_atom openfile_name", "for [110] if img_110 == 1: zone_axis = sublattice_A.zones_axis_average_distances[1] else: zone_axis = sublattice_A.zones_axis_average_distances[2]", "msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate the displacement first!\") msg.setWindowTitle(\"Hey guys\") returnValue =", "except NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please initialize", "= Figure((5.0, 4.0), dpi=self.dpi) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) # Since we have only", "vectors that will be colored differently:\")) self.lineEdit_4.setText(_translate(\"VecMap\", \"45\")) self.label_15.setText(_translate(\"VecMap\", \"e.g., 45 135 225", "np.asarray(atom_lst)[:,1] def replot(f): x_pos, y_pos = get_xy_pos_lists(A_positions) dp.set_xdata(x_pos) dp.set_ydata(y_pos) f.fig.canvas.draw() f.fig.canvas.flush_events() xy_positions =", "ang_lst, color_lst) global f_vec_map f_vec_map = PlotCanvas() f_vec_map.setWindowTitle('VecMap0.1: Vector Map') f_vec_map.axes.imshow(image) f_vec_map.axes.set_axis_off() for", "columns are visible.</p></body></html>\")) self.checkBox_4.setText(_translate(\"VecMap\", \"[011] Zone\")) self.label_11.setText(_translate(\"VecMap\", \"Step 4. Generate a vector map\"))", "self.checkBox.setText(_translate(\"VecMap\", \"ABF/BF image\")) self.label.setText(_translate(\"VecMap\", \"Step 1. Load image\")) self.label_2.setText(_translate(\"VecMap\", \"<html><head/><body><p>Load a HR-STEM image", "is [110], turn this on. 
#====================================================================================
# VecMap 0.1 --- a PyQt5 tool to calculate atomic displacements in perovskite
# structures from HR-STEM images, built on the HyperSpy and Atomap packages.
#====================================================================================

#==================== Modules and helper functions ===================================
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
from hyperspy.io import load
from atomap.atom_finding_refining import get_atom_positions
from atomap.sublattice import Sublattice
from atomap.tools import remove_atoms_from_image_using_2d_gaussian
import os
import math
import copy
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance
from matplotlib_scalebar.scalebar import ScaleBar

#====Helper functions, do not change====
def readImage(file):
    #Load a raw image file for processing. Requires the HyperSpy package.
    s = load(file)
    return s

def getDirectory(file, s='.'):
    #Make the working directory and return the path.
    for idx in range(-1, -len(file), -1):
        if file[idx] == s:
            path = file[:idx] + '/'
            return path

def find_atom(img, ini_pos, atom_name, atom_color='r'):
    #Refine atom positions for a sublattice.
    #img: an array of image data; ini_pos: initial positions;
    #atom_name: a string for the name; atom_color: marker color.
    sublattice = Sublattice(ini_pos, image=img, color=atom_color, name=atom_name)
    sublattice.find_nearest_neighbors()
    sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False)
    sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False)
    return sublattice  #Return an atomap sublattice object

def find_neighboring_atoms(P, A, Ua, tol=1.2):
    #Find the neighboring atoms of P(x,y) from a list of atoms A.
    #P: a given atom (x,y); A: a list of atoms; Ua: a threshold in px,
    #0.707*a for [001] and 0.5*a for [110].
    x, y = P
    N = [a for a in A if (a[0] - x)**2 + (a[1] - y)**2 < (Ua * tol)**2]  #A list to store the neighboring atoms
    N = sorted(N, key=lambda a: (a[0]**2 + a[1]**2)**0.5)
    return N

def closest_node(node, nodes):
    #Find the closest node in an array.
    closest_index = distance.cdist([node], nodes).argmin()
    return closest_index, nodes[closest_index]

def line(p1, p2):
    #Return the coefficients (A, B, -C) of the line A*x + B*y = C through p1 and p2.
    A = (p1[1] - p2[1])
    B = (p2[0] - p1[0])
    C = (p1[0]*p2[1] - p2[0]*p1[1])
    return A, B, -C

def intersection(L1, L2):
    #Find the intersection point of two lines by Cramer's rule.
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        x = Dx / D
        y = Dy / D
        return x, y
    else:
        return False

def math_center(a, b, c, d):
    #Define the center of four points as the intersection of the two diagonals.
    #Find the diagonal starting from a: its far end is the most distant point.
    M = [b, c, d]
    diag_idx = distance.cdist([a], M).argmax()
    L1 = line(a, M[diag_idx])
    del M[diag_idx]
    L2 = line(M[0], M[1])
    center = intersection(L1, L2)
    return center
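#A quick sanity check for the geometry helpers above (illustrative only, not part
#of the VecMap workflow): the diagonals of a unit square intersect at (0.5, 0.5),
#so math_center should recover the centroid regardless of the corner ordering,
#because it picks the diagonal itself from the point distances.
def _demo_math_center():
    corners = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
    center = math_center(*corners)
    assert abs(center[0] - 0.5) < 1e-9 and abs(center[1] - 0.5) < 1e-9
    return center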
def find_ideal_pos(A, B, Ua, scale, img_110=False):
    #Calculate the ideal atomic positions for A in an un-distorted perovskite structure.
    #A, B are lists of atom coordinates; Ua is the estimated lattice parameter in nm;
    #scale is the image pixel size. Return a list of tuples.
    ideal_positions = []
    Neighbor_positions = []
    if img_110:
        #[110] image: each A column sits midway between two B neighbors.
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.5)
            if len(Neighbor) == 2:
                ap_center = ((Neighbor[0][0] + Neighbor[1][0]) / 2,
                             (Neighbor[0][1] + Neighbor[1][1]) / 2)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor)  #Save neighbors for plotting
        return ideal_positions, Neighbor_positions
    #[001] image: each A column sits at the center of its four B neighbors.
    for atom in A:
        Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
        if len(Neighbor) == 4:
            ap_center = math_center(*Neighbor)
            ideal_positions.append(ap_center)
            Neighbor_positions.append(Neighbor)  #Save neighbors for plotting
    return ideal_positions, Neighbor_positions

def find_ideal_O_pos(A, B, Ua, scale):
    #Calculate the ideal atomic positions for O in an un-distorted perovskite structure.
    #Only supports the [001] zone: O sites sit midway between adjacent B-site neighbors.
    ideal_O_positions = []
    for atom in A:
        Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
        if len(Neighbor) == 4:
            n_0, n_1, n_2, n_3 = Neighbor
            o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2
            ideal_O_positions.append(o_0)
            o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2
            ideal_O_positions.append(o_1)
            o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2
            ideal_O_positions.append(o_2)
            o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2
            ideal_O_positions.append(o_3)
    ideal_O_positions = list(dict.fromkeys(ideal_O_positions))  #Drop duplicated midpoints
    return ideal_O_positions

def find_displacement(A, A_com, scale):
    #Find the atomic displacement of A.
    #A, A_com are lists of measured and ideal (computed) atom coordinates;
    #scale is the image pixel size.
    disp = []
    for atom in A_com:
        arrow_end = closest_node(atom, A)[1]
        vec_len = distance.euclidean(arrow_end, atom)
        if vec_len > 0:
            dx = arrow_end[0] - atom[0]
            dy = arrow_end[1] - atom[1]
            if dx == 0:
                dx = 1e-12  #Avoid a zero division for purely vertical displacements
            #Calculate the displacement vector angle (0-360 deg) according to dx, dy.
            if dx > 0 and dy >= 0:
                vec_ang = math.degrees(math.atan(dy / dx))
            elif dy > 0 and dx < 0:
                vec_ang = math.degrees(math.atan(dy / dx)) + 180
            elif dx < 0 and dy < 0:
                vec_ang = math.degrees(math.atan(dy / dx)) + 180
            else:
                vec_ang = 360 + math.degrees(math.atan(dy / dx))
            disp.append([atom[0], atom[1], dx, dy, scale * 1000 * vec_len, vec_ang])
    return disp

def set_arrow_color(vec_data, ang_lst, color_lst):
    #Assign a color to each displacement vector according to its angle.
    vec_data_color = copy.deepcopy(vec_data)  #Make a copy so it does not modify the original list
    if len(ang_lst) == 1:
        for vec in vec_data_color:
            vec.append(color_lst[0])  #Single-color rendering
        return vec_data_color
    #Color boundaries lie halfway between the requested angles, measured from the first one.
    ang_lst_mod = [a - ang_lst[0] for a in ang_lst]
    ang_bond = []
    for idx in range(len(ang_lst_mod) - 1):
        ang_bond.append((ang_lst_mod[idx + 1] - ang_lst_mod[idx]) // 2 + ang_lst_mod[idx])
    ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1])
    for vec in vec_data_color:
        ang = (vec[5] - ang_lst[0]) % 360  #Angle measured from the first requested direction
        for i in range(len(ang_bond) - 1):
            if round(ang) in range(ang_bond[i], ang_bond[i + 1]):
                vec.append(color_lst[i + 1])
    for vec in vec_data_color:
        if len(vec) == 6:
            vec.append(color_lst[0])  #Everything unmatched gets the first color
    return vec_data_color

def load_disp_data_from_csv(file):
    #Load displacement data saved by the "Calculate" step.
    with open(file, 'r') as f:
        lines = f.readlines()
    disp_data = []
    print(lines[0])  #Echo the header row
    for lin in lines[1:]:
        lin_data = lin.strip().split(', ')
        disp_data.append([float(data) for data in lin_data])
    return disp_data
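#Illustrative check of the displacement pipeline on synthetic numbers (the
#coordinates and the 0.01 nm/px scale below are hypothetical, not VecMap output):
#each ideal site is matched to its nearest measured atom and reported as
#(x, y, dx, dy, length, angle), the length in scale*1000 units.
def _demo_find_displacement():
    measured = [(10.2, 10.0), (20.0, 20.3)]   #refined atom positions (px)
    ideal = [(10.0, 10.0), (20.0, 20.0)]      #ideal perovskite sites (px)
    vecs = find_displacement(measured, ideal, 0.01)
    colored = set_arrow_color(vecs, [0], ['yellow'])  #single-color rendering
    for x, y, dx, dy, length, angle, color in colored:
        print('({:.1f}, {:.1f}): {:.2f} at {:.1f} deg, {}'.format(x, y, length, angle, color))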
#=========== Define figure canvas ===================================================
class PlotCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects: 5x4 inches, 100 dots-per-inch.
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Since we have only one plot, we could use add_axes instead of add_subplot,
        # but then the subplot configuration tool in the navigation toolbar wouldn't work.
        self.axes = self.fig.add_subplot(111)
        # Create the navigation toolbar, tied to the canvas.
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

#==================== Find separation canvas =========================================
class SeparationCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Find separation factors')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        self.dpi = 100
        self.fig = Figure((10.0, 10.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Add a 3x3 axes layout for the nine candidate separation factors.
        self.axes = [self.fig.add_subplot(3, 3, i) for i in range(1, 10)]
        self.fig.set_tight_layout(True)
        # Create the navigation toolbar, tied to the canvas.
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
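#Minimal standalone usage sketch for PlotCanvas (the random image is a stand-in
#for real STEM data; any Qt widget needs a running QApplication, which the
#application entry at the bottom of this file normally provides).
def _demo_plot_canvas():
    import sys
    app = QtWidgets.QApplication.instance() or QtWidgets.QApplication(sys.argv)
    canvas = PlotCanvas()
    canvas.setWindowTitle('PlotCanvas demo')
    canvas.axes.imshow(np.random.rand(64, 64))
    canvas.axes.set_axis_off()
    canvas.show()
    return app, canvas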
#==================== Main window ====================================================
class Ui_VecMap(QtWidgets.QMainWindow):
    def __init__(self):
        super(Ui_VecMap, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)

    def setupUi(self, VecMap):
        VecMap.setObjectName("VecMap")
        VecMap.resize(402, 876)
        VecMap.setMinimumSize(QtCore.QSize(402, 836))
        VecMap.setMaximumSize(QtCore.QSize(1024, 1024))
        #Widgets with routine geometry are created in the loop below; the
        #objectNames must match those used in retranslateUi and in the signal
        #connections at the end of this method.
        for name in ('pushButton', 'pushButton_4', 'pushButton_5', 'pushButton_10',
                     'pushButton_11', 'pushButton_13', 'pushButton_14'):
            button = QtWidgets.QPushButton(VecMap)
            button.setObjectName(name)
            setattr(self, name, button)
        for name in ('checkBox', 'checkBox_2', 'checkBox_3'):
            box = QtWidgets.QCheckBox(VecMap)
            box.setObjectName(name)
            setattr(self, name, box)
        #--- Step 1: load image ---
        self.label = QtWidgets.QLabel(VecMap)
        self.label.setGeometry(QtCore.QRect(20, 10, 121, 16))
        self.label.setObjectName("label")
        self.checkBox_4 = QtWidgets.QCheckBox(VecMap)
        self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20))
        self.checkBox_4.setObjectName("checkBox_4")
        self.label_2 = QtWidgets.QLabel(VecMap)
        self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51))
        self.label_2.setTextFormat(QtCore.Qt.AutoText)
        self.label_2.setScaledContents(False)
        self.label_2.setWordWrap(True)
        self.label_2.setObjectName("label_2")
        self.line = QtWidgets.QFrame(VecMap)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        #--- Step 2: initialize atom positions ---
        self.label_3 = QtWidgets.QLabel(VecMap)
        self.label_3.setGeometry(QtCore.QRect(20, 110, 191, 16))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(VecMap)
        self.label_4.setGeometry(QtCore.QRect(20, 130, 111, 16))
        self.label_4.setObjectName("label_4")
        self.lineEdit = QtWidgets.QLineEdit(VecMap)
        self.lineEdit.setGeometry(QtCore.QRect(130, 130, 30, 20))
        self.lineEdit.setObjectName("lineEdit")
        self.label_12 = QtWidgets.QLabel(VecMap)
        self.label_12.setGeometry(QtCore.QRect(170, 130, 191, 16))
        self.label_12.setObjectName("label_12")
        self.pushButton_2 = QtWidgets.QPushButton(VecMap)
        self.pushButton_2.setGeometry(QtCore.QRect(20, 170, 91, 41))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(VecMap)
        self.pushButton_3.setGeometry(QtCore.QRect(20, 230, 91, 41))
        self.pushButton_3.setObjectName("pushButton_3")
        self.label_5 = QtWidgets.QLabel(VecMap)
        self.label_5.setGeometry(QtCore.QRect(130, 160, 251, 51))
        self.label_5.setTextFormat(QtCore.Qt.AutoText)
        self.label_5.setScaledContents(False)
        self.label_5.setWordWrap(True)
        self.label_5.setObjectName("label_5")
        #--- Step 3: refine atom positions ---
        self.label_10 = QtWidgets.QLabel(VecMap)
        self.label_10.setGeometry(QtCore.QRect(130, 360, 251, 51))
        self.label_10.setTextFormat(QtCore.Qt.AutoText)
        self.label_10.setScaledContents(False)
        self.label_10.setWordWrap(True)
        self.label_10.setObjectName("label_10")
        self.line_3 = QtWidgets.QFrame(VecMap)
        self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21))
        self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_3.setObjectName("line_3")
        #--- Step 4: calculate displacements and generate the vector map ---
        self.label_11 = QtWidgets.QLabel(VecMap)
        self.label_11.setGeometry(QtCore.QRect(20, 440, 191, 16))
        self.label_11.setObjectName("label_11")
        self.label_21 = QtWidgets.QLabel(VecMap)
        self.label_21.setGeometry(QtCore.QRect(20, 460, 171, 16))
        self.label_21.setObjectName("label_21")
        self.radioButton = QtWidgets.QRadioButton(VecMap)
        self.radioButton.setChecked(True)  #Default to the A site so cal_disp always has a selection
        self.radioButton.setObjectName("radioButton")
        self.radioButton_2 = QtWidgets.QRadioButton(VecMap)
        self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20))
        self.radioButton_2.setObjectName("radioButton_2")
        self.pushButton_7 = QtWidgets.QPushButton(VecMap)
        self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51))
        self.pushButton_7.setObjectName("pushButton_7")
        self.label_14 = QtWidgets.QLabel(VecMap)
        self.label_14.setGeometry(QtCore.QRect(20, 510, 381, 16))
        self.label_14.setObjectName("label_14")
        self.label_15 = QtWidgets.QLabel(VecMap)
        self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16))
        self.label_15.setObjectName("label_15")
        self.lineEdit_4 = QtWidgets.QLineEdit(VecMap)
        self.lineEdit_4.setGeometry(QtCore.QRect(20, 550, 251, 22))
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.label_16 = QtWidgets.QLabel(VecMap)
        self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16))
        self.label_16.setObjectName("label_16")
        self.label_17 = QtWidgets.QLabel(VecMap)
        self.label_17.setObjectName("label_17")
        self.lineEdit_5 = QtWidgets.QLineEdit(VecMap)
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.label_7 = QtWidgets.QLabel(VecMap)
        self.label_7.setGeometry(QtCore.QRect(20, 650, 41, 16))
        self.label_7.setObjectName("label_7")
        self.lineEdit_2 = QtWidgets.QLineEdit(VecMap)
        self.lineEdit_2.setGeometry(QtCore.QRect(60, 650, 30, 20))
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.label_13 = QtWidgets.QLabel(VecMap)
        self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16))
        self.label_13.setObjectName("label_13")
        self.lineEdit_3 = QtWidgets.QLineEdit(VecMap)
        self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30, 20))
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.checkBox_5 = QtWidgets.QCheckBox(VecMap)
        self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20))
        self.checkBox_5.setChecked(True)
        self.checkBox_5.setObjectName("checkBox_5")
        self.pushButton_6 = QtWidgets.QPushButton(VecMap)
        self.pushButton_6.setGeometry(QtCore.QRect(20, 680, 80, 41))
        self.pushButton_6.setObjectName("pushButton_6")
        self.label_18 = QtWidgets.QLabel(VecMap)
        self.label_18.setGeometry(QtCore.QRect(200, 680, 191, 51))
        self.label_18.setTextFormat(QtCore.Qt.AutoText)
        self.label_18.setScaledContents(False)
        self.label_18.setWordWrap(True)
        self.label_18.setObjectName("label_18")
        self.line_4 = QtWidgets.QFrame(VecMap)
        self.line_4.setGeometry(QtCore.QRect(20, 730, 371, 21))
        self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_4.setObjectName("line_4")
        #--- Info buttons ---
        self.label_20 = QtWidgets.QLabel(VecMap)
        self.label_20.setGeometry(QtCore.QRect(20, 750, 211, 16))
        self.label_20.setObjectName("label_20")
        self.pushButton_8 = QtWidgets.QPushButton(VecMap)
        self.pushButton_8.setGeometry(QtCore.QRect(20, 780, 120, 28))
        self.pushButton_8.setObjectName("pushButton_8")
        self.pushButton_9 = QtWidgets.QPushButton(VecMap)
        self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120, 28))
        self.pushButton_9.setObjectName("pushButton_9")
        self.pushButton_12 = QtWidgets.QPushButton(VecMap)
        self.pushButton_12.setGeometry(QtCore.QRect(280, 780, 101, 58))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.pushButton_12.setFont(font)
        self.pushButton_12.setObjectName("pushButton_12")
        self.label_19 = QtWidgets.QLabel(VecMap)
        self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16))
        self.label_19.setObjectName("label_19")

        self.retranslateUi(VecMap)
        QtCore.QMetaObject.connectSlotsByName(VecMap)
        #=======Connect all the functions=============================================
        self.pushButton.clicked.connect(self.openfile)
        self.pushButton_2.clicked.connect(self.ini_atom_position)
        self.pushButton_3.clicked.connect(self.find_separation)
        self.pushButton_4.clicked.connect(self.refine_atom_position)
        self.pushButton_13.clicked.connect(self.cal_disp)
        self.pushButton_5.clicked.connect(self.vec_ang_dist)
        self.pushButton_6.clicked.connect(self.show_vec_map)
        self.pushButton_14.clicked.connect(self.show_O_vec_map)
        self.pushButton_7.clicked.connect(self.load_from_csv)
        self.pushButton_8.clicked.connect(self.disclaimer)
        self.pushButton_9.clicked.connect(self.show_about)
        self.pushButton_10.clicked.connect(self.acknowledgments)
        self.pushButton_11.clicked.connect(self.show_contact)
        self.pushButton_12.clicked.connect(self.donate)

    def retranslateUi(self, VecMap):
        _translate = QtCore.QCoreApplication.translate
        VecMap.setWindowTitle(_translate("VecMap", "VecMap0.1"))
        #VecMap.setWindowIcon(QtGui.QIcon('icon.png'))
        self.pushButton.setText(_translate("VecMap", "Load Image"))
        self.checkBox.setText(_translate("VecMap", "ABF/BF image"))
        self.checkBox_4.setText(_translate("VecMap", "[011] Zone"))
        self.label.setText(_translate("VecMap", "Step 1. Load image"))
        self.label_2.setText(_translate("VecMap", "<html><head/><body><p>Load a HR-STEM image with a perovskite structure. Support [001] and [011] zone axes. Filtered image is preferred.</p><p><br/></p></body></html>"))
        self.label_3.setText(_translate("VecMap", "Step 2. Initialize atom positions"))
        self.label_4.setText(_translate("VecMap", "Separation factor"))
        self.label_12.setText(_translate("VecMap", "e.g., something around 8-12"))
        self.pushButton_2.setText(_translate("VecMap", "Initialize"))
        self.pushButton_3.setText(_translate("VecMap", "Find \n" "separation"))
        self.label_5.setText(_translate("VecMap", "<html><head/><body><p>Input an appropriate separation factor to initialize the atom positions for refining. Adding/removing atoms by left-click.</p></body></html>"))
        self.checkBox_2.setText(_translate("VecMap", "Refine Oxygen"))
        self.checkBox_3.setText(_translate("VecMap", "Save result plots"))
        self.pushButton_4.setText(_translate("VecMap", "Refine"))
        self.label_10.setText(_translate("VecMap", "<html><head/><body><p>Refine atom positions. Check [001] or [011] zone. Only check Refine Oxygen if O columns are visible.</p></body></html>"))
        self.label_11.setText(_translate("VecMap", "Step 4. Generate a vector map"))
        self.label_21.setText(_translate("VecMap", "Select the site to calculate"))
        self.radioButton.setText(_translate("VecMap", "A site"))
        self.radioButton_2.setText(_translate("VecMap", "B site"))
        self.pushButton_13.setText(_translate("VecMap", "Calculate"))
        self.label_14.setText(_translate("VecMap", "List of angles (degrees) of vectors that will be colored differently:"))
        self.lineEdit_4.setText(_translate("VecMap", "45"))
        self.label_15.setText(_translate("VecMap", "e.g., 45 135 225 315"))
        self.label_16.setText(_translate("VecMap", "List of colors (should match the angles):"))
        self.label_17.setText(_translate("VecMap", "e.g., yellow blue red green"))
        self.lineEdit_5.setText(_translate("VecMap", "yellow"))
        self.pushButton_5.setText(_translate("VecMap", "Vector angle\n" "distribution"))
        self.label_7.setText(_translate("VecMap", "Scale:"))
        self.lineEdit_2.setText(_translate("VecMap", "10"))
        self.lineEdit_3.setText(_translate("VecMap", "6"))
        self.pushButton_6.setText(_translate("VecMap", "Show \n" "map"))
        self.pushButton_14.setText(_translate("VecMap", "Oxygen\n" " map"))
        self.label_18.setText(_translate("VecMap", "<html><head/><body><p>Generate a vector map. Set the coloring pattern by checking the vector angle distribution.</p></body></html>"))
        self.pushButton_7.setText(_translate("VecMap", "Load from csv"))
        self.pushButton_8.setText(_translate("VecMap", "Disclaimer"))
        self.label_19.setText(_translate("VecMap", "This app was designed by Dr. <NAME>"))
        self.label_20.setText(_translate("VecMap", "Check here for more information!"))
        self.pushButton_9.setText(_translate("VecMap", "About"))
        self.pushButton_10.setText(_translate("VecMap", "Acknowledgments"))
        self.pushButton_11.setText(_translate("VecMap", "Contact"))
        self.pushButton_12.setText(_translate("VecMap", "Donate me!"))
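    #Side note on the wiring above (illustrative, not part of the original UI):
    #QtCore.QMetaObject.connectSlotsByName only auto-connects methods that follow
    #the on_<objectName>_<signal> naming convention, e.g. a hypothetical
    #
    #    @QtCore.pyqtSlot()
    #    def on_pushButton_clicked(self): ...
    #
    #which is why every button is also connected explicitly at the end of setupUi.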
    #===== Load image and set up global variables such as path etc. ======================
    #===== Connected to self.pushButton ==================================================
    def openfile(self):
        openfile_name = QFileDialog.getOpenFileName(self, 'Select Image', '',
            'DigitalMicrograph (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg , *.jpeg , *.png ,*.bmp);;All Files (*)')
        global file, my_path, file_path, title, scale, units, s, image, ABF, img_110
        file = openfile_name[0]
        if self.checkBox.isChecked():  #Set the ABF toggle from the checkbox
            ABF = 1
        else:
            ABF = 0
        if self.checkBox_4.isChecked():
            img_110 = 1
        else:
            img_110 = 0
        if file:
            my_path = getDirectory(file)  #Set the working path
            file_path = getDirectory(file, '/')  #Set the parent path
            if not os.path.exists(my_path):
                os.makedirs(my_path)
            s = readImage(file)
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale  #Read scale data from the image
            units = s.axes_manager[0].units  #Read units
            s.save(my_path + 'Original image.hspy', overwrite=True)  #Save a backup file in hspy format
            image = s.data
            if ABF == 1:
                s.data = np.divide(1, s.data)  #Invert the ABF contrast to make an ADF-like image
            # Draw an image
            global f_original_img
            f_original_img = PlotCanvas()
            f_original_img.setWindowTitle(file)
            f_original_img.axes.imshow(image)
            f_original_img.axes.set_axis_off()
            f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title))
            f_original_img.show()

    #==== Initialize atom position module ===============================================
    #==== Connected to self.pushButton_2 ================================================
    def ini_atom_position(self):
        sep = int(self.lineEdit.text())
        try:
            A_positions_ini = get_atom_positions(s, separation=sep)
            global A_positions, f_ini
            A_positions = A_positions_ini.tolist()
            f_ini = PlotCanvas()
            f_ini.setWindowTitle('Initial atom positions for refining')
            f_ini.axes.imshow(s.data)
            f_ini.axes.set_axis_off()
            f_ini.axes.set_title('Left click to add/remove atoms')

            def onclick(event):
                if event.inaxes != f_ini.axes:
                    return
                if event.button == 1:  # Left mouse button
                    x = float(event.xdata)
                    y = float(event.ydata)
                    atom_nearby = closest_node((x, y), A_positions)[0]
                    if distance.euclidean((x, y), A_positions[atom_nearby]) > 5:
                        A_positions.append([x, y])  #Empty area: add an atom
                    else:
                        A_positions.pop(atom_nearby)  #Existing atom: remove it
                    replot(f_ini)

            def get_xy_pos_lists(atom_lst):
                return np.asarray(atom_lst)[:, 0], np.asarray(atom_lst)[:, 1]

            def replot(f):
                x_pos, y_pos = get_xy_pos_lists(A_positions)
                dp.set_xdata(x_pos)
                dp.set_ydata(y_pos)
                f.fig.canvas.draw()
                f.fig.canvas.flush_events()

            xy_positions = get_xy_pos_lists(A_positions)
            dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='o', ms=5, color='r', ls='')
            cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
            f_ini.show()
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #==== Find separation module ========================================================
    #==== Connected to self.pushButton_3 ================================================
    def find_separation(self):
        #sep_range = (int(self.lineEdit_2.text()), ...)
        #s_peaks.metadata.General.title = 'Use Arrow keys to find an appropriate separation factor'
        #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True)
        sep = int(self.lineEdit.text())
        sep_range = list(range(sep - 4, sep + 5))  #Range might be changed for different images
        # Create canvas for drawing
        try:
            global f_sep
            f_sep = SeparationCanvas()
            for i in range(9):
                s_factor = sep - 4 + i
                f_sep.axes[i].set_aspect('equal')
                f_sep.axes[i].set_axis_off()
                if s_factor < 1:
                    continue
                ini_position = get_atom_positions(s, separation=s_factor)
                f_sep.axes[i].imshow(s.data)
                f_sep.axes[i].scatter(np.asarray(ini_position)[:, 0], np.asarray(ini_position)[:, 1], s=5, color='r')
                f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
            f_sep.show()
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #==== Refine atom positions module ===================================================
    #==== Connected to self.pushButton_4 ================================================
    def refine_atom_position(self):
        #Global variables:
        global ap_A, ap_B, ap_O, Ua, Uc, find_O
        #Read checkboxes
        if self.checkBox_2.isChecked():
            find_O = 1
        else:
            find_O = 0
        if self.checkBox_3.isChecked():
            plotpos = 1
        else:
            plotpos = 0
        try:
            #Refine atom positions
            print('='*50)
            print('Refining atom positions for A-site atoms...')
            sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
            print('Refining A-site atoms done!')
            ap_A = sublattice_A.atom_positions  #Refined atom positions for A-site. NumPy array.
            #lattice_list = []
            #lattice_list.append(sublattice_A)
            print('='*50)
            print('Finding initial positions for B-site atoms...')
            sublattice_A.construct_zone_axes()
            #Find the zone axis for the initial position of B: typically 3 for [001] and 1 for [110]
            if img_110 == 1:
                zone_axis = sublattice_A.zones_axis_average_distances[1]
            else:
                zone_axis = sublattice_A.zones_axis_average_distances[2]
            #Calculate lattice parameters (average) from the first two zone vectors.
            z0 = sublattice_A.zones_axis_average_distances[0]
            z1 = sublattice_A.zones_axis_average_distances[1]
            Ua = np.hypot(z0[0], z0[1]) * scale
            Uc = np.hypot(z1[0], z1[1]) * scale
            print('Lattice parameters (average) from the image:')
            print('a = {:.3f} {}'.format(Ua, units))
            print('c = {:.3f} {}'.format(Uc, units))
            B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)
            #Remove A-site atoms from the image
            print('='*50)
            print('Subtracting sublattice A from the image using 2D gaussian fit...')
            print('This may take time...')
            image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)
            #Refine B-site atoms
            print('='*50)
            print('Refining atom positions for sublattice B...')
            print('Almost there...')
            sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue')
            ap_B = sublattice_B.atom_positions  #Refined atom positions for B-site. NumPy array.
            print('Refining B-site atoms done!')
            #lattice_list.append(sublattice_B)
            #Find the position of O atoms if available
            if find_O == 1:
                #Find initial positions for O
                AB_positions = ap_A.tolist() + ap_B.tolist()
                sublattice_AB = Sublattice(AB_positions, image=s.data, color='y', name='Sublattice A + B')
                sublattice_AB.construct_zone_axes()
                zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]  #Only works for [001] currently
                O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002)  #Initial positions of O
                #Remove A and B from the original image
                image_without_AB = remove_atoms_from_image_using_2d_gaussian(sublattice_AB.image, sublattice_AB, show_progressbar=False)
                #Refine O positions
                print('='*50)
                print('Refining atom positions for sublattice O...')
                sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g')
                ap_O = sublattice_O.atom_positions  #Refined atom positions for O. NumPy array.
                print('Refining O atoms done!')
                #lattice_list.append(sublattice_O)
            print('Refining atoms done!')
            #Construct atom position results with sublattice A and B.
            #atom_lattice = ...
            if plotpos == 1:
                print('='*50)
                print('Saving result plots...')
                global f_A_site, f_B_site, f_AB
                #Plot A-site atom positions with the original image overlaid.
                f_A_site = PlotCanvas()
                f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms')
                f_A_site.axes.imshow(image)
                f_A_site.axes.scatter(ap_A[:, 0], ap_A[:, 1], s=2, color='r')
                f_A_site.axes.set_axis_off()
                f_A_site.show()
                f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot B-site atom positions with the original image overlaid.
                f_B_site = PlotCanvas()
                f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
                f_B_site.axes.imshow(image)
                f_B_site.axes.scatter(ap_B[:, 0], ap_B[:, 1], s=2, color='b')
                f_B_site.axes.set_axis_off()
                f_B_site.show()
                f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot both A-site and B-site on the image
                f_AB = PlotCanvas()
                f_AB.setWindowTitle('VecMap0.1: A-site atoms vs. B-site atoms')
                f_AB.axes.imshow(image)
                f_AB.axes.scatter(ap_A[:, 0], ap_A[:, 1], s=2, color='r')
                f_AB.axes.scatter(ap_B[:, 0], ap_B[:, 1], s=2, color='b')
                f_AB.axes.set_axis_off()
                f_AB.show()
                f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot O atoms if available
                if find_O == 1:
                    global f_O_site, f_all
                    f_O_site = PlotCanvas()
                    f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms')
                    f_O_site.axes.imshow(image)
                    f_O_site.axes.scatter(ap_O[:, 0], ap_O[:, 1], s=2, color='g')
                    f_O_site.axes.set_axis_off()
                    f_O_site.show()
                    f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif', dpi=600, bbox_inches='tight')
                    #Plot all the atoms on the image
                    f_all = PlotCanvas()
                    f_all.setWindowTitle('VecMap0.1: A-site, B-site, and O atoms')
                    f_all.axes.imshow(image)
                    f_all.axes.scatter(ap_A[:, 0], ap_A[:, 1], s=2, color='r')
                    f_all.axes.scatter(ap_B[:, 0], ap_B[:, 1], s=2, color='b')
                    f_all.axes.scatter(ap_O[:, 0], ap_O[:, 1], s=2, color='g')
                    f_all.axes.set_axis_off()
                    f_all.show()
                    f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif', dpi=600, bbox_inches='tight')
                print('The result plots have been saved to ' + my_path)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please initialize the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
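    #A headless sketch of the same refinement flow, for scripting without the GUI
    #(assumptions: 'image.hspy' is a hypothetical calibrated image on disk and 12
    #a workable separation factor; only the A sublattice is refined here).
    @staticmethod
    def _demo_headless_refine(path='image.hspy', separation=12):
        signal = readImage(path)
        initial = get_atom_positions(signal, separation=separation)
        sublattice = find_atom(signal.data, initial.tolist(), 'A-site atoms')
        sublattice.construct_zone_axes()
        return sublattice.atom_positions  #Refined A-site coordinates in pixels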
Enjoy!\".format(disp_atom)) except NameError: msg =", "+ n_1[0]) / 2, (n_0[1] + n_1[1]) / 2 ideal_O_positions.append(o_0) o_1 = (n_0[0]", "= SeparationCanvas() for i in range(9): s_factor = sep - 4 + i", "= QtWidgets.QPushButton(VecMap) self.pushButton_7.setGeometry(QtCore.QRect(290, 460, 91, 51)) self.pushButton_7.setObjectName(\"pushButton_7\") self.line_4 = QtWidgets.QFrame(VecMap) self.line_4.setGeometry(QtCore.QRect(20, 730, 371,", "differently:\")) self.lineEdit_4.setText(_translate(\"VecMap\", \"45\")) self.label_15.setText(_translate(\"VecMap\", \"e.g., 45 135 225 315\")) self.label_16.setText(_translate(\"VecMap\", \"List of colors", "atomic positions for A in a un-distorted perovskite structure #A, B are lists", "zone_axis = sublattice_A.zones_axis_average_distances[2] #Calculate lattice parameter z0 = sublattice_A.zones_axis_average_distances[0] z1 = sublattice_A.zones_axis_average_distances[1] Ua", "does not modify the original list if len(ang_lst) == 1: for vec in", "from a list of atoms A. # P:a given atom (x,y); A: a", "f_vec_map_O.axes.arrow(vec[0],vec[1],vec[2]*O_len,vec[3]*O_len,color='red',linewidth=1,head_width=O_len/3,head_length=O_len/3) #Add a scale bar if s_bar == 1: scalebar = ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2)", "1: s.data = np.divide(1, s.data) #Inverse the ABF contrast to make a ADF-like", "nm; scale is the image pixel size #return a list of tuples ideal_positions", "/ D y = Dy / D return x,y else: return False def", "event.inaxes != f_ini.axes: return if event.button == 1: # Left mouse button x", "nodes): #A function to find the closest node in an array closest_index =", "- L1[2] * L2[0] if D != 0: x = Dx / D", "self.setWindowTitle('VecMap0.1: Plot') self.create_main_frame() def create_main_frame(self): self.main_frame = QWidget() # Create the mpl Figure", "B = (p2[0] - p1[0]) C = (p1[0]*p2[1] - p2[0]*p1[1]) return A, B,", "650, 41, 16)) self.label_13.setObjectName(\"label_13\") self.checkBox_5 = QtWidgets.QCheckBox(VecMap) self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20)) self.checkBox_5.setChecked(True) self.checkBox_5.setObjectName(\"checkBox_5\")", "the functions============================================= self.pushButton.clicked.connect(self.openfile) self.pushButton_2.clicked.connect(self.ini_atom_position) self.pushButton_3.clicked.connect(self.find_separation) self.pushButton_4.clicked.connect(self.refine_atom_position) self.pushButton_13.clicked.connect(self.cal_disp) self.pushButton_5.clicked.connect(self.vec_ang_dist) self.pushButton_6.clicked.connect(self.show_vec_map) self.pushButton_14.clicked.connect(self.show_O_vec_map) self.pushButton_7.clicked.connect(self.load_from_csv) self.pushButton_8.clicked.connect(self.disclaimer) self.pushButton_9.clicked.connect(self.show_about)", "Find separation canvas ========================================= class SeparationCanvas(QMainWindow): def __init__(self, parent=None): QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Find", "#Make the working directory and return the path. for idx in range(-1, -len(file),", "O_positions, 'O sites', atom_color='g') ap_O = sublattice_O.atom_positions #Refined atoms positions for O. 
NumPy", "vec in vec_data_color: vec.append(color_lst[0]) #set yellow for single-color rendering return vec_data_color ang_lst_mod =", "check Refine Oxygen if O columns are visible.</p></body></html>\")) self.checkBox_4.setText(_translate(\"VecMap\", \"[011] Zone\")) self.label_11.setText(_translate(\"VecMap\", \"Step", "\"acknowledges the HyperSpy and Atomap packages which \"\\ \"are partially incorporated in the", "O atoms') f_all.axes.imshow(image) f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_all.axes.scatter(ap_O[:,0], ap_O[:,1],", "np.float(event.xdata) y = np.float(event.ydata) atom_nearby = closest_node((x,y), A_positions)[0] if distance.euclidean((x,y), A_positions[atom_nearby]) > 5:", "= PlotCanvas() f_AB.setWindowTitle('VecMap0.1: A-site atoms vs. B-site atoms') f_AB.axes.imshow(image) f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')", "'.csv' if os.path.isfile(file_O_disp): disp_O = load_disp_data_from_csv(file_O_disp) find_O = 1 print('Found O displacement data!')", "tuples ideal_positions = [] Neighbor_positions = [] if not img_110: #calculate image [001]", "16)) self.label_12.setObjectName(\"label_12\") self.label_14 = QtWidgets.QLabel(VecMap) self.label_14.setGeometry(QtCore.QRect(20, 510, 381, 16)) self.label_14.setObjectName(\"label_14\") self.lineEdit_4 = QtWidgets.QLineEdit(VecMap)", "ap_O, Ua, Uc, find_O #Read checkboxes if self.checkBox_2.isChecked(): find_O = 1 else: find_O", "an array of image data; ini_pos: initial positions; atom_name: a string for name;", "self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName(\"line_4\") self.pushButton_8 = QtWidgets.QPushButton(VecMap) self.pushButton_8.setGeometry(QtCore.QRect(20, 780, 120, 28)) self.pushButton_8.setObjectName(\"pushButton_8\") self.label_19 =", "(average) from the image:') print('a = {:.3f} {}'.format(Ua, units)) print('c = {:.3f} {}'.format(Uc,", "\"<br>\"\\ \"Your support is my motivation!<br>\") msg.setWindowTitle(\"VecMap0.1: Donate me!\") returnValue = msg.exec() #===========", "L1[2] * L2[1] - L1[1] * L2[2] Dy = L1[0] * L2[2] -", "#========= Connected to self.pushButton_6 =========================================== def show_vec_map(self): a_len = int(self.lineEdit_2.text()) if self.checkBox_5.isChecked(): s_bar", "+ 5)) # Create canvas for drawing try: global f_sep f_sep = SeparationCanvas()", "= 1 else: plotpos = 0 try: #Refine atom positions print('='*50) print('Refining atom", "print('Subtracting sublattice A from the image using 2D gaussian fit...') print('This may take", "math_center(*Neighbor) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor) #Save neighbors for plotting return ideal_positions, Neighbor_positions for atom in", "A_positions[atom_nearby]) > 5: A_positions.append([x, y]) else: A_positions.pop(atom_nearby) replot(f_ini) def get_xy_pos_lists(atom_lst): return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1]", "self.main_frame.setLayout(vbox) self.setCentralWidget(self.main_frame) #==================== Modules and helper functions =================================== from hyperspy.io import load from", "is the image pixel size #return a list of tuples ideal_positions = []", "range(len(ang_bond)-1): if round(ang) in range(ang_bond[i], ang_bond[i+1]): vec.append(color_lst[i+1]) for vec in vec_data_color: if len(vec)", "positions print('='*50) print('Refining atom positions for sublattice O...') sublattice_O = 
find_atom(image_without_AB, O_positions, 'O", "from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import * from PyQt5.QtCore import", "<NAME>. Address your questions and suggestions to <EMAIL>. Please see the \"Disclaimer\" before", "= 0 try: #Refine atom positions print('='*50) print('Refining atom positions for A-site atoms...')", "for B-site atoms...') sublattice_A.construct_zone_axes() #Find the zone axis for the initial position of", "# configuration tool in the navigation toolbar wouldn't # work. # self.axes =", "sublattice_A = find_atom(s.data, A_positions, 'A-site atoms') print('Refining A-site atoms done!') ap_A = sublattice_A.atom_positions", "\\ \"THE SOFTWARE IS PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND.<br>\") msg.setWindowTitle(\"VecMap0.1:", "\"e.g., 45 135 225 315\")) self.label_16.setText(_translate(\"VecMap\", \"List of colors (should match the angles):\"))", "np.float(event.ydata) atom_nearby = closest_node((x,y), A_positions)[0] if distance.euclidean((x,y), A_positions[atom_nearby]) > 5: A_positions.append([x, y]) else:", "a vector map. Set the coloring pattern by checking the vector angle distribution.</p></body></html>\"))", "marker='o', ms=5, color='r', ls='') cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick) except NameError: #Pop up an", "230, 91, 41)) self.pushButton_3.setObjectName(\"pushButton_3\") self.label_5 = QtWidgets.QLabel(VecMap) self.label_5.setGeometry(QtCore.QRect(130, 160, 251, 51)) self.label_5.setTextFormat(QtCore.Qt.AutoText) self.label_5.setScaledContents(False)", "return center def find_ideal_pos(A, B, Ua, scale, img_110=False): #calculate the ideal atomic positions", "the subplot # configuration tool in the navigation toolbar wouldn't # work. #", "reached through the \"\\ \"app, please add the following reference: <br>\"\\ \"1. Ma,", "separation module ======================================================== #==== Connected to self.pushButton_3 ================================================ def find_separation(self): #sep_range = (int(self.lineEdit_2.text()),", "Directions') f_vec_ang_dist.axes.hist(disp_angles, bins=50) f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)') f_vec_ang_dist.axes.set_xticks(list(range(0,390,30))) f_vec_ang_dist.axes.set_ylabel('Frequency') f_vec_ang_dist.axes.set_title('Put your cursor on the", "intersection point of two lines D = L1[0] * L2[1] - L1[1] *", "\"are partially incorporated in the program. Please \"\\ \"consider citing/adding acknowledgement for Hyperspy", "\"Step 4. 
Generate a vector map\")) self.label_12.setText(_translate(\"VecMap\", \"e.g., something around 8-12\")) self.label_14.setText(_translate(\"VecMap\", \"List", "+ '-disp_O_by_{}.csv'.format(disp_atom),'w') as disp_data: disp_data.write('x (px), y (px), x disp (px), y disp", "Ua, scale): #calculate the ideal atomic positions for O in a un-distorted perovskite", "from matplotlib.figure import Figure class Ui_VecMap(QtWidgets.QMainWindow): def __init__(self): super(Ui_VecMap,self).__init__() self.setupUi(self) self.retranslateUi(self) def setupUi(self,", "ini_pos, atom_name, atom_color='r'): #Refine atom positions for a sublattice #img: an array of", "for more information!\")) self.pushButton_9.setText(_translate(\"VecMap\", \"About\")) self.pushButton_10.setText(_translate(\"VecMap\", \"Acknoledgments\")) self.pushButton_11.setText(_translate(\"VecMap\", \"Contact\")) self.pushButton_12.setText(_translate(\"VecMap\", \"Donate me!\")) self.radioButton.setText(_translate(\"VecMap\",", "= s.axes_manager[0].scale #Read scale data from the image units = s.axes_manager[0].units #Read units", "self.pushButton_5 ============================================= def vec_ang_dist(self): try: disp_angles = [lst[5] for lst in disp] global", "+ i f_sep.axes[i].set_aspect('equal') f_sep.axes[i].set_axis_off() if s_factor < 1: continue ini_position = get_atom_positions(s, separation=s_factor)", "initial positions for B-site atoms...') sublattice_A.construct_zone_axes() #Find the zone axis for the initial", "from hyperspy.io import load from atomap.atom_finding_refining import get_atom_positions from atomap.sublattice import Sublattice from", "2 ideal_O_positions.append(o_1) o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) /", "f_AB.axes.imshow(image) f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_AB.axes.set_axis_off() f_AB.show() f_AB.fig.savefig(my_path +", "is reached through the \"\\ \"app, please add the following reference: <br>\"\\ \"1.", "self.lineEdit_3.setObjectName(\"lineEdit_3\") self.label_13 = QtWidgets.QLabel(VecMap) self.label_13.setGeometry(QtCore.QRect(110, 650, 41, 16)) self.label_13.setObjectName(\"label_13\") self.checkBox_5 = QtWidgets.QCheckBox(VecMap) self.checkBox_5.setGeometry(QtCore.QRect(210,", "y disp (px), disp (nm), angle (deg)\\n') for data in disp: disp_data.write('{}, {},", "of Displacement Directions') f_vec_ang_dist.axes.hist(disp_angles, bins=50) f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)') f_vec_ang_dist.axes.set_xticks(list(range(0,390,30))) f_vec_ang_dist.axes.set_ylabel('Frequency') f_vec_ang_dist.axes.set_title('Put your cursor", "(*)') global file, my_path, file_path, title, scale, units, s, image, ABF, img_110 file", "251, 22)) self.lineEdit_4.setObjectName(\"lineEdit_4\") self.label_15 = QtWidgets.QLabel(VecMap) self.label_15.setGeometry(QtCore.QRect(20, 530, 181, 16)) self.label_15.setObjectName(\"label_15\") self.label_16 =", "QtWidgets.QPushButton(VecMap) self.pushButton_8.setGeometry(QtCore.QRect(20, 780, 120, 28)) self.pushButton_8.setObjectName(\"pushButton_8\") self.label_19 = QtWidgets.QLabel(VecMap) self.label_19.setGeometry(QtCore.QRect(60, 850, 291, 16))", "self.label_3.setText(_translate(\"VecMap\", \"Step 2. 
def find_neighboring_atoms(P, A, Ua, tol=1.2):
    #Find the neighboring atoms of P = (x, y) from a list of atom positions A.
    #Ua: a threshold in px, 0.707*a for [001] and 0.5*a for [110] projections;
    #tol adds 20% slack so slightly displaced atoms are still caught.
    x, y = P
    N = [a for a in A if (a[0]-x)**2 + (a[1]-y)**2 < (Ua * tol)**2] #neighboring atoms
    N = sorted(N, key=lambda p: (p[0]**2 + p[1]**2)**0.5)
    return N

def closest_node(node, nodes):
    #A function to find the closest node in an array
    closest_index = distance.cdist([node], nodes).argmin()
    return closest_index, nodes[closest_index]
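# A minimal sketch (added for illustration, not part of the original source):
# on a toy lattice with a = 1 px, the four B neighbors around an A atom at
# (1, 1) fall inside the 0.707*a search radius while a farther atom does not.
def _demo_find_neighboring_atoms():
    B = [[0.5, 0.5], [1.5, 0.5], [0.5, 1.5], [1.5, 1.5], [2.5, 2.5]]
    print(find_neighboring_atoms((1, 1), B, 0.707))
    # -> the four corner atoms around (1, 1); (2.5, 2.5) is rejected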
def line(p1, p2):
    #Coefficients (A, B, C) of the line A*x + B*y = C through two points
    A = p1[1] - p2[1]
    B = p2[0] - p1[0]
    C = p1[0] * p2[1] - p2[0] * p1[1]
    return A, B, -C

def intersection(L1, L2):
    #A function to find the intersection point of two lines by Cramer's rule
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        x = Dx / D
        y = Dy / D
        return x, y
    else:
        return False #the lines are parallel

def math_center(a, b, c, d):
    #Find the mathematical center of four points a, b, c, d:
    #pair a with the farthest of the other three points to get one diagonal of
    #the quadrilateral, take the remaining two points as the other diagonal,
    #and intersect the two diagonals.
    pts = [b, c, d]
    far_idx = max(range(3), key=lambda i: (pts[i][0]-a[0])**2 + (pts[i][1]-a[1])**2)
    opposite = pts.pop(far_idx)
    center = intersection(line(a, opposite), line(pts[0], pts[1]))
    return center
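# A quick check (illustrative addition): the diagonals of a unit square cross
# at its geometric center, so both helpers should return (0.5, 0.5).
def _demo_math_center():
    print(intersection(line((0, 0), (1, 1)), line((0, 1), (1, 0)))) # (0.5, 0.5)
    print(math_center((0, 0), (1, 0), (0, 1), (1, 1)))              # (0.5, 0.5)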
def find_ideal_pos(A, B, Ua, scale, img_110=False):
    #Calculate the ideal atomic positions for A in an undistorted perovskite structure
    #A, B are lists of atom coordinates; Ua is the estimated lattice parameter in nm;
    #scale is the image pixel size
    #Return a list of tuples plus the neighbor sets used to compute them
    ideal_positions = []
    Neighbor_positions = []
    if not img_110: #[001] image: each A column has four B neighbors
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
            if len(Neighbor) == 4:
                ap_center = math_center(*Neighbor)
                if ap_center: #intersection() returns False for parallel diagonals
                    ideal_positions.append(ap_center)
                    Neighbor_positions.append(Neighbor) #Save neighbors for plotting
    else: #[110] image: each A column has two B neighbors
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.5)
            if len(Neighbor) == 2:
                ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2, (Neighbor[0][1]+Neighbor[1][1])/2)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor)
    return ideal_positions, Neighbor_positions
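# Why the 0.707 and 0.5 factors (illustrative note, not in the source): in a
# [001] perovskite projection the B column sits at the center of the A square,
# a/sqrt(2) away from each A corner; in a [110] projection the spacing is a/2.
def _demo_ideal_pos_threshold():
    a = 4.0 #lattice parameter in arbitrary units (hypothetical value)
    print(a / math.sqrt(2)) # 2.828... = 0.707*a, the [001] search radius
    print(a / 2)            # 2.0, the [110] search radius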
def find_ideal_O_pos(A, B, Ua, scale):
    #Calculate the ideal atomic positions for O in an undistorted perovskite
    #structure; only supports [001] images for now.
    #A, B are lists of atom coordinates; Ua is the estimated lattice parameter
    #in nm; scale is the image pixel size
    ideal_O_positions = []
    for atom in A:
        Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
        if len(Neighbor) == 4:
            #Sort the four neighbors into edge pairs; the ideal O sites sit at
            #the midpoints of the four edges of the square.
            n_0 = Neighbor.pop(0)
            n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_2 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_3 = Neighbor.pop()
            o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2
            ideal_O_positions.append(o_0)
            o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2
            ideal_O_positions.append(o_1)
            o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2
            ideal_O_positions.append(o_2)
            o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2
            ideal_O_positions.append(o_3)
    ideal_O_positions = list(dict.fromkeys(ideal_O_positions)) #drop duplicates shared by adjacent cells
    return ideal_O_positions
def find_displacement(A, A_com, scale):
    #Find the atomic displacement of the refined positions A relative to the
    #ideal (computed) positions A_com; scale is the image pixel size.
    disp = []
    for atom in A_com:
        arrow_end = closest_node(atom, A)[1] #the refined atom nearest to this ideal site
        vec_len = distance.euclidean(atom, arrow_end)
        if vec_len > 0.14 / scale: #skip unphysically large vectors (> 0.14 nm)
            continue
        dx = arrow_end[0] - atom[0]
        dy = arrow_end[1] - atom[1]
        #Calculate the displacement vector angle according to dx, dy.
        if dx == 0: #guard the atan() branches below against division by zero
            vec_ang = 90.0 if dy >= 0 else 270.0
        elif dy >= 0 and dx > 0:
            vec_ang = math.degrees(math.atan(dy/dx))
        elif dy >= 0 and dx < 0:
            vec_ang = math.degrees(math.atan(dy/dx)) + 180
        elif dx < 0 and dy < 0:
            vec_ang = math.degrees(math.atan(dy/dx)) + 180
        else: #dx > 0 and dy < 0
            vec_ang = math.degrees(math.atan(dy/dx)) + 360
        disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len, vec_ang])
    return disp
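# Sanity check (illustrative addition): the piecewise atan() branches above
# are equivalent to math.atan2 folded into [0, 360), which also handles dx == 0.
def _demo_vec_ang():
    for dx, dy in [(1, 1), (-1, 1), (-1, -1), (1, -1), (0, 1)]:
        print(dx, dy, math.degrees(math.atan2(dy, dx)) % 360)
    # -> 45.0, 135.0, 225.0, 315.0, 90.0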
def set_arrow_color(vec_data, ang_lst, color_lst):
    #Append a color to each displacement vector according to the angular sector
    #(centered on the entries of ang_lst) that its angle falls in.
    vec_data_color = copy.deepcopy(vec_data) #Make a copy so it does not modify the original list
    if len(ang_lst) == 1: #single-color rendering
        for vec in vec_data_color:
            vec.append(color_lst[0])
        return vec_data_color
    #Shift all reference angles so the first one sits at 0 degrees
    ang_lst_mod = [a - ang_lst[0] for a in ang_lst]
    #Sector boundaries halfway between consecutive reference angles
    ang_bond = []
    for idx in range(len(ang_lst_mod) - 1):
        ang_bond.append((ang_lst_mod[idx + 1] - ang_lst_mod[idx]) // 2 + ang_lst_mod[idx])
    ang_bond.append((360 - ang_lst_mod[-1]) // 2 + ang_lst_mod[-1])
    for vec in vec_data_color:
        ang = vec[5] - ang_lst[0]
        if ang < 0:
            ang = ang + 360
        for i in range(len(ang_bond) - 1):
            if round(ang) in range(ang_bond[i], ang_bond[i + 1]):
                vec.append(color_lst[i + 1])
    for vec in vec_data_color: #whatever is left wraps around to the first sector
        if len(vec) == 6:
            vec.append(color_lst[0])
    return vec_data_color
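# Usage sketch (illustrative, with made-up numbers): two reference directions,
# 45 and 225 degrees, split the vectors into two color groups.
def _demo_set_arrow_color():
    vecs = [[0, 0, 1, 1, 1.0, 44.0],    #points roughly along 45 deg
            [0, 0, -1, -1, 1.0, 226.0]] #points roughly along 225 deg
    colored = set_arrow_color(vecs, [45, 225], ['blue', 'red'])
    print([v[6] for v in colored]) # -> ['blue', 'red']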
def load_disp_data_from_csv(file):
    #Read a displacement table written by cal_disp() back into a list of lists.
    with open(file, 'r') as disp:
        disp_data = []
        lines = disp.readlines()
        print('Displacement data:\n')
        print(lines[0]) #the header row
        for lin in lines[1:]:
            lin_data = lin.strip().split(', ')
            disp_data.append([float(data) for data in lin_data])
    return disp_data
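# Round-trip sketch (illustrative): write one displacement row in the same
# format cal_disp() uses below, then read it back; the file name is made up.
def _demo_disp_csv(tmp_file='demo-disp.csv'):
    with open(tmp_file, 'w') as f:
        f.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
        f.write('10.0, 12.0, 0.5, 0.5, 0.02, 45.0\n')
    print(load_disp_data_from_csv(tmp_file))
    # -> [[10.0, 12.0, 0.5, 0.5, 0.02, 45.0]]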
#=========== Define figure canvases ===================================================
class PlotCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 5x4 inches, 100 dots-per-inch
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # Since we have only one plot, we could use add_axes instead of
        # add_subplot, but then the subplot configuration tool in the
        # navigation toolbar wouldn't work.
        self.axes = self.fig.add_subplot(111)
        # Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
#==================== Find separation canvas =========================================
class SeparationCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Find separation factors')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        # Create the mpl Figure and FigCanvas objects.
        # 10x10 inches, 100 dots-per-inch
        self.dpi = 100
        self.fig = Figure((10.0, 10.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        # A 3x3 axes layout, one panel per candidate separation factor
        self.axes = [self.fig.add_subplot(3, 3, n) for n in range(1, 10)]
        self.fig.set_tight_layout(True)
        # Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
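# A minimal usage sketch (illustrative addition): PlotCanvas is just a
# QMainWindow wrapping a Matplotlib figure, so any Axes call works on .axes.
# A running QApplication is required before constructing it.
def _demo_plot_canvas():
    canvas = PlotCanvas()
    canvas.axes.imshow(np.random.rand(64, 64))
    canvas.axes.set_axis_off()
    canvas.show()
    return canvas #keep a reference so the window is not garbage-collected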
#==================== Main window =====================================================
class Ui_VecMap(QtWidgets.QMainWindow):
    def __init__(self):
        super(Ui_VecMap, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)

    def setupUi(self, VecMap):
        #Widget creation. The setGeometry()/setObjectName() boilerplate from the
        #Qt Designer layout is collapsed here; only the controls the slots below
        #use are listed, grouped by workflow step.
        #Step 1. Load image
        self.pushButton = QtWidgets.QPushButton(VecMap)     #load an image file
        self.checkBox = QtWidgets.QCheckBox(VecMap)         #treat the input as ABF (invert contrast)
        #Step 2. Initialize atom positions
        self.lineEdit = QtWidgets.QLineEdit(VecMap)         #separation factor
        self.pushButton_2 = QtWidgets.QPushButton(VecMap)   #"Initialize"
        self.pushButton_3 = QtWidgets.QPushButton(VecMap)   #"Find separation"
        #Step 3. Refine atom positions
        self.checkBox_2 = QtWidgets.QCheckBox(VecMap)       #"Refine Oxygen"
        self.checkBox_3 = QtWidgets.QCheckBox(VecMap)       #"Save result plots"
        self.checkBox_4 = QtWidgets.QCheckBox(VecMap)       #"[011] Zone"
        self.pushButton_4 = QtWidgets.QPushButton(VecMap)   #"Refine"
        #Step 4. Generate a vector map
        self.radioButton = QtWidgets.QRadioButton(VecMap)   #"A-site"
        self.radioButton.setChecked(True)
        self.radioButton_2 = QtWidgets.QRadioButton(VecMap) #"B-site"
        self.pushButton_13 = QtWidgets.QPushButton(VecMap)  #calculate displacement
        self.lineEdit_4 = QtWidgets.QLineEdit(VecMap)       #list of angles for coloring
        self.lineEdit_5 = QtWidgets.QLineEdit(VecMap)       #list of colors (should match the angles)
        self.lineEdit_2 = QtWidgets.QLineEdit(VecMap)       #arrow scale for the cation map
        self.lineEdit_3 = QtWidgets.QLineEdit(VecMap)       #arrow scale for the O map
        self.checkBox_5 = QtWidgets.QCheckBox(VecMap)       #"Scale bar"
        self.checkBox_5.setChecked(True)
        self.pushButton_5 = QtWidgets.QPushButton(VecMap)   #"Vector angle distribution"
        self.pushButton_6 = QtWidgets.QPushButton(VecMap)   #"Show map"
        self.pushButton_14 = QtWidgets.QPushButton(VecMap)  #"Oxygen map"
        self.pushButton_7 = QtWidgets.QPushButton(VecMap)   #"Load from csv"
        #Info row
        self.pushButton_8 = QtWidgets.QPushButton(VecMap)   #"Disclaimer"
        self.pushButton_9 = QtWidgets.QPushButton(VecMap)   #"About"
        self.pushButton_10 = QtWidgets.QPushButton(VecMap)  #"Acknowledgments"
        self.pushButton_11 = QtWidgets.QPushButton(VecMap)  #"Contact"
        self.pushButton_12 = QtWidgets.QPushButton(VecMap)  #"Donate me!"

        #Wire every control to its slot
        self.pushButton.clicked.connect(self.open_file) #(slot name not recoverable; open_file is assumed)
        self.pushButton_2.clicked.connect(self.ini_atom_position)
        self.pushButton_3.clicked.connect(self.find_separation)
        self.pushButton_4.clicked.connect(self.refine_atom_position)
        self.pushButton_13.clicked.connect(self.cal_disp)
        self.pushButton_5.clicked.connect(self.vec_ang_dist)
        self.pushButton_6.clicked.connect(self.show_vec_map)
        self.pushButton_14.clicked.connect(self.show_O_vec_map)
        self.pushButton_7.clicked.connect(self.load_from_csv)
        self.pushButton_8.clicked.connect(self.disclaimer)
        self.pushButton_9.clicked.connect(self.show_about)
        self.pushButton_10.clicked.connect(self.acknowledgments)
        self.pushButton_11.clicked.connect(self.show_contact)
        self.pushButton_12.clicked.connect(self.donate)
    def retranslateUi(self, VecMap):
        _translate = QtCore.QCoreApplication.translate
        VecMap.setWindowTitle(_translate("VecMap", "VecMap0.1"))
        self.pushButton.setText(_translate("VecMap", "Load image")) #(label assumed)
        self.lineEdit.setText(_translate("VecMap", "8"))
        self.pushButton_2.setText(_translate("VecMap", "Initialize"))
        self.pushButton_3.setText(_translate("VecMap", "Find\nseparation"))
        self.checkBox_2.setText(_translate("VecMap", "Refine Oxygen"))
        self.checkBox_3.setText(_translate("VecMap", "Save result plots"))
        self.checkBox_4.setText(_translate("VecMap", "[011] Zone"))
        self.pushButton_4.setText(_translate("VecMap", "Refine"))
        self.radioButton.setText(_translate("VecMap", "A-site"))
        self.radioButton_2.setText(_translate("VecMap", "B-site"))
        self.lineEdit_4.setText(_translate("VecMap", "45"))
        self.lineEdit_5.setText(_translate("VecMap", "yellow"))
        self.lineEdit_2.setText(_translate("VecMap", "10"))
        self.lineEdit_3.setText(_translate("VecMap", "6"))
        self.checkBox_5.setText(_translate("VecMap", "Scale bar"))
        self.pushButton_5.setText(_translate("VecMap", "Vector angle\ndistribution"))
        self.pushButton_6.setText(_translate("VecMap", "Show \nmap"))
        self.pushButton_14.setText(_translate("VecMap", "Oxygen\n map"))
        self.pushButton_7.setText(_translate("VecMap", "Load from csv"))
        self.pushButton_8.setText(_translate("VecMap", "Disclaimer"))
        self.pushButton_9.setText(_translate("VecMap", "About"))
        self.pushButton_10.setText(_translate("VecMap", "Acknowledgments"))
        self.pushButton_11.setText(_translate("VecMap", "Contact"))
        self.pushButton_12.setText(_translate("VecMap", "Donate me!"))
        #Hint labels from the original layout, kept here for reference:
        #  Step 1. Load image: "Load a HR-STEM image with a perovskite structure.
        #    Support [001] and [011] zone axes. Filtered images are preferred."
        #  Step 2. Initialize atom positions: "Separation factor", "e.g., something
        #    around 8-12"; correct the initial positions for refining by
        #    adding/removing atoms with a left-click; "Try a few separation
        #    factors around the given number" with Find separation.
        #  Step 3. Refine atom positions: "Check [001] or [011] zone. Only check
        #    Refine Oxygen if O columns are visible."
        #  Step 4. Generate a vector map: "List of angles (degrees) of vectors
        #    that should be colored differently:" ("e.g., 45 135 225 315"),
        #    "List of colors (should match the angles):", "Scale:"; "Generate a
        #    vector map. Set the coloring pattern by checking the vector angle
        #    distribution."
        #  Footer: "VecMap 0.1.1 Released: 06/13/2020 by Dr. <NAME>",
        #    "Check here for more information!"
    #===== Open file and set up global variables such as scale and units =====
    #(The original slot name is not recoverable from the fragments; open_file is assumed.)
    def open_file(self):
        openfile_name = QFileDialog.getOpenFileName(
            self, 'Select an image', '',
            'DM files (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg , *.jpeg , *.png ,*.bmp);;All Files (*)')
        global file, my_path, title, scale, units, s, image, ABF, img_110
        file = openfile_name[0]
        if self.checkBox.isChecked(): #Set ABF toggle from the checkbox
            ABF = 1
        else:
            ABF = 0
        if self.checkBox_4.isChecked(): #If the input image is [110], turn this on.
            img_110 = 1
        else:
            img_110 = 0
        if file:
            print('{} has been loaded!'.format(file))
            my_path = getDirectory(file) #Set the working path
            s = readImage(file)
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale #Read scale data from the image
            units = s.axes_manager[0].units #Read units
            s.save(my_path + 'Original image.hspy', overwrite=True) #Save a backup of the image in hspy format
            image = s.data
            if ABF == 1:
                s.data = np.divide(1, s.data) #Inverse the ABF contrast to make an ADF-like image
            #Draw the image
            global f_original_img
            f_original_img = PlotCanvas()
            f_original_img.setWindowTitle(file)
            f_original_img.axes.imshow(image)
            f_original_img.axes.set_axis_off()
            f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title))
            f_original_img.show()
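    # A headless sketch (illustrative addition, hypothetical values): the same
    # .axes_manager scale/units fields read above can be exercised on a small
    # synthetic HyperSpy signal, without loading a real micrograph.
    @staticmethod
    def _demo_scale_units():
        import hyperspy.api as hs
        sig = hs.signals.Signal2D(np.zeros((8, 8)))
        sig.axes_manager[0].scale = 0.02 #hypothetical pixel size
        sig.axes_manager[0].units = 'nm'
        print(sig.axes_manager[0].scale, sig.axes_manager[0].units) # 0.02 nm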
    #==== Initialize atom position module ===============================================
    #==== Connected to self.pushButton_2 ================================================
    def ini_atom_position(self):
        sep = int(self.lineEdit.text())
        try:
            A_positions_ini = get_atom_positions(s, separation=sep)
            global A_positions, f_ini
            A_positions = A_positions_ini.tolist()
            f_ini = PlotCanvas()
            f_ini.setWindowTitle('Initial atom positions for refining')
            f_ini.axes.imshow(s.data)
            f_ini.axes.set_axis_off()
            f_ini.axes.set_title('Left click to add or remove atoms')
            xy_positions = np.asarray(A_positions).T
            pts, = f_ini.axes.plot(xy_positions[0], xy_positions[1],
                                   marker='o', ms=5, color='r', ls='')
            f_ini.show()

            def onclick(event):
                if event.inaxes != f_ini.axes:
                    return
                if event.button == 1: #Left mouse button
                    x = float(event.xdata)
                    y = float(event.ydata)
                    atom_nearby = closest_node((x, y), A_positions)[0]
                    if distance.euclidean((x, y), A_positions[atom_nearby]) > 5:
                        A_positions.append([x, y]) #far from every atom: add one here
                    else:
                        A_positions.pop(atom_nearby) #close to an atom: remove it
                    xy = np.asarray(A_positions).T #redraw so the click is reflected
                    pts.set_data(xy[0], xy[1])
                    f_ini.fig.canvas.draw()

            cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
        except NameError:
            #Pop up an error window if no image has been loaded yet
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
    #==== Find separation module ========================================================
    #==== Connected to self.pushButton_3 ================================================
    def find_separation(self):
        sep = int(self.lineEdit.text())
        #Try nine candidate separation factors around the given number,
        #i.e. range(sep - 4, sep + 5), one per panel.
        try:
            global f_sep
            f_sep = SeparationCanvas()
            for i in range(9):
                s_factor = sep - 4 + i
                f_sep.axes[i].set_aspect('equal')
                f_sep.axes[i].set_axis_off()
                if s_factor < 1:
                    continue
                ini_position = get_atom_positions(s, separation=s_factor)
                f_sep.axes[i].imshow(s.data)
                f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r')
                f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
            f_sep.show()
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
    #==== Refine atom positions module ==================================================
    #==== Connected to self.pushButton_4 ================================================
    def refine_atom_position(self):
        #Global variables:
        global ap_A, ap_B, ap_O, Ua, Uc, find_O
        if self.checkBox_2.isChecked(): #Refine O only if the columns are visible
            find_O = 1
        else:
            find_O = 0
        if self.checkBox_3.isChecked():
            plotpos = 1
        else:
            plotpos = 0
        try:
            #Refine atom positions
            print('='*50)
            print('Refining atom positions for A-site atoms...')
            print('This may take time...')
            sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
            print('Refining A-site atoms done!')
            ap_A = sublattice_A.atom_positions

            print('='*50)
            print('Finding initial positions for B-site atoms...')
            sublattice_A.construct_zone_axes()
            #Find the zone axis for the initial position of B
            zone_axis = sublattice_A.zones_axis_average_distances[2]
            #Calculate lattice parameters from the first two zone axes
            z0 = sublattice_A.zones_axis_average_distances[0]
            z1 = sublattice_A.zones_axis_average_distances[1]
            Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale
            Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale
            print('='*50)
            print('Estimated lattice parameters (average) from the image:')
            print('a = {:.3f} {}'.format(Ua, units))
            print('c = {:.3f} {}'.format(Uc, units))
            B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)

            #Remove A-site atoms from the image
            print('='*50)
            print('Subtracting sublattice A from the image...')
            print('This may take time...')
            image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)

            #Refine B-site atoms
            print('='*50)
            print('Refining atom positions for sublattice B...')
            print('Almost there...')
            sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='blue')
            print('Refining B-site atoms done!')
            ap_B = sublattice_B.atom_positions

            #Find the position of O atoms if requested
            if find_O == 1:
                #Find initial positions for O
                AB_positions = ap_A.tolist() + ap_B.tolist()
                sublattice_AB = Sublattice(AB_positions, image=s.data, color='y', name='Sublattice A + B')
                sublattice_AB.construct_zone_axes()
                zone_axis_002 = sublattice_AB.zones_axis_average_distances[2] #Only work for [001] currently
                O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002) #Initial positions of O
                #Subtract both A and B from the original image
                print('='*50)
                print('Subtracting sublattice A and B from the image using 2D gaussian fitting...')
                image_without_AB = remove_atoms_from_image_using_2d_gaussian(sublattice_B.image, sublattice_B, show_progressbar=False)
                #Refine O positions
                print('='*50)
                print('Refining atom positions for sublattice O...')
                sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g')
                ap_O = sublattice_O.atom_positions #Refined atom positions for O
                print('Refining O atoms done!')
            print('Refining atoms done!')

            #======================= Save result figures =======================
            if plotpos == 1:
                print('='*50)
                print('Saving result plots...')
                global f_A_site, f_B_site, f_AB
                #Plot A-site atom positions with the original image overlayed.
                f_A_site = PlotCanvas()
                f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms')
                f_A_site.axes.imshow(image)
                f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_A_site.axes.set_axis_off()
                f_A_site.show()
                f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot B-site atom positions with the original image overlayed.
                f_B_site = PlotCanvas()
                f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
                f_B_site.axes.imshow(image)
                f_B_site.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_B_site.axes.set_axis_off()
                f_B_site.show()
                f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot both A-site and B-site on the same image
                f_AB = PlotCanvas()
                f_AB.setWindowTitle('VecMap0.1: A-site vs. B-site atoms')
                f_AB.axes.imshow(image)
                f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_AB.axes.set_axis_off()
                f_AB.show()
                f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot O atoms if available
                if find_O == 1:
                    global f_O_site, f_all
                    f_O_site = PlotCanvas()
                    f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms')
                    f_O_site.axes.imshow(image)
                    f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_O_site.axes.set_axis_off()
                    f_O_site.show()
                    f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif', dpi=600, bbox_inches='tight')
                    #Plot all three sublattices on the original image
                    f_all = PlotCanvas()
                    f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. O atoms')
                    f_all.axes.imshow(image)
                    f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                    f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                    f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_all.axes.set_axis_off()
                    f_all.show()
                    f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif', dpi=600, bbox_inches='tight')
                print('All figures have been saved to ' + my_path)
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please initialize the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
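    # A headless sketch (illustrative addition) of the refinement chain used
    # above: find_atom() is run on a synthetic grid of Gaussian "atoms", so it
    # works without a micrograph; grid spacing and peak widths are made up, and
    # the Atomap API is assumed to be the same one used elsewhere in this file.
    @staticmethod
    def _demo_refine_synthetic():
        yy, xx = np.mgrid[0:128, 0:128]
        img = np.zeros((128, 128))
        ini = [[16 + 24*i, 16 + 24*j] for i in range(5) for j in range(5)]
        for px, py in ini:
            img += np.exp(-((xx - px)**2 + (yy - py)**2) / 8.0)
        sub = find_atom(img, ini, 'demo atoms')
        print(sub.atom_positions[:3]) #refined positions, close to the grid points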
    #==================== Calculate displacement module =================================
    #==================== Connected to self.pushButton_13 ===============================
    def cal_disp(self):
        try:
            #Global variables
            global U_avg, disp, disp_O, disp_atom
            #Read cal_site from the radio button:
            #0 to calculate A site in relative to B site;
            #1 to calculate B site in relative to A site
            if self.radioButton.isChecked():
                cal_site = 0
            if self.radioButton_2.isChecked():
                cal_site = 1
            #If enabled, also calculate the displacement of O atoms in relation
            #to sublattice B. O map is not supported for [110] yet.
            O_map = find_O
            U_avg = (Ua + Uc)/2 #Unit cell parameter estimated from the image.
            if cal_site == 0:
                disp_atom = 'A-site'
                rel_atom = 'B-site'
                ap_0 = ap_A.tolist()
                ap_1 = ap_B.tolist()
            else:
                disp_atom = 'B-site'
                rel_atom = 'A-site'
                ap_0 = ap_B.tolist()
                ap_1 = ap_A.tolist()
            print('='*50)
            print('====Calculate {} in relative to {}===='.format(disp_atom, rel_atom))
            ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale)
            disp = find_displacement(ap_0, ideal_pos, scale)
            #Save the displacement data
            with open(my_path + title + '-{}-disp.csv'.format(disp_atom), 'w') as disp_data:
                disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                for data in disp:
                    disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                    disp_data.write('\n')
            #Save the neighboring atoms as well
            with open(my_path + 'neighboring atoms.csv', 'w') as neighbor_data:
                for data in neighbor_pos:
                    n = len(data)
                    for idx in range(n):
                        neighbor_data.write('{0}, {1}, '.format(*data[idx]))
                    neighbor_data.write('\n')
            #Calculate the O map and save it
            if O_map == 1:
                ap_2 = ap_O.tolist()
                ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale)
                disp_O = find_displacement(ap_2, ideal_O_pos, scale)
                with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom), 'w') as disp_data:
                    disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                    for data in disp_O:
                        disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                        disp_data.write('\n')
            print('Displacement calculation done!')
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please refine the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
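    # A worked example (illustrative numbers): a (0.3, 0.4) px displacement is
    # 0.5 px long; at a hypothetical pixel size of 0.02 nm/px that is 0.01 nm,
    # i.e. 10 pm; cf. the scale*1000*vec_len factor stored by find_displacement().
    @staticmethod
    def _demo_disp_magnitude():
        scale_demo = 0.02 #hypothetical pixel size in nm per px
        dx, dy = 0.3, 0.4 #displacement in px
        vec_len = (dx**2 + dy**2) ** 0.5
        print(vec_len * scale_demo)        # 0.01 (nm)
        print(vec_len * scale_demo * 1000) # 10.0 (pm)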
    #======== Display angle distribution of the vectors =================================
    #======== Connected to self.pushButton_5 ============================================
    def vec_ang_dist(self):
        try:
            disp_angles = [lst[5] for lst in disp]
            global f_vec_ang_dist
            f_vec_ang_dist = PlotCanvas()
            f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions')
            f_vec_ang_dist.axes.hist(disp_angles, bins=50)
            f_vec_ang_dist.axes.set_xlabel('Displacement angles (Degrees)')
            f_vec_ang_dist.axes.set_xticks(list(range(0, 390, 30)))
            f_vec_ang_dist.axes.set_ylabel('Frequency')
            f_vec_ang_dist.axes.set_title('Put your cursor on the peak(s) to see the\n displacement directions')
            f_vec_ang_dist.show()
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
    #========= Generate vector map module ===============================================
    #========= Connected to self.pushButton_6 ===========================================
    def show_vec_map(self):
        a_len = int(self.lineEdit_2.text()) #arrow length multiplier
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        try:
            #Read from the lineEdits: a list of displacement directions used to
            #determine the coloring pattern. For single color rendering, just
            #leave it as the single default angle.
            ang_lst = [int(a) for a in str(self.lineEdit_4.text()).split()]
            color_lst = str(self.lineEdit_5.text()).split()
            disp_color = set_arrow_color(disp, ang_lst, color_lst)
            global f_vec_map
            f_vec_map = PlotCanvas()
            f_vec_map.setWindowTitle('VecMap0.1: Vector Map of {} atoms'.format(disp_atom))
            f_vec_map.axes.imshow(image)
            f_vec_map.axes.set_axis_off()
            for vec in disp_color:
                f_vec_map.axes.arrow(vec[0], vec[1], vec[2]*a_len, vec[3]*a_len,
                                     color=vec[6], linewidth=1, head_width=a_len/3, head_length=a_len/3)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale, 'nm', location='lower left', scale_loc='top', sep=2)
                f_vec_map.axes.add_artist(scalebar)
            f_vec_map.show()
            f_vec_map.fig.savefig(my_path + title + "_{}_vec_map.tif".format(disp_atom), dpi=1200, bbox_inches='tight')
            print('The vector map has been saved to ' + my_path + title + "_{}_vec_map.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        except IndexError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("The list of colors should match the list of angles!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
    #========= Generate O vector map module =============================================
    #========= Connected to self.pushButton_14 ==========================================
    def show_O_vec_map(self):
        O_len = int(self.lineEdit_3.text()) #arrow length multiplier for O
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        try:
            global f_vec_map_O
            f_vec_map_O = PlotCanvas()
            f_vec_map_O.setWindowTitle('VecMap0.1: Vector Map of Oxygen atoms')
            f_vec_map_O.axes.imshow(image)
            f_vec_map_O.axes.set_axis_off()
            for vec in disp_O:
                f_vec_map_O.axes.arrow(vec[0], vec[1], vec[2]*O_len, vec[3]*O_len,
                                       color='red', linewidth=1, head_width=O_len/3, head_length=O_len/3)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale, 'nm', location='lower left', scale_loc='top', sep=2)
                f_vec_map_O.axes.add_artist(scalebar)
            f_vec_map_O.show()
            f_vec_map_O.fig.savefig(my_path + title + "_O_vec_map_by_{}.tif".format(disp_atom), dpi=1200, bbox_inches='tight')
            print('The O vector map has been saved to ' + my_path + title + "_O_vec_map_by_{}.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("No O displacement data exist!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
# Define a function to find the neighboring atoms of P(x,y) from a list of atoms A.
def find_neighboring_atoms(P, A, Ua, tol=1.2):
    #P: a given atom (x,y); A: a list of atoms;
    #Ua: a threshold in px, 0.707*a for [001] and 0.5*a for [110]
    x, y = P
    N = [a for a in A if (a[0]-x)**2 + (a[1]-y)**2 < (Ua * tol)**2]  #a list to store the neighboring atoms
    N = sorted(N, key=lambda x: (x[0]**2 + x[1]**2)**0.5)
    return N

def closest_node(node, nodes):
    #A function to find the closest node in an array of nodes
    nodes = np.asarray(nodes)
    idx = distance.cdist([node], nodes).argmin()
    return idx, nodes[idx]

def line(p1, p2):
    #Line coefficients A*x + B*y = C from two points
    A = (p1[1] - p2[1])
    B = (p2[0] - p1[0])
    C = (p1[0]*p2[1] - p2[0]*p1[1])
    return A, B, -C

def intersection(L1, L2):
    #A function to find the intersection point of two lines (Cramer's rule)
    D  = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        x = Dx / D
        y = Dy / D
        return x, y
    else:
        return False

def math_center(a, b, c, d):
    #A function to find the mathematical center of four points a, b, c, d
    M = [b, c, d]
    diag_idx = distance.cdist([a], M).argmax()  #Find the point diagonal to a
    L1 = line(a, M[diag_idx])
    del M[diag_idx]
    L2 = line(M[0], M[1])
    center = intersection(L1, L2)
    return center

def find_ideal_pos(A, B, Ua, scale, img_110=False):
    #Calculate the ideal atomic positions for A in an undistorted perovskite structure.
    #A, B are lists of atom coordinates; Ua is the estimated lattice parameter in nm;
    #scale is the image pixel size. Returns two lists of tuples.
    ideal_positions = []
    Neighbor_positions = []
    if not img_110:  #calculate for [001]
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
            if len(Neighbor) == 4:
                ap_center = math_center(*Neighbor)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor)  #Save neighbors for plotting
    else:  #calculate for [110]
        for atom in A:
            Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.5)
            if len(Neighbor) == 2:
                ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2, (Neighbor[0][1]+Neighbor[1][1])/2)
                ideal_positions.append(ap_center)
                Neighbor_positions.append(Neighbor)
    return ideal_positions, Neighbor_positions

def find_ideal_O_pos(A, B, Ua, scale):
    #Calculate the ideal atomic positions for O in an undistorted perovskite structure.
    #Only supports [001] images.
    ideal_O_positions = []
    for atom in A:
        Neighbor = find_neighboring_atoms(atom, B, Ua / scale * 0.707)
        if len(Neighbor) == 4:
            n_0 = Neighbor.pop(0)
            n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_2 = Neighbor.pop(closest_node(n_0, Neighbor)[0])
            n_3 = Neighbor.pop()
            o_0 = (n_0[0] + n_1[0]) / 2, (n_0[1] + n_1[1]) / 2
            ideal_O_positions.append(o_0)
            o_1 = (n_0[0] + n_2[0]) / 2, (n_0[1] + n_2[1]) / 2
            ideal_O_positions.append(o_1)
            o_2 = (n_1[0] + n_3[0]) / 2, (n_1[1] + n_3[1]) / 2
            ideal_O_positions.append(o_2)
            o_3 = (n_2[0] + n_3[0]) / 2, (n_2[1] + n_3[1]) / 2
            ideal_O_positions.append(o_3)
    ideal_O_positions = list(dict.fromkeys(ideal_O_positions))  #Remove duplicate O sites
    return ideal_O_positions
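# --- Editor's sketch (not part of the original source): the ideal site is the
# intersection of the two diagonals of the four surrounding atoms. For the
# corners of a unit square the diagonals cross at (0.5, 0.5). _demo_math_center
# is a hypothetical name added for illustration only.
def _demo_math_center():
    corners = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0), (1.0, 1.0)]
    cx, cy = math_center(*corners)
    assert abs(cx - 0.5) < 1e-9 and abs(cy - 0.5) < 1e-9
    return cx, cy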
def find_displacement(A, A_com, scale):
    #Find the atomic displacement of A. A: refined positions; A_com: computed
    #ideal positions; scale: the image pixel size.
    disp = []
    for atom in A_com:
        arrow_end = closest_node(atom, A)[1]
        vec_len = distance.euclidean(arrow_end, atom)
        if vec_len > 0.14 / scale:  #skip spurious matches; the 0.14 cutoff is in the
            continue                #source, the /scale normalization is reconstructed
        dx = arrow_end[0] - atom[0]
        dy = arrow_end[1] - atom[1]
        #Determine the vector angle according to dx, dy.
        if dy >= 0 and dx >= 0:
            vec_ang = math.degrees(math.atan(dy/dx))
        elif dy >= 0 and dx < 0:
            vec_ang = math.degrees(math.atan(dy/dx)) + 180
        elif dx < 0 and dy < 0:
            vec_ang = math.degrees(math.atan(dy/dx)) + 180
        else:
            vec_ang = math.degrees(math.atan(dy/dx)) + 360
        disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len, vec_ang])
    return disp

def set_arrow_color(vec_data, ang_lst, color_lst):
    #Assign a color to each displacement vector according to its angle.
    #ang_lst and color_lst come from the two line edits in the GUI.
    vec_data_color = copy.deepcopy(vec_data)  #copy so it does not modify the original list
    if len(ang_lst) == 1:
        for vec in vec_data_color:
            vec.append(color_lst[0])  #set a single color (e.g. yellow) for single-color rendering
        return vec_data_color
    ang_lst_mod = [a - ang_lst[0] for a in ang_lst]
    ang_bond = []  #bin edges half-way between the requested angles
    for idx in range(len(ang_lst_mod) - 1):
        ang_bond.append(int((ang_lst_mod[idx + 1] + ang_lst_mod[idx]) / 2))
    ang_bond.append(int((360 - ang_lst_mod[-1]) / 2 + ang_lst_mod[-1]))  #(last edge reconstructed)
    for vec in vec_data_color:
        ang = vec[5] - ang_lst[0]
        if ang < 0:
            ang = ang + 360
        for i in range(len(ang_bond) - 1):
            if round(ang) in range(ang_bond[i], ang_bond[i + 1]):
                vec.append(color_lst[i + 1])
    for vec in vec_data_color:
        if len(vec) == 6:  #anything not binned above falls back to the first color
            vec.append(color_lst[0])
    return vec_data_color

def load_disp_data_from_csv(file):
    #Load previously saved displacement data from a csv file.
    with open(file, 'r') as disp:
        disp_data = []
        lines = disp.readlines()
        print('Displacement data:\n')
        print(lines[0])  #the header row
        for lin in lines[1:]:
            lin_data = lin.strip().split(', ')
            disp_data.append([float(data) for data in lin_data])
    return disp_data
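# --- Editor's sketch (not part of the original source): find_displacement()
# on one toy atom displaced by (+1, +1) px. The quadrant ladder above is
# equivalent to math.degrees(math.atan2(dy, dx)) % 360 whenever dx != 0.
# _demo_find_displacement is a hypothetical name; 0.02 nm/px is an assumption.
def _demo_find_displacement():
    ideal = [(10.0, 10.0)]
    actual = [(11.0, 11.0)]
    d = find_displacement(actual, ideal, 0.02)
    assert round(d[0][5]) == 45  #entries are [x, y, dx, dy, |d|, angle]
    return d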
#==================== Matplotlib canvases embedded in Qt windows =====================
class PlotCanvas(QMainWindow):
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        #Create the mpl Figure and FigCanvas objects: 5x4 inches, 100 dots-per-inch
        self.dpi = 100
        self.fig = Figure((5.0, 4.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        #Since we have only one plot, we can use add_axes
        #instead of add_subplot, but then the subplot
        #configuration tool in the navigation toolbar wouldn't
        #work.
        self.axes = self.fig.add_subplot(111)
        #Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)

class SeparationCanvas(QMainWindow):
    #A larger canvas with a 3x3 grid of axes, used to preview nine separation factors.
    def __init__(self, parent=None):
        QMainWindow.__init__(self, parent)
        self.setWindowTitle('VecMap0.1: Plot')
        self.create_main_frame()

    def create_main_frame(self):
        self.main_frame = QWidget()
        self.dpi = 100
        self.fig = Figure((10.0, 10.0), dpi=self.dpi)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self.main_frame)
        #Add a 3x3 axes layout
        self.axes = [self.fig.add_subplot(3, 3, n) for n in range(1, 10)]
        self.fig.set_tight_layout(True)
        #Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
        vbox = QVBoxLayout()
        vbox.addWidget(self.mpl_toolbar)
        vbox.addWidget(self.canvas)
        self.main_frame.setLayout(vbox)
        self.setCentralWidget(self.main_frame)
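# --- Editor's sketch (not part of the original source): every window in the
# app follows the same pattern: create a PlotCanvas, draw on its .axes, then
# show it and keep a reference so Qt does not garbage-collect the window.
# Requires a running QApplication (see main() below); _demo_plot_canvas is a
# hypothetical name.
def _demo_plot_canvas(img):
    f = PlotCanvas()
    f.setWindowTitle('VecMap0.1: Demo plot')
    f.axes.imshow(img)
    f.axes.set_axis_off()
    f.show()
    return f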
#==================== Main application window ========================================
class Ui_VecMap(QtWidgets.QMainWindow):
    def __init__(self):
        super(Ui_VecMap, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)

    def setupUi(self, VecMap):
        VecMap.setObjectName("VecMap")
        VecMap.resize(402, 876)
        VecMap.setMinimumSize(QtCore.QSize(402, 836))
        VecMap.setMaximumSize(QtCore.QSize(1024, 1024))
        self.pushButton = QtWidgets.QPushButton(VecMap)
        self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41))
        self.pushButton.setObjectName("pushButton")
        self.checkBox = QtWidgets.QCheckBox(VecMap)
        self.checkBox.setGeometry(QtCore.QRect(150, 10, 111, 20))
        self.checkBox.setObjectName("checkBox")
        self.radioButton = QtWidgets.QRadioButton(VecMap)
        self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20))
        self.radioButton.setChecked(True)
        self.radioButton.setObjectName("radioButton")
        # ... the remaining widgets (pushButton_2 through pushButton_14,
        # lineEdit through lineEdit_5, label through label_21, checkBox_2
        # through checkBox_5, radioButton_2, and the separator lines line
        # through line_4) are created with the same Qt Designer pattern;
        # the repetitive geometry code is omitted here for brevity ...
        self.checkBox_5 = QtWidgets.QCheckBox(VecMap)
        self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20))
        self.checkBox_5.setChecked(True)
        self.checkBox_5.setObjectName("checkBox_5")
        self.retranslateUi(VecMap)
        QtCore.QMetaObject.connectSlotsByName(VecMap)
        #=======Connect all the functions=============================================
        self.pushButton.clicked.connect(self.openfile)
        self.pushButton_2.clicked.connect(self.ini_atom_position)
        self.pushButton_3.clicked.connect(self.find_separation)
        self.pushButton_4.clicked.connect(self.refine_atom_position)
        self.pushButton_13.clicked.connect(self.cal_disp)
        self.pushButton_5.clicked.connect(self.vec_ang_dist)
        self.pushButton_6.clicked.connect(self.show_vec_map)
        self.pushButton_14.clicked.connect(self.show_O_vec_map)
        self.pushButton_7.clicked.connect(self.load_from_csv)
        self.pushButton_8.clicked.connect(self.disclaimer)
        self.pushButton_9.clicked.connect(self.show_about)
        self.pushButton_10.clicked.connect(self.acknowledgments)
        self.pushButton_11.clicked.connect(self.show_contact)
        self.pushButton_12.clicked.connect(self.donate)

    def retranslateUi(self, VecMap):
        _translate = QtCore.QCoreApplication.translate
        VecMap.setWindowTitle(_translate("VecMap", "VecMap0.1"))
        #VecMap.setWindowIcon(QtGui.QIcon('icon.png'))
        self.pushButton.setText(_translate("VecMap", "Load Image"))
        self.checkBox.setText(_translate("VecMap", "ABF/BF image"))
        self.label.setText(_translate("VecMap", "Step 1. Load image"))
        self.label_2.setText(_translate("VecMap", "<html><head/><body><p>Load a HR-STEM image with a perovskite structure. Support [001] and [011] zone axes. Filtered image is preferred.</p></body></html>"))
        self.lineEdit.setText(_translate("VecMap", "8"))
        self.label_3.setText(_translate("VecMap", "Step 2. Initialize atom positions"))
        self.label_4.setText(_translate("VecMap", "Separation factor"))
        self.pushButton_2.setText(_translate("VecMap", "Initialize"))
        self.pushButton_3.setText(_translate("VecMap", "Find \n" "separation"))
        self.label_5.setText(_translate("VecMap", "<html><head/><body><p>Input an appropriate separation factor to initialize the atom positions for refining. Add or remove atoms by left-click.</p></body></html>"))
        self.label_6.setText(_translate("VecMap", "<html><head/><body><p>Try a few separation factors around the given number to determine the best separation factor.</p></body></html>"))
        self.checkBox_2.setToolTip(_translate("VecMap", "<html><head/><body><p>Check if O columns are visible.</p></body></html>"))
        self.checkBox_4.setText(_translate("VecMap", "[011] Zone"))
        self.label_11.setText(_translate("VecMap", "Step 4. Generate a vector map"))
        self.radioButton.setText(_translate("VecMap", "A-site"))
        self.radioButton_2.setText(_translate("VecMap", "B-site"))
        self.label_21.setText(_translate("VecMap", "Select which site to calculate"))
        self.pushButton_13.setText(_translate("VecMap", "Calculate"))
        self.label_14.setText(_translate("VecMap", "List of angles (degrees) of vectors that will be colored differently:"))
        self.lineEdit_4.setText(_translate("VecMap", "45"))
        self.label_15.setText(_translate("VecMap", "e.g., 45 135 225 315"))
        self.label_16.setText(_translate("VecMap", "List of colors (should match the angle list):"))
        self.lineEdit_5.setText(_translate("VecMap", "yellow"))
        self.pushButton_5.setText(_translate("VecMap", "Vector angle\n" "distribution"))
        self.pushButton_6.setText(_translate("VecMap", "Show \n" "map"))
        self.label_18.setText(_translate("VecMap", "<html><head/><body><p>Generate a vector map. Set the coloring pattern by checking the vector angle distribution.</p></body></html>"))
        self.pushButton_7.setText(_translate("VecMap", "Load from csv"))
        self.pushButton_8.setText(_translate("VecMap", "Disclaimer"))
        self.label_19.setText(_translate("VecMap", "VecMap 0.1.1 Released: 06/13/2020 by Dr. <NAME>"))
        self.label_20.setText(_translate("VecMap", "Check here for more information!"))
        self.pushButton_9.setText(_translate("VecMap", "About"))
        self.pushButton_10.setText(_translate("VecMap", "Acknowledgments"))
        self.pushButton_11.setText(_translate("VecMap", "Contact"))
        self.pushButton_12.setText(_translate("VecMap", "Donate me!"))
        self.lineEdit_3.setText(_translate("VecMap", "6"))
        self.label_13.setText(_translate("VecMap", "Scale:"))
        self.checkBox_5.setText(_translate("VecMap", "Scale bar"))
        # ... (a few remaining label texts omitted) ...
    #===== Open file and set up global variables such as path etc. ======================
    #===== Connected to self.pushButton ==================================================
    def openfile(self):
        openfile_name = QFileDialog.getOpenFileName(self, 'Select Image', '',
            'DigitalMicrograph (*.dm3 , *.dm4);;Image files (*.tif , *.tiff , *.jpg , *.jpeg , *.png ,*.bmp);;All Files (*)')
        global file, my_path, file_path, title, scale, units, s, image, ABF, img_110
        file = openfile_name[0]
        if self.checkBox.isChecked():  #ABF/BF image checkbox
            ABF = 1
        else:
            ABF = 0
        if self.checkBox_4.isChecked():
            img_110 = 1  #If the input image is [110], turn this on
        else:
            img_110 = 0
        if file:
            print('{} has been loaded!'.format(file))
            my_path = getDirectory(file)  #Set up the output folder next to the image
            if not os.path.exists(my_path):
                os.makedirs(my_path)
            s = readImage(file)
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale  #Read scale data from the image
            units = s.axes_manager[0].units  #Read units
            s.save(my_path + 'Original image.hspy', overwrite=True)  #Save a backup file in hspy format
            if ABF == 1:
                s.data = np.divide(1, s.data)  #Invert the ABF contrast to make an ADF-like image
            image = s.data
            #Draw the image
            global f_original_img
            f_original_img = PlotCanvas()
            f_original_img.setWindowTitle(file)
            f_original_img.axes.imshow(image)
            f_original_img.axes.set_axis_off()
            f_original_img.axes.set_title('{} \n has been successfully loaded!'.format(title))
            f_original_img.show()

    #==== Initialize atom position module ===============================================
    #==== Connected to self.pushButton_2 ================================================
    def ini_atom_position(self):
        sep = int(self.lineEdit.text())
        try:
            A_positions_ini = get_atom_positions(s, separation=sep)
            global A_positions, f_ini
            A_positions = A_positions_ini.tolist()
            f_ini = PlotCanvas()
            f_ini.setWindowTitle('VecMap0.1: Initial atom positions for refining')
            f_ini.axes.imshow(s.data)
            f_ini.axes.set_axis_off()
            f_ini.axes.set_title('Left click to add or remove atoms')
            f_ini.show()

            def get_xy_pos_lists(positions):
                #(helper reconstructed) split [[x, y], ...] into an x list and a y list
                return [p[0] for p in positions], [p[1] for p in positions]

            def onclick(event):
                if event.inaxes != f_ini.axes:
                    return
                if event.button == 1:  #Left mouse button
                    x = float(event.xdata)
                    y = float(event.ydata)
                    atom_nearby = closest_node((x, y), A_positions)[0]
                    if distance.euclidean((x, y), A_positions[atom_nearby]) > 5:
                        A_positions.append([x, y])    #far from any atom: add one
                    else:
                        A_positions.pop(atom_nearby)  #close to an atom: remove it (branch reconstructed)
                    x_pos, y_pos = get_xy_pos_lists(A_positions)
                    dp.set_xdata(x_pos)
                    dp.set_ydata(y_pos)
                    f_ini.fig.canvas.draw()
                    f_ini.fig.canvas.flush_events()

            xy_positions = get_xy_pos_lists(A_positions)
            dp, = f_ini.axes.plot(xy_positions[0], xy_positions[1], marker='.', ms=5, color='r', ls='')  #marker added so the points render with ls=''
            cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #==== Find separation module ========================================================
    #==== Connected to self.pushButton_3 ================================================
    def find_separation(self):
        #sep_range = (int(self.lineEdit_2.text()), int(self.lineEdit_3.text()))
        #s_peaks = am.get_feature_separation(s, separation_range=sep_range)  #Range might be changed for different images
        #s_peaks.metadata.General.title = 'Use Arrow keys to find an appropriate separation factor'
        #s_peaks.plot(colorbar=False, scalebar=False, axes_off=True)
        sep = int(self.lineEdit.text())
        sep_range = list(range(sep - 4, sep + 5))  #nine candidate separations for drawing
        try:
            global f_sep
            f_sep = SeparationCanvas()
            for i in range(9):
                s_factor = sep_range[i]
                ini_position = get_atom_positions(s, separation=s_factor)
                f_sep.axes[i].imshow(s.data)
                f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r')
                f_sep.axes[i].set_title('Separation = {}'.format(s_factor))
            f_sep.show()
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please load the image file first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
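    # --- Editor's sketch (not part of the original source): the add/remove rule
    # used by onclick() above, shown on toy data. _demo_click_toggle is a
    # hypothetical name; the 5 px tolerance is the one hard-coded above.
    @staticmethod
    def _demo_click_toggle():
        pts = [[10.0, 10.0], [50.0, 50.0]]
        click = (10.5, 10.2)              #within 5 px of the first atom
        idx = closest_node(click, pts)[0]
        if distance.euclidean(click, pts[idx]) > 5:
            pts.append(list(click))       #far from everything: add an atom
        else:
            pts.pop(idx)                  #close to one: remove it
        assert pts == [[50.0, 50.0]]
        return pts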
    #==== Refine atom position module ===================================================
    #==== Connected to self.pushButton_4 ================================================
    def refine_atom_position(self):
        #Global variables:
        global ap_A, ap_B, ap_O, Ua, Uc, find_O
        #Read checkboxes
        if self.checkBox_2.isChecked():
            find_O = 1
        else:
            find_O = 0
        if self.checkBox_3.isChecked():
            plotpos = 1
        else:
            plotpos = 0
        try:
            #Refine atom positions
            print('='*50)
            print('Refining atom positions for A-site atoms...')
            print('This may take time...')
            sublattice_A = find_atom(s.data, A_positions, 'A-site atoms')
            print('Refining A-site atoms done!')
            ap_A = sublattice_A.atom_positions  #Refined atom positions for A-site; NumPy array
            #lattice_list = []
            #lattice_list.append(sublattice_A)
            print('='*50)
            print('Finding the initial positions of B-site atoms...')
            sublattice_A.construct_zone_axes()
            #Find the zone axis for the initial position of B:
            #typically 3 for [001] and 1 for [110]
            if img_110 == 1:
                zone_axis = sublattice_A.zones_axis_average_distances[1]
            else:
                zone_axis = sublattice_A.zones_axis_average_distances[2]
            #Calculate lattice parameters
            z0 = sublattice_A.zones_axis_average_distances[0]
            z1 = sublattice_A.zones_axis_average_distances[1]
            Ua = math.sqrt(z0[0]**2 + z0[1]**2) * scale
            Uc = math.sqrt(z1[0]**2 + z1[1]**2) * scale
            print('='*50)
            print('Estimated lattice parameters (average) from the image:')
            print('a = {:.3f} {}'.format(Ua, units))
            print('c = {:.3f} {}'.format(Uc, units))
            B_positions = sublattice_A.find_missing_atoms_from_zone_vector(zone_axis)
            #Remove the A-site atoms from the image
            print('='*50)
            print('Subtracting sublattice A from the image using 2D gaussian fit...')
            print('This may take time...')
            image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False)
            sublattice_B = find_atom(image_without_A, B_positions, 'B-site atoms', atom_color='b')
            ap_B = sublattice_B.atom_positions  #Refined atom positions for B-site; NumPy array
            print('Refining B-site atoms done!')
            #lattice_list.append(sublattice_B)
            #Find the position of O atoms
            if find_O == 1:
                #Find initial positions for O
                AB_positions = ap_A.tolist() + ap_B.tolist()
                sublattice_AB = Sublattice(AB_positions, image=s.data, color='y', name='Sublattice A + B')
                sublattice_AB.construct_zone_axes()
                zone_axis_002 = sublattice_AB.zones_axis_average_distances[2]  #Only works for [001] currently
                O_positions = sublattice_AB.find_missing_atoms_from_zone_vector(zone_axis_002)  #Initial positions of O
                print('='*50)
                print('Subtracting sublattice A and B from the image using 2D gaussian fit...')
                print('This may take time...')
                image_without_AB = remove_atoms_from_image_using_2d_gaussian(sublattice_B.image, sublattice_B, show_progressbar=False)  #Subtract both A and B from the original image
                print('Refining atom positions for sublattice O...')
                sublattice_O = find_atom(image_without_AB, O_positions, 'O sites', atom_color='g')
                ap_O = sublattice_O.atom_positions  #Refined atom positions for O; NumPy array
                print('Refining O atoms done!')
                #lattice_list.append(sublattice_O)
            print('Refining atoms done!')
            #Construct atom position results with sublattice A and B.
            #atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list)
            #Save the refined positions and original image as an hdf5 file.
            if plotpos == 1:  #(gating by the check box reconstructed)
                print('Saving result plots...')
                global f_A_site, f_B_site, f_AB
                #Plot A-site atom positions with the original image overlaid
                f_A_site = PlotCanvas()
                f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms')
                f_A_site.axes.imshow(image)
                f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_A_site.axes.set_axis_off()
                f_A_site.show()
                f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot B-site atom positions with the original image overlaid
                f_B_site = PlotCanvas()
                f_B_site.setWindowTitle('VecMap0.1: Refined positions of B-site atoms')
                f_B_site.axes.imshow(image)
                f_B_site.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_B_site.axes.set_axis_off()
                f_B_site.show()
                f_B_site.fig.savefig(my_path + title + '_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                #Plot both A-site and B-site on the image
                f_AB = PlotCanvas()
                f_AB.setWindowTitle('VecMap0.1: A-site atoms vs. B-site atoms')
                f_AB.axes.imshow(image)
                f_AB.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                f_AB.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                f_AB.axes.set_axis_off()
                f_AB.show()
                f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif', dpi=600, bbox_inches='tight')
                if find_O == 1:
                    global f_O_site, f_all
                    #Plot O atom positions
                    f_O_site = PlotCanvas()
                    f_O_site.setWindowTitle('VecMap0.1: Refined positions of O atoms')
                    f_O_site.axes.imshow(image)
                    f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_O_site.axes.set_axis_off()
                    f_O_site.show()
                    f_O_site.fig.savefig(my_path + title + '_O atoms' + '.tif', dpi=600, bbox_inches='tight')
                    #Plot all the atoms on the image
                    f_all = PlotCanvas()
                    f_all.setWindowTitle('VecMap0.1: A-site vs. B-site vs. O atoms')
                    f_all.axes.imshow(image)
                    f_all.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r')
                    f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b')
                    f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g')
                    f_all.axes.set_axis_off()
                    f_all.show()
                    f_all.fig.savefig(my_path + title + '_all atoms' + '.tif', dpi=600, bbox_inches='tight')  #(file suffix reconstructed)
        except NameError:
            #Pop up an error window
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please initialize the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
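    # --- Editor's sketch (not part of the original source): the lattice
    # parameter is just the length of a zone-axis average-distance vector
    # scaled by the pixel calibration, as computed above. Values here are
    # hypothetical: a (20, 0) px zone vector at 0.02 nm/px gives 0.4 nm.
    @staticmethod
    def _demo_lattice_parameter():
        z0 = (20.0, 0.0)   #zone-axis vector in px (hypothetical)
        scale_demo = 0.02  #calibration in nm/px (hypothetical)
        Ua_demo = math.sqrt(z0[0]**2 + z0[1]**2) * scale_demo
        assert abs(Ua_demo - 0.4) < 1e-9
        return Ua_demo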
    #==================== Calculate displacement module =================================
    #==================== Connected to self.pushButton_13 ===============================
    def cal_disp(self):
        try:
            #Global variables
            global U_avg, disp, disp_O, disp_atom
            #Read cal_site from the radio buttons:
            #0 to calculate A site relative to B site;
            #1 to calculate B site relative to A site
            if self.radioButton.isChecked():
                cal_site = 0
            else:
                cal_site = 1
            O_map = find_O  #If enabled, will calculate the displacement of O atoms in relation to sublattice B
            U_avg = (Ua + Uc) / 2  #Unit cell parameter estimated from the image
            if cal_site == 0:
                disp_atom = 'A-site'
                rel_atom = 'B-site'
                ap_0 = ap_A.tolist()
                ap_1 = ap_B.tolist()
            else:
                disp_atom = 'B-site'
                rel_atom = 'A-site'
                ap_0 = ap_B.tolist()
                ap_1 = ap_A.tolist()
            print('='*50)
            print('====Calculate {} relative to {}===='.format(disp_atom, rel_atom))
            ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale, img_110)
            disp = find_displacement(ap_0, ideal_pos, scale)
            #Save the displacement data
            with open(my_path + title + '-{}-disp.csv'.format(disp_atom), 'w') as disp_data:
                disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                for data in disp:
                    disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                    disp_data.write('\n')
            print('Atomic displacement data saved to ' + my_path + title + '-disp.csv.')
            #Save the neighboring atoms as well
            with open(my_path + 'neighboring atoms.csv', 'w') as neighbor_data:
                for data in neighbor_pos:
                    n = len(data)
                    for idx in range(n):
                        neighbor_data.write('{0}, {1}, '.format(*data[idx]))
                    neighbor_data.write('\n')
            #Calculate the O map and save
            if O_map == 1:
                ap_2 = ap_O.tolist()
                ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale)
                disp_O = find_displacement(ap_2, ideal_O_pos, scale)
                with open(my_path + title + '-disp_O_by_{}.csv'.format(disp_atom), 'w') as disp_data:
                    disp_data.write('x (px), y (px), x disp (px), y disp (px), disp (nm), angle (deg)\n')
                    for data in disp_O:
                        disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5]))
                        disp_data.write('\n')
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please refine the atom positions first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #======== Display angle distribution of the vectors module ==========================
    #======== Connected to self.pushButton_5 ============================================
    def vec_ang_dist(self):
        try:
            disp_angles = [lst[5] for lst in disp]
            global f_vec_ang_dist
            f_vec_ang_dist = PlotCanvas()
            f_vec_ang_dist.setWindowTitle('Histogram of Displacement Directions')
            f_vec_ang_dist.axes.hist(disp_angles, bins=50)
            f_vec_ang_dist.axes.set_xlabel('Displacement angles (deg)')
            f_vec_ang_dist.show()
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #========= Display vector map module ================================================
    #========= Connected to self.pushButton_6 ===========================================
    def show_vec_map(self):
        a_len = int(self.lineEdit_3.text())  #arrow scale factor from the "Scale:" box
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        #Read from the line edits:
        ang_lst = str(self.lineEdit_4.text()).split()  #A list of displacement directions.
        #This is used to determine the coloring pattern. For single-color rendering,
        #just leave it as [0].
        ang_lst = [int(a) for a in ang_lst]
        color_lst = str(self.lineEdit_5.text()).split()
        try:
            #====Plot====
            disp_color = set_arrow_color(disp, ang_lst, color_lst)
            global f_vec_map
            f_vec_map = PlotCanvas()
            f_vec_map.setWindowTitle('VecMap0.1: Vector Map')
            f_vec_map.axes.imshow(image)
            f_vec_map.axes.set_axis_off()
            for vec in disp_color:
                f_vec_map.axes.arrow(vec[0], vec[1], vec[2]*a_len, vec[3]*a_len, color=vec[6],
                                     linewidth=1, head_width=a_len/3, head_length=a_len/3)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale, 'nm', location='lower left', scale_loc='top', sep=2)
                f_vec_map.axes.add_artist(scalebar)
            f_vec_map.show()
            f_vec_map.fig.savefig(my_path + title + "_{}_vec_map.tif".format(disp_atom), dpi=1200, bbox_inches='tight')
            print('The vector map has been saved to ' + my_path + title + "_{}_vec_map.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Please calculate the displacement first!")
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()
        except IndexError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("The color list does not match the angle list!")  #(message reconstructed)
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #========= Display O vector map module ==============================================
    #========= Connected to self.pushButton_14 ==========================================
    def show_O_vec_map(self):
        O_len = int(self.lineEdit_3.text())  #arrow scale factor for the O map
        if self.checkBox_5.isChecked():
            s_bar = 1
        else:
            s_bar = 0
        try:
            global f_vec_map_O
            f_vec_map_O = PlotCanvas()
            f_vec_map_O.setWindowTitle('VecMap0.1: O Vector Map')
            f_vec_map_O.axes.imshow(image)
            f_vec_map_O.axes.set_axis_off()
            for vec in disp_O:
                f_vec_map_O.axes.arrow(vec[0], vec[1], vec[2]*O_len, vec[3]*O_len, color='g',
                                       linewidth=1, head_width=O_len/3, head_length=O_len/3)  #(single-color O arrows reconstructed)
            #Add a scale bar
            if s_bar == 1:
                scalebar = ScaleBar(scale, 'nm', location='lower left', scale_loc='top', sep=2)
                f_vec_map_O.axes.add_artist(scalebar)
            f_vec_map_O.show()
            f_vec_map_O.fig.savefig(my_path + title + "_O_vec_map_by_{}.tif".format(disp_atom), dpi=1200, bbox_inches='tight')
            print('The O vector map has been saved to ' + my_path + title + "_O_vec_map_by_{}.tif! Enjoy!".format(disp_atom))
        except NameError:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("No O displacement data! Please enable the O map and recalculate.")  #(wording partially reconstructed)
            msg.setWindowTitle("Hey guys")
            returnValue = msg.exec()

    #============ Load displacement from csv module =====================================
    #============ Connected to self.pushButton_7 ========================================
    def load_from_csv(self):
        #Load displacement data from a previously saved csv file
        openfile_name = QFileDialog.getOpenFileName(self, 'Select csv', '', 'csv files (*.csv);;All Files (*)')  #(file filter reconstructed)
        global file, my_path, title, scale, units, s, image, disp, disp_O, disp_atom, find_O
        file = openfile_name[0]
        if file:
            my_path = getDirectory(file, '/')
            s = readImage(my_path + 'Original image.hspy')
            title = s.metadata.General.title
            scale = s.axes_manager[0].scale
            units = s.axes_manager[0].units
            image = s.data
            disp = load_disp_data_from_csv(file)
            #Look for the O data
            disp_atom = file[-15:-9]  #e.g. 'A-site' from a name ending in '-A-site-disp.csv'
            file_O_disp = my_path + title + '-disp_O_by_' + disp_atom + '.csv'
            if os.path.isfile(file_O_disp):
                disp_O = load_disp_data_from_csv(file_O_disp)
                find_O = 1
                print('Found O displacement data!')
            else:
                find_O = 0
                print('No O displacement data yet. Plot {} atom displacement only!'.format(disp_atom))

    #============ Disclaimer button =====================================================
    #============ Connected to self.pushButton_8 ========================================
    def disclaimer(self):
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("<b>Disclaimer</b><br>"
                    "This app was designed by Dr. <NAME>. Redistribution and use in source, "
                    "with or without modification, are permitted. Any redistribution must retain "
                    "the above copyright. When a scientific publication results from use of the "
                    "app, please add the following references: <br>"
                    "1. Ma, T. et al. <a href=\"https://doi.org/10.1103/PhysRevLett.123.217602\">Phys. Rev. Lett. 123, 217602 (2019).</a><br>"
                    "2. Ma, T. et al. <a href=\"https://doi.org/10.1063/1.5115039\">Appl. Phys. Lett.</a><br>"
                    "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND.<br>")
        msg.setWindowTitle("VecMap0.1: Disclaimer")
        def disclaimerButtonClick(button):
            thanks = QMessageBox()
            thanks.setText('Thanks for using VecMap')
            thanks.setWindowTitle('Thank you!')
            thanks.exec()
        msg.buttonClicked.connect(disclaimerButtonClick)
        returnValue = msg.exec()

    #============ About button ==========================================================
    #============ Connected to self.pushButton_9 ========================================
    def show_about(self):
        msg = QMessageBox()
        #msg.setIcon(QMessageBox.Information)
        msg.setText("VecMap v0.1.1"
                    "<br>"
                    "Designed by Dr. <NAME>"
                    "<br>"
                    "06/13/2020"
                    "<br>"
                    "First version release!<br>"
                    "Get more information and<br> source code from my <a href=\"http://www-personal.umich.edu/~taoma/VectorMap.html\">website</a>.")
        msg.setWindowTitle("VecMap0.1: About")
        returnValue = msg.exec()

    #============ Acknowledgments button ================================================
    #============ Connected to self.pushButton_10 =======================================
    def acknowledgments(self):
        msg = QMessageBox()
        msg.setIcon(QMessageBox.Information)
        msg.setText("This program was written with Python 3. The author "
                    "acknowledges the HyperSpy and Atomap packages, which "
                    "are partially incorporated in the program. Please "
                    "consider citing/adding an acknowledgement for the HyperSpy "
                    "and Atomap packages in your publication:"
                    "<br>"
                    "<NAME> et al. <a href=\"http://doi.org/10.5281/zenodo.3396791\">hyperspy/hyperspy: HyperSpy v1.5.2 (2019).</a>"
                    "<br>"
                    "<NAME> et al. <a href=\"https://doi.org/10.1186/s40679-017-0042-5\">Adv. Struct. Chem. Imaging 3, 9 (2017).</a>")
        msg.setWindowTitle("VecMap0.1: Acknowledgments")
        returnValue = msg.exec()

    #============ Contact button ========================================================
    #============ Connected to self.pushButton_11 =======================================
    def show_contact(self):
        msg = QMessageBox()
        msg.setText("Ask questions and report bugs to:"
                    "<br>"
                    "<a href=\"mailto:<EMAIL>\"><EMAIL></a>")
        msg.setWindowTitle("VecMap0.1: Contact")
        returnValue = msg.exec()

    #============ Donate button =========================================================
    #============ Connected to self.pushButton_12 =======================================
    def donate(self):
        msg = QMessageBox()
        msg.setText("I will make this app freely available for the society.<br>"
                    "If you like this app, show your appreciation by "
                    "<a href=\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ&currency_code=USD&source=url\">donating!</a>"
                    "<br>"
                    "Your support is my motivation!<br>")
        msg.setWindowTitle("VecMap0.1: Donate me!")
        returnValue = msg.exec()
Please see", "!= 0: x = Dx / D y = Dy / D return", "from my <a href=\\\"http://www-personal.umich.edu/~taoma/VectorMap.html\\\">website</a>.\") msg.setWindowTitle(\"VecMap0.1: About\") returnValue = msg.exec() #============ Acknowledgments button ====================================================", "B, Ua, scale, img_110=False): #calculate the ideal atomic positions for A in a", "disp_data = [] lines = disp.readlines() print('Displacement data:\\n') print(lines[0]) for lin in lines[1:]:", "ms=5, color='r', ls='') cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick) except NameError: #Pop up an error", "Set the coloring pattern by checking the vector angle distribution.</p></body></html>\")) self.pushButton_7.setText(_translate(\"VecMap\", \"Load from", "and remove it. '/' for parent path path = file[:idx] + '/' return", "image=img, color=atom_color, name=atom_name) sublattice.find_nearest_neighbors() sublattice.refine_atom_positions_using_center_of_mass(show_progressbar=False) sublattice.refine_atom_positions_using_2d_gaussian(show_progressbar=False) return sublattice #Return an atomap sublattice object", "= s.axes_manager[0].scale units = s.axes_manager[0].units image = s.data disp = load_disp_data_from_csv(file) # Look", "= QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"<b>Disclaimer</b><br>\" \\ \"This app was designed by Dr <NAME>. Redistribution", "in range(len(ang_bond)-1): if round(ang) in range(ang_bond[i], ang_bond[i+1]): vec.append(color_lst[i+1]) for vec in vec_data_color: if", "f_vec_map_O.show() f_vec_map_O.fig.savefig(my_path + title + \"_O_vec_map_by_{}.tif\".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True) print('The O vector map has been saved", "society.<br>\"\\ \"If you like this app, show your appreciation by <a href=\\\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ&currency_code=USD&source=url\\\">donating me!</a>\"\\", "print('='*50) print('====Calculate {} in relative to {}===='.format(disp_atom, rel_atom)) ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1,", "plot, we can use add_axes # instead of add_subplot, but then the subplot", "#Require Hyperspy package s = load(file) return s def getDirectory(file, s='.'): #Make the", "L1[0] * L2[2] - L1[2] * L2[0] if D != 0: x =", "single color rendering, just leave it as [0]. ang_lst = [int(a) for a", "threashold in px, 0.707*a for [001] and 0.5*a for [110] x, y =", "* L2[1] - L1[1] * L2[0] Dx = L1[2] * L2[1] - L1[1]", "self.label_13.setObjectName(\"label_13\") self.checkBox_5 = QtWidgets.QCheckBox(VecMap) self.checkBox_5.setGeometry(QtCore.QRect(210, 650, 111, 20)) self.checkBox_5.setChecked(True) self.checkBox_5.setObjectName(\"checkBox_5\") self.retranslateUi(VecMap) QtCore.QMetaObject.connectSlotsByName(VecMap) #=======Connect", "\"distrubution\")) self.pushButton_6.setText(_translate(\"VecMap\", \"Show \\n\" \"map\")) self.label_18.setText(_translate(\"VecMap\", \"<html><head/><body><p>Generate a vector map. Set the coloring", "QMainWindow.__init__(self, parent) self.setWindowTitle('VecMap0.1: Plot') self.create_main_frame() def create_main_frame(self): self.main_frame = QWidget() # Create the", "the \"\\ \"app, please add the following reference: <br>\"\\ \"1. Ma, T. 
et", "my motivation!<br>\") msg.setWindowTitle(\"VecMap0.1: Donate me!\") returnValue = msg.exec() #=========== Define figure canvas ===================================================", "int(self.lineEdit_2.text()) if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar = 0 try: # Read", "NavigationToolbar2QT as NavigationToolbar from matplotlib.figure import Figure class Ui_VecMap(QtWidgets.QMainWindow): def __init__(self): super(Ui_VecMap,self).__init__() self.setupUi(self)", "1: print('='*50) print('Saving result plots...') global f_A_site, f_B_site, f_AB #Plot A-site atom positions", "< 0 and dy < 0: vec_ang = math.degrees(math.atan(dy/dx)) + 180 else: vec_ang", "navigation toolbar wouldn't # work. # self.axes = self.fig.add_subplot(111) # Create the navigation", "fit...') print('This may take time...') image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False) #Subtract both A and B from the", "self.label_12.setGeometry(QtCore.QRect(170, 130, 191, 16)) self.label_12.setObjectName(\"label_12\") self.label_14 = QtWidgets.QLabel(VecMap) self.label_14.setGeometry(QtCore.QRect(20, 510, 381, 16)) self.label_14.setObjectName(\"label_14\")", "810, 120, 28)) self.pushButton_10.setObjectName(\"pushButton_10\") self.pushButton_11 = QtWidgets.QPushButton(VecMap) self.pushButton_11.setGeometry(QtCore.QRect(150, 810, 120, 28)) self.pushButton_11.setObjectName(\"pushButton_11\") self.pushButton_12", "20)) self.checkBox_4.setObjectName(\"checkBox_4\") self.line_3 = QtWidgets.QFrame(VecMap) self.line_3.setGeometry(QtCore.QRect(20, 420, 371, 21)) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName(\"line_3\") self.label_11", "21)) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName(\"line_3\") self.label_11 = QtWidgets.QLabel(VecMap) self.label_11.setGeometry(QtCore.QRect(20, 440, 191, 16)) self.label_11.setObjectName(\"label_11\") self.label_12", "to calculate\")) self.pushButton_13.setText(_translate(\"VecMap\", \"Calculate\")) self.label_7.setText(_translate(\"VecMap\", \"Scale:\")) self.lineEdit_2.setText(_translate(\"VecMap\", \"10\")) self.pushButton_14.setText(_translate(\"VecMap\", \"Oxygen\\n\" \" map\")) self.lineEdit_3.setText(_translate(\"VecMap\",", "QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate the displacement first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() except", ", *.png ,*.bmp);;All Files (*)') global file, my_path, file_path, title, scale, units, s,", "f_AB.show() f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot O atoms if", "positions for O AB_positions = ap_A.tolist() + ap_B.tolist() sublattice_AB = Sublattice(AB_positions,image=s.data,color='y',name='Sublattice A +", "f_A_site.axes.set_axis_off() f_A_site.show() f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot B-site atom", "up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please load the image file", "self.pushButton_13 =============================== def cal_disp(self): try: #Global variables global U_avg, disp, disp_O, disp_atom #", "self.pushButton_5 = QtWidgets.QPushButton(VecMap) self.pushButton_5.setGeometry(QtCore.QRect(280, 550, 101, 91)) self.pushButton_5.setObjectName(\"pushButton_5\") self.pushButton_6 = 
QtWidgets.QPushButton(VecMap) self.pushButton_6.setGeometry(QtCore.QRect(20, 680,", "msg.setText(\"Please calculate the displacement first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() except IndexError: msg", "PROVIDED \\\"AS IS\\\", WITHOUT WARRANTY OF ANY KIND.<br>\") msg.setWindowTitle(\"VecMap0.1: Disclaimer\") def disclaimerButtonClick(): msg", "my_path, title, scale, units, disp, disp_O, image, disp_atom openfile_name = QFileDialog.getOpenFileName(self,'Select the displacement", "to the first version of VecMap --- a convenient tool to calculate atomic", "line function from two points A = (p1[1] - p2[1]) B = (p2[0]", "not os.path.exists(my_path): os.makedirs(my_path) s = readImage(file) title = s.metadata.General.title scale = s.axes_manager[0].scale #Read", "+ title + '_A_and_B-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot O atoms if available if", "< 0: vec_ang = math.degrees(math.atan(dy/dx)) + 180 else: vec_ang = 360 + math.degrees(math.atan(dy/dx))", "y]) else: A_positions.pop(atom_nearby) replot(f_ini) def get_xy_pos_lists(atom_lst): return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1] def replot(f): x_pos, y_pos", "s=2, color='r') f_A_site.axes.set_axis_off() f_A_site.show() f_A_site.fig.savefig(my_path + title + '_A-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot", "= 1 print('Found O displacement data!') else: find_O = 0 print('No O displacement", "print('='*50) print('Refining atom positions for A-site atoms...') print('This may take time...') sublattice_A =", "and save figures #======================= if plotpos == 1: print('='*50) print('Saving result plots...') global", "[] #lattice_list.append(sublattice_A) print('='*50) print('Finding the initial positions for B-site atoms...') sublattice_A.construct_zone_axes() #Find the", "\"This app was designed by Dr <NAME>. Redistribution and use in source, \"", "of the vectors module =========================== #======== Connected to self.pushButton_5 ============================================= def vec_ang_dist(self): try:", "#Save the refined positions and original image as hdf5 file. This file can", "vec.append(color_lst[i+1]) for vec in vec_data_color: if len(vec) == 6: vec.append(color_lst[0]) return vec_data_color def", "480, 95, 20)) self.radioButton.setChecked(True) self.radioButton.setObjectName(\"radioButton\") self.radioButton_2 = QtWidgets.QRadioButton(VecMap) self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20)) self.radioButton_2.setObjectName(\"radioButton_2\")", "1: ap_2 = ap_O.tolist() ideal_O_pos = find_ideal_O_pos(ap_0, ap_1, U_avg, scale) disp_O = find_displacement(ap_2,", "QtWidgets.QPushButton(VecMap) self.pushButton_9.setGeometry(QtCore.QRect(150, 780, 120, 28)) self.pushButton_9.setObjectName(\"pushButton_9\") self.pushButton_10 = QtWidgets.QPushButton(VecMap) self.pushButton_10.setGeometry(QtCore.QRect(20, 810, 120, 28))", "= get_atom_positions(s,separation=sep) global A_positions, f_ini A_positions = A_positions_ini.tolist() f_ini = PlotCanvas() f_ini.setWindowTitle('Initial atom", "self.line_4 = QtWidgets.QFrame(VecMap) self.line_4.setGeometry(QtCore.QRect(20, 730, 371, 21)) self.line_4.setFrameShape(QtWidgets.QFrame.HLine) self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_4.setObjectName(\"line_4\") self.pushButton_8 = QtWidgets.QPushButton(VecMap)", "fit...') print('This may take time...') image_without_A = remove_atoms_from_image_using_2d_gaussian(sublattice_A.image, sublattice_A, show_progressbar=False) #Refine B-site atoms", "1. 
Load image\")) self.label_2.setText(_translate(\"VecMap\", \"<html><head/><body><p>Load a HR-STEM image with a perovskite structure. Support", "position module =============================================== #==== Connected to self.pushButton_2 ================================================ def ini_atom_position(self): sep = int(self.lineEdit.text())", "Oxygen atoms') f_vec_map_O.axes.imshow(image) f_vec_map_O.axes.set_axis_off() for vec in disp_O: f_vec_map_O.axes.arrow(vec[0],vec[1],vec[2]*O_len,vec[3]*O_len,color='red',linewidth=1,head_width=O_len/3,head_length=O_len/3) #Add a scale bar", "color rendering, just leave it as [0]. ang_lst = [int(a) for a in", "the original image #Refine O positions print('='*50) print('Refining atom positions for sublattice O...')", "self.pushButton_11 ======================================= def show_contact(self): msg = QMessageBox() msg.setText(\"Ask questions and report bugs to:\"\\", "= FigureCanvas(self.fig) self.canvas.setParent(self.main_frame) # Add a 9x9 axes layout # self.axes = [self.fig.add_subplot(3,3,n)", "\"List of colors (should match the angles):\")) self.label_17.setText(_translate(\"VecMap\", \"e.g., yellow blue red green\"))", "\"Step 1. Load image\")) self.label_2.setText(_translate(\"VecMap\", \"<html><head/><body><p>Load a HR-STEM image with a perovskite structure.", "Refined positions of A-site atoms') f_A_site.axes.imshow(image) f_A_site.axes.scatter(ap_A[:,0], ap_A[:,1], s=2, color='r') f_A_site.axes.set_axis_off() f_A_site.show() f_A_site.fig.savefig(my_path", "disp = find_displacement(ap_0, ideal_pos, scale) #Save the displacement data with open(my_path + title", "file. This file can be called later. #atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True) #======================= #Plot", "self.label_16 = QtWidgets.QLabel(VecMap) self.label_16.setGeometry(QtCore.QRect(20, 580, 381, 16)) self.label_16.setObjectName(\"label_16\") self.label_17 = QtWidgets.QLabel(VecMap) self.label_17.setGeometry(QtCore.QRect(20, 600,", "= [] for idx in range(len(ang_lst_mod)-1): ang_bond.append((ang_lst_mod[idx + 1] - ang_lst_mod[idx]) // 2", "self.checkBox_4 = QtWidgets.QCheckBox(VecMap) self.checkBox_4.setGeometry(QtCore.QRect(260, 10, 111, 20)) self.checkBox_4.setObjectName(\"checkBox_4\") self.line_3 = QtWidgets.QFrame(VecMap) self.line_3.setGeometry(QtCore.QRect(20, 420,", "your cursor on the peak(s) to see the\\n displacement directions') f_vec_ang_dist.show() except NameError:", "#Refined atoms positions for O. NumPy array. 
print('Refining O atoms done!') #lattice_list.append(sublattice_O) print('Refining", "of VecMap --- a convenient tool to calculate atomic displacements in perovskite structures", "else: img_110 = 0 if file: print('{} has been loaded!'.format(file)) my_path = getDirectory(file)", "371, 21)) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName(\"line\") self.label = QtWidgets.QLabel(VecMap) self.label.setGeometry(QtCore.QRect(20, 10, 121, 16)) self.label.setObjectName(\"label\")", "f_O_site.axes.imshow(image) f_O_site.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g') f_O_site.axes.set_axis_off() f_O_site.show() f_O_site.fig.savefig(my_path + title + '_O atoms'", "#Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please initialize the atom", "x = Dx / D y = Dy / D return x,y else:", "Dx = L1[2] * L2[1] - L1[1] * L2[2] Dy = L1[0] *", "import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import * from PyQt5.QtCore import * import", "[0]. ang_lst = [int(a) for a in ang_lst] color_lst = str(self.lineEdit_5.text()).split() #====Plot==== disp_color", "\"app, please add the following reference: <br>\"\\ \"1. Ma, T. et al. <a", "Struct. Chem. Imaging 3, 9 (2017).</a>\") msg.setWindowTitle(\"VecMap0.1: Acknowledgments\") returnValue = msg.exec() #============ Contact", "positions for B-site. NumPy array. print('Refining B-site atoms done!') #lattice_list.append(sublattice_B) #Find the position", "self.label_3.setGeometry(QtCore.QRect(20, 110, 191, 16)) self.label_3.setObjectName(\"label_3\") self.label_4 = QtWidgets.QLabel(VecMap) self.label_4.setGeometry(QtCore.QRect(20, 130, 111, 16)) self.label_4.setObjectName(\"label_4\")", "neighboring atoms N = sorted(N, key=lambda x: (x[0] ** 2 + x[1] **", "= QtWidgets.QPushButton(VecMap) self.pushButton_6.setGeometry(QtCore.QRect(20, 680, 80, 41)) self.pushButton_6.setObjectName(\"pushButton_6\") self.label_18 = QtWidgets.QLabel(VecMap) self.label_18.setGeometry(QtCore.QRect(200, 680, 191,", "#====Helper functions, do not change==== def readImage(file): #Load raw image file for process.", "{}'.format(s_factor)) f_sep.show() except NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical)", "msg.setIcon(QMessageBox.Information) msg.setText(\"VecMap v0.1.1\"\\ \"<br>\"\\ \"Designed by Dr. <NAME>\"\\ \"<br>\"\\ \"06/13/2020\"\\ \"<br>\" \"First version", "Create the mpl Figure and FigCanvas objects. # 10x10 inches, 100 dots-per-inch #", "+ 180 else: vec_ang = 360 + math.degrees(math.atan(dy/dx)) disp.append([atom[0], atom[1], dx, dy, scale*1000*vec_len,", "from the image. #========================================================================= #The main scripts start from here if cal_site ==", "O_len = int(self.lineEdit_3.text()) if self.checkBox_5.isChecked(): s_bar = 1 else: s_bar = 0 try:", "\"_O_vec_map_by_{}.tif! Enjoy!\".format(disp_atom)) except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"No O displacement data exist!\")", "def acknowledgments(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"This program was written with Python 3.", "navigation toolbar, tied to the canvas # self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame) vbox =", "ideal atomic positions for O in a un-distorted perovskite structure #only support [001]", "program. 
Please \"\\ \"consider citing/adding acknowledgement for Hyperspy \"\\ \"and Atomap packages in", "returnValue = msg.exec() #============ Donate me button ==================================================== #============ Connected to self.pushButton_12 =======================================", "to self.pushButton_11 ======================================= def show_contact(self): msg = QMessageBox() msg.setText(\"Ask questions and report bugs", "separation factor' #s_peaks.plot(colorbar=False,scalebar=False,axes_off=True) sep = int(self.lineEdit.text()) sep_range = list(range(sep - 4, sep +", "gaussian fit...') print('This may take time...') image_without_AB=remove_atoms_from_image_using_2d_gaussian(sublattice_B.image,sublattice_B,show_progressbar=False) #Subtract both A and B from", "as hdf5 file. This file can be called later. #atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True)", "for data in lin_data]) return disp_data #====Application entry================================== def main(): print('='*50) print(''' Welcome", "{}'.format(data[0], data[1], data[2], data[3], data[4], data[5])) disp_data.write('\\n') print('Atomic displacement data saved to '", "al. <a href=\\\"https://doi.org/10.1186/s40679-017-0042-5\\\">Adv. Struct. Chem. Imaging 3, 9 (2017).</a>\") msg.setWindowTitle(\"VecMap0.1: Acknowledgments\") returnValue =", "= QtGui.QFont() font.setBold(True) font.setWeight(75) self.pushButton_12.setFont(font) self.pushButton_12.setObjectName(\"pushButton_12\") self.radioButton = QtWidgets.QRadioButton(VecMap) self.radioButton.setGeometry(QtCore.QRect(20, 480, 95, 20))", "color='b') f_AB.axes.set_axis_off() f_AB.show() f_AB.fig.savefig(my_path + title + '_A_and_B-site atoms' + '.tif',dpi=600,bbox_inches='tight') #Plot O", "be called later. #atom_lattice.save(my_path + 'atom_position.hdf5', overwrite=True) #======================= #Plot and save figures #=======================", "continue ini_position = get_atom_positions(s, separation=s_factor) f_sep.axes[i].imshow(s.data) f_sep.axes[i].scatter(np.asarray(ini_position)[:,0], np.asarray(ini_position)[:,1], s=5, color='r') f_sep.axes[i].set_title('Separation = {}'.format(s_factor))", "arrow_end[0]-atom[0] dy = arrow_end[1]-atom[1] #calculate the displacement vector angle according to dx, dy.", "QMessageBox() # msg.setIcon(QMessageBox.Information) msg.setText(\"VecMap v0.1.1\"\\ \"<br>\"\\ \"Designed by Dr. <NAME>\"\\ \"<br>\"\\ \"06/13/2020\"\\ \"<br>\"", "bar\")) #===== Open file and set up global variables such as path etc.", "coloring pattern by checking the vector angle distribution.</p></body></html>\")) self.pushButton_7.setText(_translate(\"VecMap\", \"Load from csv\")) self.pushButton_8.setText(_translate(\"VecMap\",", "vector map has been saved to ' + my_path + title + \"_O_vec_map_by_{}.tif!", "len(Neighbor) == 4: n_0 = Neighbor.pop(0) n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0]) n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0])", "QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import * from PyQt5.QtCore import * import matplotlib", "QMessageBox() msg.setText(\"Ask questions and report bugs to:\"\\ \"<br>\" \"<a href=\\\"mailto:<EMAIL>\\\"><EMAIL></a>\") msg.setWindowTitle(\"VecMap0.1: Contact\") returnValue", "= (int(self.lineEdit_2.text()), int(self.lineEdit_3.text())) #s_peaks=am.get_feature_separation(s, separation_range=sep_range) #Range might be changed for different images #s_peaks.metadata.General.title", "For single color rendering, just leave it as [0]. 
ang_lst = [int(a) for", "msg.exec() #==== Find separation module ======================================================== #==== Connected to self.pushButton_3 ================================================ def find_separation(self):", "perovskite structures This app was designed by Dr. <NAME>. Address your questions and", "atoms positions for A-site. NumPy array. #lattice_list = [] #lattice_list.append(sublattice_A) print('='*50) print('Finding the", "121, 16)) self.label.setObjectName(\"label\") self.label_2 = QtWidgets.QLabel(VecMap) self.label_2.setGeometry(QtCore.QRect(130, 40, 251, 51)) self.label_2.setTextFormat(QtCore.Qt.AutoText) self.label_2.setScaledContents(False) self.label_2.setWordWrap(True)", "Files (*)') file = openfile_name[0] if file: my_path = getDirectory(file,'/') s = readImage(my_path", "= list(range(sep - 4, sep + 5)) # Create canvas for drawing try:", "and Atomap packages which \"\\ \"are partially incorporated in the program. Please \"\\", "ScaleBar(scale,'nm',location='lower left',scale_loc='top',sep=2) f_vec_map_O.axes.add_artist(scalebar) f_vec_map_O.show() f_vec_map_O.fig.savefig(my_path + title + \"_O_vec_map_by_{}.tif\".format(disp_atom),dpi=1200,bbox_inches='tight',overwrite=True) print('The O vector map", "+ 360 for i in range(len(ang_bond)-1): if round(ang) in range(ang_bond[i], ang_bond[i+1]): vec.append(color_lst[i+1]) for", "the\\n displacement directions') f_vec_ang_dist.show() except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate the", "== 1: print('All figures have been saved to '+ my_path) except NameError: #Pop", "L1[2] * L2[0] if D != 0: x = Dx / D y", "FigCanvas objects. # 5x4 inches, 100 dots-per-inch # self.dpi = 100 self.fig =", "B. #atom_lattice = am.Atom_Lattice(image=image, name='Atoms positions', sublattice_list=lattice_list) #Save the refined positions and original", "of O atoms in relation to sublattice B. U_avg = (Ua + Uc)/2", "use add_axes # instead of add_subplot, but then the subplot # configuration tool", "# Create the mpl Figure and FigCanvas objects. # 10x10 inches, 100 dots-per-inch", "return np.asarray(atom_lst)[:,0], np.asarray(atom_lst)[:,1] def replot(f): x_pos, y_pos = get_xy_pos_lists(A_positions) dp.set_xdata(x_pos) dp.set_ydata(y_pos) f.fig.canvas.draw() f.fig.canvas.flush_events()", "positions for refining') f_ini.axes.imshow(s.data) f_ini.axes.set_axis_off() f_ini.axes.set_title('Left click to add or remove atoms') f_ini.show()", "else: s_bar = 0 try: # Read from lineEdits: ang_lst = str(self.lineEdit_4.text()).split() #A", "NameError: #Pop up an error window msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please load the", "positions', sublattice_list=lattice_list) #Save the refined positions and original image as hdf5 file. This", "msg.setIcon(QMessageBox.Critical) msg.setText(\"Please initialize the atom positions first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #====================", "find the intersection point of two lines D = L1[0] * L2[1] -", "self.radioButton.setChecked(True) self.radioButton.setObjectName(\"radioButton\") self.radioButton_2 = QtWidgets.QRadioButton(VecMap) self.radioButton_2.setGeometry(QtCore.QRect(90, 480, 95, 20)) self.radioButton_2.setObjectName(\"radioButton_2\") self.label_21 = QtWidgets.QLabel(VecMap)", "image overlayed. 
f_A_site = PlotCanvas() f_A_site.setWindowTitle('VecMap0.1: Refined positions of A-site atoms') f_A_site.axes.imshow(image) f_A_site.axes.scatter(ap_A[:,0],", "8-12\")) self.label_14.setText(_translate(\"VecMap\", \"List of angles (degrees) of vectors that will be colored differently:\"))", "#==================== Connected to self.pushButton_13 =============================== def cal_disp(self): try: #Global variables global U_avg, disp,", "ideal_O_positions def find_displacement(A, A_com, scale): #find atomic displacement of A #A_com, A are", "#Return an atomap sublattice object def find_neighboring_atoms(P, A, Ua, tol=1.2): # Define a", "as disp: disp_data = [] lines = disp.readlines() print('Displacement data:\\n') print(lines[0]) for lin", "= 1 else: ABF = 0 if self.checkBox_4.isChecked(): img_110 = 1 else: img_110", "f_sep.axes[i].set_title('Separation = {}'.format(s_factor)) f_sep.show() except NameError: #Pop up an error window msg =", "file for process. #Require Hyperspy package s = load(file) return s def getDirectory(file,", "positions for A-site. NumPy array. #lattice_list = [] #lattice_list.append(sublattice_A) print('='*50) print('Finding the initial", "f_ini.axes.set_title('Left click to add or remove atoms') f_ini.show() def onclick(event): if event.inaxes !=", "title + \"_{}_vec_map.tif! Enjoy!\".format(disp_atom)) except NameError: msg = QMessageBox() msg.setIcon(QMessageBox.Critical) msg.setText(\"Please calculate the", "#Set the working path file_path = getDirectory(file, '/') #Set the parent path if", "positions for B-site atoms...') sublattice_A.construct_zone_axes() #Find the zone axis for the initial position", "color='r') f_all.axes.scatter(ap_B[:,0], ap_B[:,1], s=2, color='b') f_all.axes.scatter(ap_O[:,0], ap_O[:,1], s=2, color='g') f_all.axes.set_axis_off() f_all.show() f_all.fig.savefig(my_path +", "def disclaimer(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"<b>Disclaimer</b><br>\" \\ \"This app was designed by", "P:a given atom (x,y); A: a list of atoms; Ua: A threashold in", "#Subtract both A and B from the original image #Refine O positions print('='*50)", "f_original_img.axes.set_axis_off() f_original_img.axes.set_title('{} \\n has been successfully loaded!'.format(title)) f_original_img.show() #==== Initialize atom position module", "might be changed for different images #s_peaks.metadata.General.title = 'Use Arrow keys to find", "Calculate displacement module ================================= #==================== Connected to self.pushButton_13 =============================== def cal_disp(self): try: #Global", "try: # Read from lineEdits: ang_lst = str(self.lineEdit_4.text()).split() #A list of displacement directions.", "= math.degrees(math.atan(dy/dx)) + 180 else: vec_ang = 360 + math.degrees(math.atan(dy/dx)) disp.append([atom[0], atom[1], dx,", "linewidth=1, head_width=a_len/3, head_length=a_len/3) #Add a scale bar if s_bar == 1: scalebar =", "disp, disp_O, image, disp_atom openfile_name = QFileDialog.getOpenFileName(self,'Select the displacement data','','CSV (*.csv);;All Files (*)')", "calculate A site in relative to B site; 1 to calculate B site", "msg.setText(\"Please load the image file first!\") msg.setWindowTitle(\"Hey guys\") returnValue = msg.exec() #==== Find", "= QtWidgets.QLabel(VecMap) self.label_17.setGeometry(QtCore.QRect(20, 600, 181, 16)) self.label_17.setObjectName(\"label_17\") self.lineEdit_5 = QtWidgets.QLineEdit(VecMap) self.lineEdit_5.setGeometry(QtCore.QRect(20, 620, 251,", "16)) 
self.label_21.setObjectName(\"label_21\") self.pushButton_13 = QtWidgets.QPushButton(VecMap) self.pushButton_13.setGeometry(QtCore.QRect(200, 460, 81, 51)) self.pushButton_13.setObjectName(\"pushButton_13\") self.label_7 = QtWidgets.QLabel(VecMap)", "n_1 = Neighbor.pop(closest_node(n_0, Neighbor)[0]) n_2 = Neighbor.pop(closest_node(n_0,Neighbor)[0]) n_3 = Neighbor.pop() o_0 = (n_0[0]", "of displacement directions. This is used to determine the coloring pattern. For single", "ap_center = ((Neighbor[0][0]+Neighbor[1][0])/2,(Neighbor[0][1]+Neighbor[1][1])/2) ideal_positions.append(ap_center) Neighbor_positions.append(Neighbor) return ideal_positions, Neighbor_positions def find_ideal_O_pos(A, B, Ua, scale):", "ap_1 = ap_B.tolist() else: disp_atom = 'B-site' rel_atom = 'A-site' ap_0 = ap_B.tolist()", "30, 20)) self.lineEdit_2.setObjectName(\"lineEdit_2\") self.pushButton_14 = QtWidgets.QPushButton(VecMap) self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41)) self.pushButton_14.setObjectName(\"pushButton_14\") self.lineEdit_3 =", "diagonal of a M = [b,c,d] diag_idx = distance.cdist([a],M).argmax() L1 = line(a,M[diag_idx]) del", "836)) VecMap.setMaximumSize(QtCore.QSize(1024, 1024)) self.pushButton = QtWidgets.QPushButton(VecMap) self.pushButton.setGeometry(QtCore.QRect(20, 40, 91, 41)) self.pushButton.setObjectName(\"pushButton\") self.checkBox =", "<NAME>\"\\ \"<br>\"\\ \"06/13/2020\"\\ \"<br>\" \"First version release!<br>\" \"Get more information and<br> source code", "done!') #lattice_list.append(sublattice_O) print('Refining atoms done!') #Construct atom position results with sublattice A and", "color='g') f_all.axes.set_axis_off() f_all.show() f_all.fig.savefig(my_path + title + '_A_B_O atoms' + '.tif',dpi=600,bbox_inches='tight') if plotpos", "VecMap): _translate = QtCore.QCoreApplication.translate VecMap.setWindowTitle(_translate(\"VecMap\", \"VecMap0.1\")) #VecMap.setWindowIcon(QtGui.QIcon('icon.png')) self.pushButton.setText(_translate(\"VecMap\", \"Load Image\")) self.checkBox.setText(_translate(\"VecMap\", \"ABF/BF image\"))", "guys\") returnValue = msg.exec() print('') #========= Generate vector map module ============================================= #========= Connected", "x disp (px), y disp (px), disp (nm), angle (deg)\\n') for data in", "def find_ideal_pos(A, B, Ua, scale, img_110=False): #calculate the ideal atomic positions for A", "print('Atomic displacement data saved to ' + my_path + title + '-disp.csv.') except", "add the following reference: <br>\"\\ \"1. Ma, T. et al. <a href=\\\"https://doi.org/10.1103/PhysRevLett.123.217602\\\">Phys. 
Rev.", "91, 41)) self.pushButton.setObjectName(\"pushButton\") self.checkBox = QtWidgets.QCheckBox(VecMap) self.checkBox.setGeometry(QtCore.QRect(150, 10, 111, 20)) self.checkBox.setObjectName(\"checkBox\") self.line =", "'Original image.hspy', overwrite=True) #Save a backup file in hspy format image = s.data", "{}===='.format(disp_atom, rel_atom)) ideal_pos, neighbor_pos = find_ideal_pos(ap_0, ap_1, U_avg, scale) disp = find_displacement(ap_0, ideal_pos,", "135 225 315\")) self.label_16.setText(_translate(\"VecMap\", \"List of colors (should match the angles):\")) self.label_17.setText(_translate(\"VecMap\", \"e.g.,", "you like this app, show your appreciation by <a href=\\\"https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=NQTP8WZX9VDRQ&currency_code=USD&source=url\\\">donating me!</a>\"\\ \"<br>\"\\ \"Your", "==================================================== #============ Connected to self.pushButton_8 ======================================= def disclaimer(self): msg = QMessageBox() msg.setIcon(QMessageBox.Information) msg.setText(\"<b>Disclaimer</b><br>\"", "ls='') cid = f_ini.fig.canvas.mpl_connect('button_press_event', onclick) except NameError: #Pop up an error window msg", "displacement data','','CSV (*.csv);;All Files (*)') file = openfile_name[0] if file: my_path = getDirectory(file,'/')", "angles):\")) self.label_17.setText(_translate(\"VecMap\", \"e.g., yellow blue red green\")) self.lineEdit_5.setText(_translate(\"VecMap\", \"yellow\")) self.pushButton_5.setText(_translate(\"VecMap\", \"Vector angle\\n\" \"distrubution\"))", "f_original_img.axes.imshow(image) f_original_img.axes.set_axis_off() f_original_img.axes.set_title('{} \\n has been successfully loaded!'.format(title)) f_original_img.show() #==== Initialize atom position", "et al. <a href=\\\"https://doi.org/10.1103/PhysRevLett.123.217602\\\">Phys. Rev. Lett. 123, 217602 (2019).</a>\"\\ \"<br>\"\\ \"2. Ma, T.", "calculate B site in relative to A site if self.radioButton.isChecked(): cal_site = 0", "for A in a un-distorted perovskite structure #A, B are lists of atom", "on the peak(s) to see the\\n displacement directions') f_vec_ang_dist.show() except NameError: msg =", "drawing try: global f_sep f_sep = SeparationCanvas() for i in range(9): s_factor =", "self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20)) self.checkBox_2.setObjectName(\"checkBox_2\") self.checkBox_3 = QtWidgets.QCheckBox(VecMap) self.checkBox_3.setGeometry(QtCore.QRect(150, 330, 131, 20)) self.checkBox_3.setObjectName(\"checkBox_3\")", "# 5x4 inches, 100 dots-per-inch # self.dpi = 100 self.fig = Figure((5.0, 4.0),", "300, 191, 16)) self.label_9.setObjectName(\"label_9\") self.checkBox_2 = QtWidgets.QCheckBox(VecMap) self.checkBox_2.setGeometry(QtCore.QRect(20, 330, 111, 20)) self.checkBox_2.setObjectName(\"checkBox_2\") self.checkBox_3", "positions\")) self.checkBox_2.setText(_translate(\"VecMap\", \"Refine Oxygen\")) self.checkBox_3.setText(_translate(\"VecMap\", \"Save result plots\")) self.pushButton_4.setText(_translate(\"VecMap\", \"Refine\")) self.label_10.setText(_translate(\"VecMap\", \"<html><head/><body><p>Refine atom", "disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3], data[4], data[5])) disp_data.write('\\n') print('Atomic", "readImage(file): #Load raw image file for process. #Require Hyperspy package s = load(file)", "written with Python 3. 
The author \" \\ \"acknowledges the HyperSpy and Atomap", "initial position of B: typically 3 for [001] and 1 for [110] if", "directory and return the path. for idx in range(-1, -len(file), -1): if file[idx]", "print('='*50) print('Finding the initial positions for B-site atoms...') sublattice_A.construct_zone_axes() #Find the zone axis", "matplotlib.pyplot as plt import math import copy from scipy.spatial import distance from matplotlib_scalebar.scalebar", "for data in disp_O: disp_data.write('{}, {}, {}, {}, {}, {}'.format(data[0], data[1], data[2], data[3],", "to calculate A site in relative to B site; 1 to calculate B", "= QtWidgets.QPushButton(VecMap) self.pushButton_14.setGeometry(QtCore.QRect(110, 680, 80, 41)) self.pushButton_14.setObjectName(\"pushButton_14\") self.lineEdit_3 = QtWidgets.QLineEdit(VecMap) self.lineEdit_3.setGeometry(QtCore.QRect(150, 650, 30," ]
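The row above is a list of word-level n-grams sampled from the source of "VecMap 0.1.1", a PyQt5/Atomap tool for mapping atomic displacements in perovskite STEM images. Among the fragments, one piece of logic is spelled out almost completely: a two-point line in the form A*x + B*y = C and a Cramer's-rule intersection, used together with scipy's cdist to locate the projected centre of a unit cell from its four corner atoms. The sketch below reassembles that logic under stated assumptions: "line" appears by name in the fragments, but "intersection" and "cell_center", the parallel-line fallback, and the demo coordinates are inferred for illustration, not verbatim from the row.

from scipy.spatial import distance

def line(p1, p2):
    # Coefficients (A, B, C) of the line A*x + B*y = C through p1 and p2,
    # matching the "A = (p1[1] - p2[1]) B = (p2[0] ..." fragment above.
    A = p1[1] - p2[1]
    B = p2[0] - p1[0]
    C = p1[0] * p2[1] - p2[0] * p1[1]
    return A, B, -C

def intersection(L1, L2):
    # Cramer's rule, as spelled out piecewise in the fragments:
    # D = L1[0]*L2[1] - L1[1]*L2[0], Dx = L1[2]*L2[1] - L1[1]*L2[2],
    # Dy = L1[0]*L2[2] - L1[2]*L2[0], then x = Dx/D, y = Dy/D if D != 0.
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        return Dx / D, Dy / D
    return None  # assumed fallback: the fragments do not show the else branch

def cell_center(a, b, c, d):
    # Hypothetical wrapper around the "#Find the diagonal of a" fragment:
    # the corner farthest from `a` (cdist(...).argmax()) is its diagonal
    # partner; the cell centre is where the two diagonals cross.
    M = [b, c, d]
    diag_idx = distance.cdist([a], M).argmax()
    L1 = line(a, M[diag_idx])
    del M[diag_idx]
    L2 = line(M[0], M[1])
    return intersection(L1, L2)

print(cell_center((0, 0), (1, 0), (0, 1), (1, 1)))  # -> (0.5, 0.5)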
[ "occurrences += 1 return occurrences def test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1, 1,", "1, 1, 2, 2, 2, 1, 1, 3, 3, 3]), np.array([1, 1]) )", "1, 2, 2, 2, 1, 1, 3, 3, 3]), np.array([1, 1]) ) ==", "count_subset_occurrences(array, subset_array): occurrences = 0 for idx in range(len(array) - len(subset_array) + 1):", "subset_array): occurrences = 0 for idx in range(len(array) - len(subset_array) + 1): if", "for idx in range(len(array) - len(subset_array) + 1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array):", "range(len(array) - len(subset_array) + 1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences += 1", "len(subset_array))], subset_array): occurrences += 1 return occurrences def test_base_case(): assert count_subset_occurrences( np.array([0, 1,", "+= 1 return occurrences def test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1, 1, 2,", "np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences += 1 return occurrences def test_base_case(): assert count_subset_occurrences(", "2, 2, 1, 1, 3, 3, 3]), np.array([1, 1]) ) == 3 test_base_case()", "np def count_subset_occurrences(array, subset_array): occurrences = 0 for idx in range(len(array) - len(subset_array)", "occurrences def test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1, 1, 2, 2, 2, 1,", "1 return occurrences def test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1, 1, 2, 2,", "2, 2, 2, 1, 1, 3, 3, 3]), np.array([1, 1]) ) == 3", "if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences += 1 return occurrences def test_base_case(): assert", "np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]), np.array([1,", "in range(len(array) - len(subset_array) + 1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences +=", "as np def count_subset_occurrences(array, subset_array): occurrences = 0 for idx in range(len(array) -", "len(subset_array) + 1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences += 1 return occurrences", "count_subset_occurrences( np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]),", "numpy as np def count_subset_occurrences(array, subset_array): occurrences = 0 for idx in range(len(array)", "idx in range(len(array) - len(subset_array) + 1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences", "= 0 for idx in range(len(array) - len(subset_array) + 1): if np.array_equal(array[idx:(idx +", "occurrences = 0 for idx in range(len(array) - len(subset_array) + 1): if np.array_equal(array[idx:(idx", "- len(subset_array) + 1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences += 1 return", "+ 1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences += 1 return occurrences def", "return occurrences def test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1, 1, 2, 2, 2,", "def count_subset_occurrences(array, subset_array): occurrences = 0 for idx in range(len(array) - len(subset_array) +", "assert count_subset_occurrences( np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3,", "+ len(subset_array))], subset_array): occurrences += 1 return occurrences def test_base_case(): assert count_subset_occurrences( np.array([0,", "test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3,", "import numpy as np def count_subset_occurrences(array, subset_array): occurrences = 0 for idx in", "1): if np.array_equal(array[idx:(idx + len(subset_array))], subset_array): occurrences += 1 return occurrences def test_base_case():", "def test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1, 1, 2, 2, 2, 1, 1,", "0 for idx in range(len(array) - len(subset_array) + 1): if np.array_equal(array[idx:(idx + len(subset_array))],", "subset_array): occurrences += 1 return occurrences def test_base_case(): assert count_subset_occurrences( np.array([0, 1, 1," ]
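This row's fragments, unlike the VecMap row, overlap enough to reassemble the complete snippet they were sampled from. Joined and re-indented (the n-grams preserve token order but not line breaks, so the indentation is inferred), it is a small NumPy sliding-window counter together with its own test:

import numpy as np

def count_subset_occurrences(array, subset_array):
    # Slide a window of len(subset_array) across array and count exact matches.
    occurrences = 0
    for idx in range(len(array) - len(subset_array) + 1):
        if np.array_equal(array[idx:(idx + len(subset_array))], subset_array):
            occurrences += 1
    return occurrences

def test_base_case():
    assert count_subset_occurrences(
        np.array([0, 1, 1, 1, 2, 2, 2, 1, 1, 3, 3, 3]),
        np.array([1, 1])
    ) == 3

test_base_case()

The window [1, 1] matches at indices 1, 2 and 7 of the test array, so the assertion holds; note that overlapping matches are counted, which is why the run 1, 1, 1 contributes two of the three hits.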
[]
[ "cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] >", "img = cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) &", "as np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] >", "(img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0 cv2.imshow('image', img) cv2.waitKey(0)", "https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R > G > B` import cv2 import", "<gh_stars>100-1000 #!/usr/bin/env python3 # date: 2019.09.24 # https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R", "= cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1]", "# replaca pixel when `R > G > B` import cv2 import numpy", "> G > B` import cv2 import numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png')", "when `R > G > B` import cv2 import numpy as np img", "# https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R > G > B` import cv2", "> B` import cv2 import numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img", "# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0])", "cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0", "import numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[", "G > B` import cv2 import numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png') #", "numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2]", "python3 # date: 2019.09.24 # https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R > G", "pixel when `R > G > B` import cv2 import numpy as np", "import cv2 import numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img,", "np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1])", "#!/usr/bin/env python3 # date: 2019.09.24 # https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R >", "replaca pixel when `R > G > B` import cv2 import numpy as", "= cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] =", "img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ]", "cv2.COLOR_BGR2RGB) img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0 cv2.imshow('image',", "# date: 2019.09.24 # https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R > G >", "2019.09.24 # https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R > G > B` import", "cv2 import numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)", "date: 2019.09.24 # https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/ # replaca pixel when `R > G > B`", "B` import cv2 import numpy as np img = cv2.imread('/home/furas/Obrazy/images/image.png') # img =", "img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0 cv2.imshow('image', img)", "`R > G > B` import cv2 import numpy as np img =" ]
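This row also reassembles completely: a short OpenCV script, linked in its own header comment to the Stack Overflow question it answers, that zeroes every pixel whose channels satisfy R > G > B. It is reproduced below with the original comments verbatim (including the "replaca" typo), minus the dataset's <gh_stars>100-1000 marker, which is repository metadata rather than code; the line breaks are inferred:

#!/usr/bin/env python3
# date: 2019.09.24
# https://stackoverflow.com/questions/58085439/opencv-extract-pixels-with-rbg/
# replaca pixel when `R > G > B`
import cv2
import numpy as np

img = cv2.imread('/home/furas/Obrazy/images/image.png')
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img[ (img[:,:,2] > img[:,:,1]) & (img[:,:,1] > img[:,:,0]) ] = 0
cv2.imshow('image', img)
cv2.waitKey(0)

cv2.imread returns BGR data, so channel 2 is red and channel 0 is blue, and the boolean mask therefore encodes R > G > B directly; the commented-out cvtColor line would flip the image to RGB instead. The numpy import is unused in the fragments, and the hard-coded path is the original author's.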
[ "rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7,", "column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7,", "row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9)", "PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\", \"ACTIONS_SET_BASH_COMMAND_BUTTON\", print(\"TODO\"),", "colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"),", "defined as executing executable file, python file or using list of bash commands.", "rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7,", "print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\",", "row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT", "\"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2,", "\"\"\" Creates view designed to create action and attach it to particular command.", "python file or using list of bash commands. \"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8,", "colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5,", "colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9)", "row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH", "rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7,", "self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13,", "rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2,", "using list of bash commands. 
\"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\",", "row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4,", "as executing executable file, python file or using list of bash commands. \"\"\"", "Creates view designed to create action and attach it to particular command. Action", "action and attach it to particular command. Action can be defined as executing", "self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\",", "COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW", "Api from src.gui.appJar.appjar import gui class ActionsUI: def __init__(self, app): self.__app = app", "class ActionsUI: def __init__(self, app): self.__app = app def append_its_content(self): \"\"\" Creates view", "colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2,", "\"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2,", "to particular command. Action can be defined as executing executable file, python file", "executable file, python file or using list of bash commands. \"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1,", "attach it to particular command. Action can be defined as executing executable file,", "command. Action can be defined as executing executable file, python file or using", "append_its_content(self): \"\"\" Creates view designed to create action and attach it to particular", "be defined as executing executable file, python file or using list of bash", "self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13,", "or using list of bash commands. \"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH", "rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\",", "from src.gui.appJar.appjar import gui class ActionsUI: def __init__(self, app): self.__app = app def", "column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1,", "list of bash commands. 
\"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\",", "column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13,", "executing executable file, python file or using list of bash commands. \"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\",", "row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9)", "rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON", "rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\",", "print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2,", "colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13,", "self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5)", "gui class ActionsUI: def __init__(self, app): self.__app = app def append_its_content(self): \"\"\" Creates", "\"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\", \"ACTIONS_SET_BASH_COMMAND_BUTTON\", print(\"TODO\"), row=13, column=13,", "print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5)", "commands. 
\"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1,", "rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH", "self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\", \"ACTIONS_SET_BASH_COMMAND_BUTTON\",", "\"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2,", "app): self.__app = app def append_its_content(self): \"\"\" Creates view designed to create action", "row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\", \"ACTIONS_SET_BASH_COMMAND_BUTTON\", print(\"TODO\"), row=13, column=13, rowspan=2, colspan=9)", "ActionsUI: def __init__(self, app): self.__app = app def append_its_content(self): \"\"\" Creates view designed", "self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13,", "row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\",", "self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET", "src.api import Api from src.gui.appJar.appjar import gui class ActionsUI: def __init__(self, app): self.__app", "view designed to create action and attach it to particular command. Action can", "of bash commands. 
\"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"),", "row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\",", "column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\", \"ACTIONS_SET_BASH_COMMAND_BUTTON\", print(\"TODO\"), row=13, column=13, rowspan=2, colspan=9) print(\"TODO\")", "src.gui.appJar.appjar import gui class ActionsUI: def __init__(self, app): self.__app = app def append_its_content(self):", "print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5)", "row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\",", "row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"),", "print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5)", "def append_its_content(self): \"\"\" Creates view designed to create action and attach it to", "self.__app = app def append_its_content(self): \"\"\" Creates view designed to create action and", "column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET", "\"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2,", "file, python file or using list of bash commands. \"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1,", "self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5)", "Action can be defined as executing executable file, python file or using list", "rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\",", "= app def append_its_content(self): \"\"\" Creates view designed to create action and attach", "and attach it to particular command. 
Action can be defined as executing executable", "\"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13,", "COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1,", "print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\", \"ACTIONS_SET_BASH_COMMAND_BUTTON\", print(\"TODO\"), row=13, column=13, rowspan=2,", "FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\", \"ACTIONS_SET_BASH_COMMAND_BUTTON\", print(\"TODO\"), row=13,", "rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\",", "FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10,", "\"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\",", "app def append_its_content(self): \"\"\" Creates view designed to create action and attach it", "def __init__(self, app): self.__app = app def append_its_content(self): \"\"\" Creates view designed to", "self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2,", "can be defined as executing executable file, python file or using list of", "NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\",", "colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET BASH COMMAND\",", "colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"),", "designed to create action and attach it to particular command. Action can be", "COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7,", "row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE", "\"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9)", "column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\",", "file or using list of bash commands. 
\"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5)", "__init__(self, app): self.__app = app def append_its_content(self): \"\"\" Creates view designed to create", "COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7,", "self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5)", "self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\", print(\"TODO\"), row=7, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10,", "create action and attach it to particular command. Action can be defined as", "particular command. Action can be defined as executing executable file, python file or", "from src.api import Api from src.gui.appJar.appjar import gui class ActionsUI: def __init__(self, app):", "import Api from src.gui.appJar.appjar import gui class ActionsUI: def __init__(self, app): self.__app =", "column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION", "<gh_stars>0 from src.api import Api from src.gui.appJar.appjar import gui class ActionsUI: def __init__(self,", "COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2, column=13, rowspan=2,", "column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\",", "print(\"TODO\"), row=13, column=1, rowspan=2, colspan=5) self.__app.addListBox(\"ACTIONS_ACTION_LISTBOX\", row=1, column=7, rowspan=5, colspan=5) self.__app.addNamedButton(\"NEW COMMAND\", \"ACTIONS_NEW_COMMAND\",", "colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\",", "column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"), row=10, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET", "colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\", \"ACTIONS_REMOVE_COMMAND\", print(\"TODO\"), row=13, column=7, rowspan=2, colspan=5) self.__app.addLabel(\"ACTIONS_ACTION_NAME_LABEL\", \"ACTION NAME\", row=2,", "column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"EDIT COMMAND\", \"ACTIONS_EDIT_COMMAND\", print(\"TODO\"), row=10, column=7, rowspan=2, colspan=5) self.__app.addNamedButton(\"REMOVE COMMAND\",", "bash commands. 
\"\"\" self.__app.addListBox(\"ACTIONS_COMMAND_LISTBOX\", row=1, column=1, rowspan=8, colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10,", "import gui class ActionsUI: def __init__(self, app): self.__app = app def append_its_content(self): \"\"\"", "\"ACTION NAME\", row=2, column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE", "column=13, rowspan=2, colspan=9) self.__app.addEntry(\"ACTIONS_ACTION_NAME_ENTRY\", row=4, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"),", "colspan=5) self.__app.addNamedButton(\"ATTACH COMMAND\", \"ACTIONS_ATTACH_COMMAND\", print(\"TODO\"), row=10, column=1, rowspan=2, colspan=5) self.__app.addNamedButton(\"DETACH COMMAND\", \"ACTIONS_DETACH_COMMAND\", print(\"TODO\"),", "EXE FILE\", \"ACTIONS_SET_EXE_FILE_BUTTON\", print(\"TODO\"), row=7, column=13, rowspan=2, colspan=9) self.__app.addNamedButton(\"SET PYTHON FILE\", \"ACTIONS_SET_PY_FILE_BUTTON\", print(\"TODO\"),", "it to particular command. Action can be defined as executing executable file, python", "to create action and attach it to particular command. Action can be defined" ]
# -*- coding: utf-8 -*-
"""
@Module __init__.py
@Author ROOT
"""
from .SAFramelessHelper import SAFramelessHelper
from .SARibbonBar import SARibbonBar
from .SARibbonButtonGroupWidget import SARibbonButtonGroupWidget
from .SARibbonCategory import SARibbonCategory
from .SARibbonCategoryLayout import SARibbonCategoryLayout
from .SARibbonContextCategory import SARibbonContextCategory
from .SARibbonGallery import SARibbonGallery
from .SARibbonMainWindow import SARibbonMainWindow
from .SARibbonPannel import SARibbonPannel
from .SARibbonPannelLayout import SARibbonPannelLayout
from .SARibbonQuickAccessBar import SARibbonQuickAccessBar
from .SAWindowButtonGroup import SAWindowButtonGroup
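# Optional: an explicit __all__ makes `from <package> import *` predictable.
# The list simply mirrors the re-exports above; it introduces no new names.
__all__ = [
    "SAFramelessHelper", "SARibbonBar", "SARibbonButtonGroupWidget",
    "SARibbonCategory", "SARibbonCategoryLayout", "SARibbonContextCategory",
    "SARibbonGallery", "SARibbonMainWindow", "SARibbonPannel",
    "SARibbonPannelLayout", "SARibbonQuickAccessBar", "SAWindowButtonGroup",
]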
from .. import db, login_manager
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash


@login_manager.user_loader
def load_user(id):
    return User.query.get(int(id))


class User(UserMixin, db.Model):
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    firstname = db.Column(db.String(255))
    secondname = db.Column(db.String(255))
    username = db.Column(db.String(255), unique=True)
    email = db.Column(db.String(255), unique=True, index=True)
    profile_picture = db.Column(db.String())
    profile_bio = db.Column(db.String(255))
    secured_password = db.Column(db.String(255))
    blog_posts_by_me = db.relationship('Blog', backref='myblogposts', lazy='dynamic')
    blog_comments_by_me = db.relationship('BlogComment', backref='myblogcomments', lazy='dynamic')

    @property
    def password(self):
        # The plain-text password is write-only; only the hash is stored.
        raise AttributeError("You cannot view a user's password")

    @password.setter
    def password(self, password):
        self.secured_password = generate_password_hash(password)

    def verify_password(self, password):
        return check_password_hash(self.secured_password, password)

    def save_user(self):
        # Persist this user (add + commit) in one call.
        db.session.add(self)
        db.session.commit()
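# A short sketch of how the write-only password property is meant to be used,
# assuming an active Flask app context and an initialized db; the literal
# values and function name are illustrative.
def demo_user_flow():
    user = User(username="demo", email="demo@example.com")
    user.password = "s3cret"               # stored hashed by the setter
    assert user.verify_password("s3cret")  # compared against the stored hash
    user.save_user()                       # add + commit in one call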
# sitykelib/modifyer.py
from sitykelib.sercher import perfix
import os


def get_pdf_name(original_name):
    # k2pdfopt operates on the PDF export of the source file.
    return perfix(original_name) + '.pdf'


def cut_ppt(pptfiles):
    # Crop slide PDFs to a fixed 1364x1016 page without reflowing (-mode copy).
    new_pptfiles = list()
    arg = ' -fc- -odpi 300 -mode copy -w 1364 -h 1016 -o [Cut]%s '
    for pptfile in pptfiles:
        pdfname = get_pdf_name(pptfile)
        os.system('k2pdfopt' + arg + '"' + pdfname + '"')
        new_pptfiles.append('[Cut]' + pdfname)
    return new_pptfiles


def reform_doc(docfiles):
    # Reflow document PDFs at 300 dpi.
    new_docfiles = list()
    arg = ' -fc- -odpi 300 -o [Reformed]%s '
    for docfile in docfiles:
        pdfname = get_pdf_name(docfile)
        os.system('k2pdfopt' + arg + '"' + pdfname + '"')
        new_docfiles.append('[Reformed]' + pdfname)
    return new_docfiles


def dark_mode(pdffiles):
    # Invert colors (-neg) to produce a dark-mode copy.
    new_pdffiles = list()
    arg = ' -fc- -odpi 300 -mode copy -neg -o [Dark]%s '
    for file in pdffiles:
        pdfname = get_pdf_name(file)
        os.system('k2pdfopt' + arg + '"' + pdfname + '"')
        new_pdffiles.append('[Dark]' + pdfname)
    return new_pdffiles
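# The three helpers above differ only in their extra k2pdfopt flags and output
# prefix. A sketch of a shared worker using subprocess argument lists, which
# sidesteps the manual shell quoting; _run_k2pdfopt is illustrative, not part
# of sitykelib, and the k2pdfopt flags are taken from the strings above.
import subprocess

def _run_k2pdfopt(files, extra_args, prefix):
    outputs = []
    for name in files:
        pdfname = get_pdf_name(name)
        subprocess.run(
            ["k2pdfopt", "-fc-", "-odpi", "300", *extra_args,
             "-o", prefix + "%s", pdfname],
            check=True,
        )
        outputs.append(prefix + pdfname)
    return outputs

# e.g. dark_mode(files) is then equivalent to:
#     _run_k2pdfopt(files, ["-mode", "copy", "-neg"], "[Dark]")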
from django.db import models
# https://docs.djangoproject.com/fr/3.1/intro/tutorial02/
# reverse() is used to build the URLs returned by get_absolute_url().
from django.urls import reverse
from django.contrib.auth.models import User


# Create your models here.
class BlogCategory(models.Model):
    # Fields
    category_name = models.CharField('Category', max_length=255, unique=True, blank=True, null=True)
    category_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True)

    # https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models
    # Metadata
    class Meta:
        verbose_name = 'Post Category'
        verbose_name_plural = 'Posts Categories'

    # Methods
    def get_absolute_url(self):
        """Required by Django when you want to display the detail of an object."""
        return reverse('blog-detail', args=[str(self.id)])

    def __str__(self):
        """Required by Django to represent objects from the database."""
        return self.category_name


class BlogPost(models.Model):
    # Fields
    options = (
        ('draft', 'Private'),
        ('published', 'Public'),
    )
    blog_title = models.CharField('Title', max_length=255)
    blog_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True)
    blog_description = models.CharField('Description', max_length=255, blank=True, null=True)
    blog_picture = models.ImageField('Head picture', blank=True, null=True, upload_to='blog')
    blog_content = models.TextField('Text')
    blog_file = models.FileField('File', blank=True, null=True, upload_to='blog')
    blog_created = models.DateTimeField('Created', auto_now_add=True)
    blog_updated = models.DateTimeField('Updated', auto_now=True)
    seo_title = models.SlugField('Seo title', max_length=60, unique=True, blank=True, null=True)
    seo_description = models.SlugField('Seo description', max_length=165, unique=True, blank=True, null=True)
    # https://www.youtube.com/watch?v=_ph8GF84fX4
    blog_author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='Author', blank=True, null=True)
    # https://www.youtube.com/watch?v=jFqYuWNyLnI
    blog_category = models.ForeignKey(BlogCategory, on_delete=models.CASCADE, verbose_name='Category', blank=True, null=True)
    blog_favorite = models.BooleanField('Favorite', default=False)  # a real bool; the string 'False' would be truthy
    blog_status = models.CharField('Status', max_length=12, choices=options, default='draft')

    # https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models
    # Metadata
    class Meta:
        verbose_name = 'Post'
        verbose_name_plural = 'Posts'
        ordering = ('blog_created',)

    # Methods
    def get_absolute_url(self):
        """Required by Django when you want to display the detail of an object."""
        return reverse('blog-detail', args=[str(self.id)])

    def __str__(self):
        """Required by Django to represent objects from the database."""
        return self.blog_title

### End
models.ImageField('Head picture', blank=True,", "blank=True, null=True) blog_picture = models.ImageField('Head picture', blank=True, null=True, upload_to='blog') blog_content = models.TextField('Texte') blog_file", "('published', 'Public'), ) blog_title = models.CharField('Title', max_length=255) blog_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True,", "la base de données.\"\"\" return self.category_name class BlogPost(models.Model): # Fields options = (", "= models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True) # https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models # Metadata class Meta: verbose_name", "( ('draft', 'Private'), ('published', 'Public'), ) blog_title = models.CharField('Title', max_length=255) blog_slug = models.SlugField('Slug',", "models.ImageField('Head picture', blank=True, null=True, upload_to='blog') blog_content = models.TextField('Texte') blog_file = models.FileField('File', blank=True, null=True,", "upload_to='blog') blog_created = models.DateTimeField('Created', auto_now_add=True) blog_updated = models.DateTimeField('Updated', auto_now=True) seo_title = models.SlugField('Seo title',", "on_delete=models.CASCADE, verbose_name='Author', blank=True, null=True) # https://www.youtube.com/watch?v=jFqYuWNyLnI blog_category = models.ForeignKey(BlogCategory, on_delete=models.CASCADE, verbose_name='Category', blank=True, null=True)", "models # https://docs.djangoproject.com/fr/3.1/intro/tutorial02/ # Cette fonction est utilisée pour formater les URL from", "max_length=255, unique=True, blank=True, null=True) category_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True) # https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models", "blog_description = models.CharField('Description', max_length=255, blank=True, null=True) blog_picture = models.ImageField('Head picture', blank=True, null=True, upload_to='blog')", "from django.contrib.auth.models import User # Create your models here. 
class BlogCategory(models.Model): # Fields", "null=True, upload_to='blog') blog_content = models.TextField('Texte') blog_file = models.FileField('File', blank=True, null=True, upload_to='blog') blog_created =", "# https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models # Metadata class Meta: verbose_name = 'Post' verbose_name_plural = 'Posts' ordering", "verbose_name_plural = 'Posts Categories' # Methods def get_absolute_url(self): \"\"\"Cette fonction est requise pas", "verbose_name = 'Post Category' verbose_name_plural = 'Posts Categories' # Methods def get_absolute_url(self): \"\"\"Cette", "est utilisée pour formater les URL from django.urls import reverse from django.contrib.auth.models import", "seo_title = models.SlugField('Seo title', max_length=60, unique=True, blank=True, null=True) seo_description = models.SlugField('Seo description', max_length=165,", "Cette fonction est utilisée pour formater les URL from django.urls import reverse from", "# Metadata class Meta: verbose_name = 'Post Category' verbose_name_plural = 'Posts Categories' #", "manipuler les objets dans la base de données.\"\"\" return self.category_name class BlogPost(models.Model): #", "BlogPost(models.Model): # Fields options = ( ('draft', 'Private'), ('published', 'Public'), ) blog_title =", "models.DateTimeField('Updated', auto_now=True) seo_title = models.SlugField('Seo title', max_length=60, unique=True, blank=True, null=True) seo_description = models.SlugField('Seo", "null=True) seo_description = models.SlugField('Seo description', max_length=165, unique=True, blank=True, null=True) # https://www.youtube.com/watch?v=_ph8GF84fX4 blog_author =", "pas Django, lorsque vous souhaitez détailler le contenu d'un objet.\"\"\" return reverse('blog-detail', args=[str(self.id)])", "blank=True, null=True, upload_to='blog') blog_created = models.DateTimeField('Created', auto_now_add=True) blog_updated = models.DateTimeField('Updated', auto_now=True) seo_title =", "def get_absolute_url(self): \"\"\"Cette fonction est requise pas Django, lorsque vous souhaitez détailler le", "# Create your models here. 
class BlogCategory(models.Model): # Fields category_name = models.CharField('Category', max_length=255,", "null=True) blog_description = models.CharField('Description', max_length=255, blank=True, null=True) blog_picture = models.ImageField('Head picture', blank=True, null=True,", "models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='Author', blank=True, null=True) # https://www.youtube.com/watch?v=jFqYuWNyLnI blog_category = models.ForeignKey(BlogCategory, on_delete=models.CASCADE, verbose_name='Category', blank=True,", "class BlogCategory(models.Model): # Fields category_name = models.CharField('Category', max_length=255, unique=True, blank=True, null=True) category_slug =", "class Meta: verbose_name = 'Post Category' verbose_name_plural = 'Posts Categories' # Methods def", "verbose_name='Author', blank=True, null=True) # https://www.youtube.com/watch?v=jFqYuWNyLnI blog_category = models.ForeignKey(BlogCategory, on_delete=models.CASCADE, verbose_name='Category', blank=True, null=True) blog_favorite", "null=True) category_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True) # https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models # Metadata class", "= models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='Author', blank=True, null=True) # https://www.youtube.com/watch?v=jFqYuWNyLnI blog_category = models.ForeignKey(BlogCategory, on_delete=models.CASCADE, verbose_name='Category',", "max_length=12, choices=options, default='draft') # https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models # Metadata class Meta: verbose_name = 'Post' verbose_name_plural", ") # Methods def get_absolute_url(self): \"\"\"Cette fonction est requise pas Django, lorsque vous", "models.CharField('Title', max_length=255) blog_slug = models.SlugField('Slug', max_length=255, unique=True, blank=True, null=True) blog_description = models.CharField('Description', max_length=255,", "Fields category_name = models.CharField('Category', max_length=255, unique=True, blank=True, null=True) category_slug = models.SlugField('Slug', max_length=255, unique=True,", "# https://developer.mozilla.org/fr/docs/Learn/Server-side/Django/Models # Metadata class Meta: verbose_name = 'Post Category' verbose_name_plural = 'Posts", "blank=True, null=True) # https://www.youtube.com/watch?v=jFqYuWNyLnI blog_category = models.ForeignKey(BlogCategory, on_delete=models.CASCADE, verbose_name='Category', blank=True, null=True) blog_favorite =", "seo_description = models.SlugField('Seo description', max_length=165, unique=True, blank=True, null=True) # https://www.youtube.com/watch?v=_ph8GF84fX4 blog_author = models.ForeignKey(User,", "= 'Post' verbose_name_plural = 'Posts' ordering = ('blog_created', ) # Methods def get_absolute_url(self):", "d'un objet.\"\"\" return reverse('blog-detail', args=[str(self.id)]) def __str__(self): \"\"\"Fonction requise par Django pour manipuler", "('draft', 'Private'), ('published', 'Public'), ) blog_title = models.CharField('Title', max_length=255) blog_slug = models.SlugField('Slug', max_length=255,", "Meta: verbose_name = 'Post Category' verbose_name_plural = 'Posts Categories' # Methods def get_absolute_url(self):" ]
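# --- Hedged sketch, appended after the original file. ---
# reverse('blog-detail', args=[str(self.id)]) above only resolves if the
# project declares a URL pattern named 'blog-detail'. The view and template
# path below are hypothetical minimal matches, not code from this repository.
from django.urls import path
from django.views.generic import DetailView


class BlogDetailView(DetailView):
    model = BlogPost                          # the model defined above
    template_name = 'blog/blog_detail.html'   # assumed template location


urlpatterns = [
    # name= must match the first argument of reverse() in get_absolute_url
    path('blog/<int:pk>/', BlogDetailView.as_view(), name='blog-detail'),
]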
[ "-> Command2[P]: return Command2(handler) def func2( self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]:", "Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) -> None: ... class Application2: def func1(self, handler:", "func2( self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]:", "decorator def handler(arg1: int, arg2: str) -> dict[str, Any]: ... v1: CommandHandler2 =", "def handler(arg1: int, arg2: str) -> dict[str, Any]: ... v1: CommandHandler2 = handler", "-> Command1[P]: return Command1(handler) def func2( self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]:", "Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) -> None: ... class Application1: def func1(self, handler:", "# This sample tests the case where a ParamSpec is used within a", "handler: CommandHandler2[P]) -> None: ... class Application2: def func1(self, handler: CommandHandler2[P]) -> Command2[P]:", "handler: CommandHandler1[P]) -> None: ... class Application1: def func1(self, handler: CommandHandler1[P]) -> Command1[P]:", "typing import Any, Callable, Generic, Protocol from typing_extensions import Concatenate, ParamSpec P =", "This sample tests the case where a ParamSpec is used within a generic", "def func1(self, handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler) def func2( self, handler: CommandHandler2[P],", "# Example 1: Callable generic type alias CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]]", ") -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return decorator", "Any]] class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) -> None: ... class Application1: def", "handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler) def func2( self, handler: CommandHandler2[P], ) ->", "# type alias with a Callable. from typing import Any, Callable, Generic, Protocol", "Command1(handler) def func2( self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P])", "where a ParamSpec is used within a generic # type alias with a", "a generic # type alias with a Callable. from typing import Any, Callable,", "func1(self, handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler) def func2( self, handler: CommandHandler1[P], )", "def func1(self, handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler) def func2( self, handler: CommandHandler1[P],", "... class Application1: def func1(self, handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler) def func2(", "self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]: return", "decorator # Example 2: Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs:", "CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return", "CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, Any]: ... 
class Command2(Generic[P]):", "Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return decorator def handler(arg1: int,", "def __init__(self, handler: CommandHandler2[P]) -> None: ... class Application2: def func1(self, handler: CommandHandler2[P])", "None: ... class Application2: def func1(self, handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler) def", "with a Callable. from typing import Any, Callable, Generic, Protocol from typing_extensions import", "def __init__(self, handler: CommandHandler1[P]) -> None: ... class Application1: def func1(self, handler: CommandHandler1[P])", "Concatenate, ParamSpec P = ParamSpec(\"P\") # Example 1: Callable generic type alias CommandHandler1", "Command2(handler) def func2( self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P])", "decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return decorator # Example 2: Callback Protocol", "def func2( self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) ->", "CommandHandler2[P]) -> Command2[P]: return Command2(handler) def func2( self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]],", "return Command1(handler) def func2( self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler:", "-> Command1[P]: return self.func1(handler) return decorator # Example 2: Callback Protocol class CommandHandler2(Protocol[P]):", "Command2[P]: return Command2(handler) def func2( self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def", "type alias CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]): def __init__(self, handler:", "alias CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P])", "CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) ->", ") -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return decorator", "generic # type alias with a Callable. from typing import Any, Callable, Generic,", "Generic, Protocol from typing_extensions import Concatenate, ParamSpec P = ParamSpec(\"P\") # Example 1:", "Example 1: Callable generic type alias CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]] class", "Example 2: Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs) ->", "generic type alias CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]): def __init__(self,", "return self.func1(handler) return decorator def handler(arg1: int, arg2: str) -> dict[str, Any]: ...", "return decorator def handler(arg1: int, arg2: str) -> dict[str, Any]: ... v1: CommandHandler2", "def __call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, Any]: ... class Command2(Generic[P]): def", "**kwargs: P.kwargs) -> dict[str, Any]: ... class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) ->", "return Command2(handler) def func2( self, handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler:", "a Callable. 
from typing import Any, Callable, Generic, Protocol from typing_extensions import Concatenate,", "CommandHandler2[P]) -> None: ... class Application2: def func1(self, handler: CommandHandler2[P]) -> Command2[P]: return", "P.args, **kwargs: P.kwargs) -> dict[str, Any]: ... class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P])", "P], dict[str, Any]] class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) -> None: ... class", "ParamSpec(\"P\") # Example 1: Callable generic type alias CommandHandler1 = Callable[Concatenate[int, P], dict[str,", "a ParamSpec is used within a generic # type alias with a Callable.", "Command2[P]: return self.func1(handler) return decorator def handler(arg1: int, arg2: str) -> dict[str, Any]:", "Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return decorator # Example", "= ParamSpec(\"P\") # Example 1: Callable generic type alias CommandHandler1 = Callable[Concatenate[int, P],", "2: Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str,", "handler: CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler)", "def func2( self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) ->", "Callable generic type alias CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]): def", "Any]: ... class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) -> None: ... class Application2:", "CommandHandler1[P]) -> None: ... class Application1: def func1(self, handler: CommandHandler1[P]) -> Command1[P]: return", "Callable, Generic, Protocol from typing_extensions import Concatenate, ParamSpec P = ParamSpec(\"P\") # Example", "the case where a ParamSpec is used within a generic # type alias", "type alias with a Callable. from typing import Any, Callable, Generic, Protocol from", "CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return decorator # Example 2: Callback Protocol class", "CommandHandler2[P], ) -> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return", "sample tests the case where a ParamSpec is used within a generic #", "-> Command2[P]: return self.func1(handler) return decorator def handler(arg1: int, arg2: str) -> dict[str,", "P.kwargs) -> dict[str, Any]: ... class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) -> None:", "-> dict[str, Any]: ... class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) -> None: ...", "def decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return decorator # Example 2: Callback", "return decorator # Example 2: Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args,", "import Any, Callable, Generic, Protocol from typing_extensions import Concatenate, ParamSpec P = ParamSpec(\"P\")", "... class Application2: def func1(self, handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler) def func2(", "dict[str, Any]: ... class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) -> None: ... class", "within a generic # type alias with a Callable. 
from typing import Any,", "class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, Any]: ... class", "1: Callable generic type alias CommandHandler1 = Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]):", "Application1: def func1(self, handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler) def func2( self, handler:", "decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return decorator def handler(arg1: int, arg2: str)", "self.func1(handler) return decorator def handler(arg1: int, arg2: str) -> dict[str, Any]: ... v1:", "return self.func1(handler) return decorator # Example 2: Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self,", "handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler) def func2( self, handler: CommandHandler1[P], ) ->", "Command1[P]: return Command1(handler) def func2( self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def", "tests the case where a ParamSpec is used within a generic # type", "Protocol from typing_extensions import Concatenate, ParamSpec P = ParamSpec(\"P\") # Example 1: Callable", "Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, Any]: ...", "func2( self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]:", "-> None: ... class Application2: def func1(self, handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler)", "typing_extensions import Concatenate, ParamSpec P = ParamSpec(\"P\") # Example 1: Callable generic type", "def decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return decorator def handler(arg1: int, arg2:", "= Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) -> None:", "CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return decorator def handler(arg1: int, arg2: str) ->", "Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return decorator # Example 2:", "func1(self, handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler) def func2( self, handler: CommandHandler2[P], )", "# Example 2: Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs)", "case where a ParamSpec is used within a generic # type alias with", "Any, Callable, Generic, Protocol from typing_extensions import Concatenate, ParamSpec P = ParamSpec(\"P\") #", "__init__(self, handler: CommandHandler1[P]) -> None: ... class Application1: def func1(self, handler: CommandHandler1[P]) ->", "Application2: def func1(self, handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler) def func2( self, handler:", "... class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) -> None: ... class Application2: def", "used within a generic # type alias with a Callable. from typing import", "-> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler) return decorator #", "self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]: return", "__init__(self, handler: CommandHandler2[P]) -> None: ... 
class Application2: def func1(self, handler: CommandHandler2[P]) ->", "dict[str, Any]] class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) -> None: ... class Application1:", "<reponame>kihoonim/pyright # This sample tests the case where a ParamSpec is used within", "class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) -> None: ... class Application1: def func1(self,", "Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return decorator def handler(arg1:", "alias with a Callable. from typing import Any, Callable, Generic, Protocol from typing_extensions", "CommandHandler1[P]) -> Command1[P]: return Command1(handler) def func2( self, handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]],", "from typing import Any, Callable, Generic, Protocol from typing_extensions import Concatenate, ParamSpec P", "Callable[Concatenate[int, P], dict[str, Any]] class Command1(Generic[P]): def __init__(self, handler: CommandHandler1[P]) -> None: ...", "class Application1: def func1(self, handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler) def func2( self,", "-> None: ... class Application1: def func1(self, handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler)", "is used within a generic # type alias with a Callable. from typing", "from typing_extensions import Concatenate, ParamSpec P = ParamSpec(\"P\") # Example 1: Callable generic", "Command1[P]: return self.func1(handler) return decorator # Example 2: Callback Protocol class CommandHandler2(Protocol[P]): def", "ParamSpec P = ParamSpec(\"P\") # Example 1: Callable generic type alias CommandHandler1 =", "Callable. from typing import Any, Callable, Generic, Protocol from typing_extensions import Concatenate, ParamSpec", "Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, Any]:", "-> Callable[[CommandHandler2[P]], Command2[P]]: def decorator(handler: CommandHandler2[P]) -> Command2[P]: return self.func1(handler) return decorator def", "class Command2(Generic[P]): def __init__(self, handler: CommandHandler2[P]) -> None: ... class Application2: def func1(self,", "*args: P.args, **kwargs: P.kwargs) -> dict[str, Any]: ... class Command2(Generic[P]): def __init__(self, handler:", "__call__(self, *args: P.args, **kwargs: P.kwargs) -> dict[str, Any]: ... class Command2(Generic[P]): def __init__(self,", "handler: CommandHandler1[P], ) -> Callable[[CommandHandler1[P]], Command1[P]]: def decorator(handler: CommandHandler1[P]) -> Command1[P]: return self.func1(handler)", "class Application2: def func1(self, handler: CommandHandler2[P]) -> Command2[P]: return Command2(handler) def func2( self,", "import Concatenate, ParamSpec P = ParamSpec(\"P\") # Example 1: Callable generic type alias", "None: ... class Application1: def func1(self, handler: CommandHandler1[P]) -> Command1[P]: return Command1(handler) def", "ParamSpec is used within a generic # type alias with a Callable. from", "P = ParamSpec(\"P\") # Example 1: Callable generic type alias CommandHandler1 = Callable[Concatenate[int,", "self.func1(handler) return decorator # Example 2: Callback Protocol class CommandHandler2(Protocol[P]): def __call__(self, *args:" ]
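# --- Hedged usage sketch (these names are illustrative, not from the test). ---
# Both encodings should let a type checker carry handler's exact parameter
# list through to the resulting Command object via the captured ParamSpec.
app2 = Application2()
cmd_a = app2.func1(handler)           # expected: Command2[(arg1: int, arg2: str)]
cmd_b = app2.func2(handler)(handler)  # same capture through the decorator form


# For the Callable alias, Concatenate[int, P] prepends a positional int, so a
# compatible handler takes an int first and P captures the remaining params:
def handler1(prefix: int, arg2: str) -> dict[str, Any]:
    ...


app1 = Application1()
cmd_c = app1.func1(handler1)          # expected: Command1[(arg2: str)]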
[ "you comment out all the palette.set* lines, you will see # all the", "2'\", type=str) parser.add_argument('--seed', default=444, help='fix random seed', type=int) parser.add_argument('--version', help='version control', type=str) parser.add_argument('--resume',", "colors, with unevenly spaced boundaries. im = ax2.imshow(Zm, interpolation='nearest', cmap=palette, norm=colors.BoundaryNorm([-1, -0.5, -0.2,", "parser.add_argument('--model', dest='model', choices=['small', 'large'], default='small', type=str) parser.add_argument('--split', dest='train_split', choices=['train', 'train+val', 'train+val+vg'], help=\"set training", "return args def main(): opt = Cfgs() args = parse_args() args_dict = opt.parse_to_dict(args)", "shrink=0.9, ax=ax2) cbar.set_label('proportional') fig.suptitle('imshow, with out-of-range and masked data') f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg')", "to save the ' 'prediction vectors' '(only work in testing)', type=bool) parser.add_argument('--batch_size', default=1,", "Set up a colormap: # use copy so that we do not mutate", "fig.suptitle('imshow, with out-of-range and masked data') f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg') plt.savefig(f1) plt.close() if", "print('Hyper Parameters:') print(opt) opt.check_path() execution = Execution(opt) execution.run(opt.run_mode) def text_layout(): # compute some", "some interesting data x0, x1 = -5, 5 y0, y1 = -3, 3", "I/O speed', type=bool) parser.add_argument('--gpu', default='0,1', help=\"gpu select, eg.'0, 1, 2'\", type=str) parser.add_argument('--seed', default=444,", "opt.proc() print('Hyper Parameters:') print(opt) opt.check_path() execution = Execution(opt) execution.run(opt.run_mode) def text_layout(): # compute", "the default. # If you comment out all the palette.set* lines, you will", "parser.parse_args() return args def main(): opt = Cfgs() args = parse_args() args_dict =", "ax2) = plt.subplots(nrows=2, figsize=(6, 5.4)) # plot using 'continuous' color map im =", "plt from matplotlib.patches import Rectangle from matplotlib import colors from cfgs.base_cfgs import Cfgs", "is applied. 
# Anything above that range is colored based on palette.set_over, etc.", "help='pre-load the features into memory' 'to increase the I/O speed', type=bool) parser.add_argument('--gpu', default='0,1',", "train with \" \"'train' split)\", type=bool) parser.add_argument('--test_save_pred', help='set True to save the '", "# -------------------------------------------------------- import os from copy import copy import numpy as np import", "parser.add_argument('--ckpt_epoch', help='checkpoint epoch', type=int) parser.add_argument('--ckpt_path', help='load checkpoint path, we ' 'recommend that you", "extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform') for ticklabel in ax1.xaxis.get_ticklabels(): ticklabel.set_visible(False) # Plot using a", "not mutate the global colormap instance palette = copy(plt.cm.gray) palette.set_over('r', 1.0) palette.set_under('g', 1.0)", "- Y**2) Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2) Z =", "path', type=str) parser.add_argument('--feature_path', help='bottom up features root path', type=str) args = parser.parse_args() return", "when an epoch finished' \"(only work when train with \" \"'train' split)\", type=bool)", "**args_dict} opt.add_args(args_dict) opt.proc() print('Hyper Parameters:') print(opt) opt.check_path() execution = Execution(opt) execution.run(opt.run_mode) def text_layout():", "import copy import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import", "data x0, x1 = -5, 5 y0, y1 = -3, 3 x =", "(Z1 - Z2) * 2 # Set up a colormap: # use copy", "split when an epoch finished' \"(only work when train with \" \"'train' split)\",", "interpolation='bilinear', cmap=palette, norm=colors.Normalize(vmin=-1.0, vmax=1.0), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax1.set_title('Green=low, Red=high, Blue=masked')", "type=str) parser.add_argument('--ckpt_epoch', help='checkpoint epoch', type=int) parser.add_argument('--ckpt_path', help='load checkpoint path, we ' 'recommend that", "y1]) ax1.set_title('Green=low, Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform') for ticklabel", "1)**2 - (Y - 1)**2) Z = (Z1 - Z2) * 2 #", "as plt from matplotlib.patches import Rectangle from matplotlib import colors from cfgs.base_cfgs import", "help='checkpoint version', type=str) parser.add_argument('--ckpt_epoch', help='checkpoint epoch', type=int) parser.add_argument('--ckpt_path', help='load checkpoint path, we '", "= (Z1 - Z2) * 2 # Set up a colormap: # use", "import Execution import argparse, yaml def parse_args(): \"\"\" Parse input arguments \"\"\" parser", "'(only work in testing)', type=bool) parser.add_argument('--batch_size', default=1, # was 256 help='batch size during", "will see # all the defaults; under and over will be colored with", "extend='both', spacing='proportional', shrink=0.9, ax=ax2) cbar.set_label('proportional') fig.suptitle('imshow, with out-of-range and masked data') f1 =", "# range to which the regular palette color scale is applied. 
# Anything", "do not mutate the global colormap instance palette = copy(plt.cm.gray) palette.set_over('r', 1.0) palette.set_under('g',", "BoundaryNorm') cbar = fig.colorbar(im, extend='both', spacing='proportional', shrink=0.9, ax=ax2) cbar.set_label('proportional') fig.suptitle('imshow, with out-of-range and", "type=str) parser.add_argument('--feature_path', help='bottom up features root path', type=str) args = parser.parse_args() return args", "type=int) parser.add_argument('--preload', help='pre-load the features into memory' 'to increase the I/O speed', type=bool)", "# compute some interesting data x0, x1 = -5, 5 y0, y1 =", "Args') parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test', 'visualize'], type=str, default='train') parser.add_argument('--model', dest='model', choices=['small', 'large'],", "number of colors, with unevenly spaced boundaries. im = ax2.imshow(Zm, interpolation='nearest', cmap=palette, norm=colors.BoundaryNorm([-1,", "and over will be colored with the # first and last colors in", "testing)', type=bool) parser.add_argument('--batch_size', default=1, # was 256 help='batch size during training', type=int) parser.add_argument('--max_epoch',", "3 x = np.linspace(x0, x1, 500) y = np.linspace(y0, y1, 500) X, Y", "speed', type=bool) parser.add_argument('--gpu', default='0,1', help=\"gpu select, eg.'0, 1, 2'\", type=str) parser.add_argument('--seed', default=444, help='fix", "colormap instance palette = copy(plt.cm.gray) palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0) # Alternatively,", "1, 2'\", type=str) parser.add_argument('--seed', default=444, help='fix random seed', type=int) parser.add_argument('--version', help='version control', type=str)", "'train+val', 'train+val+vg'], help=\"set training split, \" \"eg.'train', 'train+val+vg'\" \"set 'train' can trigger the", "5.4)) # plot using 'continuous' color map im = ax1.imshow(Zm, interpolation='bilinear', cmap=palette, norm=colors.Normalize(vmin=-1.0,", "type=bool) parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset root path', type=str) parser.add_argument('--feature_path', help='bottom", "ticklabel.set_visible(False) # Plot using a small number of colors, with unevenly spaced boundaries.", "Z1 = np.exp(-X**2 - Y**2) Z2 = np.exp(-(X - 1)**2 - (Y -", "type=bool) parser.add_argument('--batch_size', default=1, # was 256 help='batch size during training', type=int) parser.add_argument('--max_epoch', help='max", "'train' can trigger the \" \"eval after every epoch\", type=str) parser.add_argument('--eval_every_epoch', default=False, help='set", "parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test', 'visualize'], type=str, default='train') parser.add_argument('--model', dest='model', choices=['small', 'large'], default='small',", "use # palette.set_bad(alpha = 0.0) # to make the bad region transparent. 
This", "cfg_file = \"cfgs/{}_model.yml\".format(args.model) with open(cfg_file, 'r') as f: yaml_dict = yaml.load(f, Loader=yaml.FullLoader) args_dict", "# all the defaults; under and over will be colored with the #", "a colormap: # use copy so that we do not mutate the global", "defaults; under and over will be colored with the # first and last", "ncolors=palette.N), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax2.set_title('With BoundaryNorm') cbar = fig.colorbar(im, extend='both',", "import Cfgs from core.exec import Execution import argparse, yaml def parse_args(): \"\"\" Parse", "extent=[x0, x1, y0, y1]) ax2.set_title('With BoundaryNorm') cbar = fig.colorbar(im, extend='both', spacing='proportional', shrink=0.9, ax=ax2)", "memory', type=bool) parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset root path', type=str) parser.add_argument('--feature_path',", "np.exp(-X**2 - Y**2) Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2) Z", "Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform') for ticklabel in ax1.xaxis.get_ticklabels(): ticklabel.set_visible(False)", "1.2, Z) # By setting vmin and vmax in the norm, we establish", "\"set 'train' can trigger the \" \"eval after every epoch\", type=str) parser.add_argument('--eval_every_epoch', default=False,", "True to evaluate the ' 'val split when an epoch finished' \"(only work", "parser.add_argument('--pin_mem', help='use pin memory', type=bool) parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset root", "vectors' '(only work in testing)', type=bool) parser.add_argument('--batch_size', default=1, # was 256 help='batch size", "argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test', 'visualize'], type=str, default='train') parser.add_argument('--model', dest='model', choices=['small',", "im = ax1.imshow(Zm, interpolation='bilinear', cmap=palette, norm=colors.Normalize(vmin=-1.0, vmax=1.0), aspect='auto', origin='lower', extent=[x0, x1, y0, y1])", "split, \" \"eg.'train', 'train+val+vg'\" \"set 'train' can trigger the \" \"eval after every", "print', type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset root path', type=str) parser.add_argument('--feature_path', help='bottom up features root", "help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use pin memory', type=bool) parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path',", "type=bool) parser.add_argument('--test_save_pred', help='set True to save the ' 'prediction vectors' '(only work in", "parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset root path', type=str) parser.add_argument('--feature_path', help='bottom up", "colored with the # first and last colors in the palette, respectively. 
Zm", "cfgs.base_cfgs import Cfgs from core.exec import Execution import argparse, yaml def parse_args(): \"\"\"", "origin='lower', extent=[x0, x1, y0, y1]) ax1.set_title('Green=low, Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9,", "= -3, 3 x = np.linspace(x0, x1, 500) y = np.linspace(y0, y1, 500)", "random seed', type=int) parser.add_argument('--version', help='version control', type=str) parser.add_argument('--resume', help='resume training', type=bool) parser.add_argument('--ckpt_version', help='checkpoint", "# Anything above that range is colored based on palette.set_over, etc. # set", "which the regular palette color scale is applied. # Anything above that range", "ckpt_epoch ' 'instead', type=str) parser.add_argument('--grad_accu_steps', help='reduce gpu memory usage', type=int) parser.add_argument('--num_workers', help='multithreaded loading',", "from matplotlib.patches import Rectangle from matplotlib import colors from cfgs.base_cfgs import Cfgs from", "y0, y1]) ax2.set_title('With BoundaryNorm') cbar = fig.colorbar(im, extend='both', spacing='proportional', shrink=0.9, ax=ax2) cbar.set_label('proportional') fig.suptitle('imshow,", "Y = np.meshgrid(x, y) Z1 = np.exp(-X**2 - Y**2) Z2 = np.exp(-(X -", "out-of-range and masked data') f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg') plt.savefig(f1) plt.close() if __name__ ==", "on palette.set_over, etc. # set up the Axes objects fig, (ax1, ax2) =", "'visualize'], type=str, default='train') parser.add_argument('--model', dest='model', choices=['small', 'large'], default='small', type=str) parser.add_argument('--split', dest='train_split', choices=['train', 'train+val',", "VQA dataset # -------------------------------------------------------- import os from copy import copy import numpy as", "memory usage', type=int) parser.add_argument('--num_workers', help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use pin memory', type=bool) parser.add_argument('--verbose',", "help='vqav2 dataset root path', type=str) parser.add_argument('--feature_path', help='bottom up features root path', type=str) args", "x0, x1 = -5, 5 y0, y1 = -3, 3 x = np.linspace(x0,", "scale is applied. # Anything above that range is colored based on palette.set_over,", "the I/O speed', type=bool) parser.add_argument('--gpu', default='0,1', help=\"gpu select, eg.'0, 1, 2'\", type=str) parser.add_argument('--seed',", "applied. # Anything above that range is colored based on palette.set_over, etc. 
#", "Modular Co-Attention Networks) # modify this to our VQA dataset # -------------------------------------------------------- import", "lines, you will see # all the defaults; under and over will be", "parser.add_argument('--feature_path', help='bottom up features root path', type=str) args = parser.parse_args() return args def", "up a colormap: # use copy so that we do not mutate the", "cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform') for ticklabel in ax1.xaxis.get_ticklabels(): ticklabel.set_visible(False) #", "np.linspace(y0, y1, 500) X, Y = np.meshgrid(x, y) Z1 = np.exp(-X**2 - Y**2)", "0.2, 0.5, 1], ncolors=palette.N), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax2.set_title('With BoundaryNorm') cbar", "# Alternatively, we could use # palette.set_bad(alpha = 0.0) # to make the", "with out-of-range and masked data') f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg') plt.savefig(f1) plt.close() if __name__", "- 1)**2 - (Y - 1)**2) Z = (Z1 - Z2) * 2", "increase the I/O speed', type=bool) parser.add_argument('--gpu', default='0,1', help=\"gpu select, eg.'0, 1, 2'\", type=str)", "choices=['small', 'large'], default='small', type=str) parser.add_argument('--split', dest='train_split', choices=['train', 'train+val', 'train+val+vg'], help=\"set training split, \"", "up the Axes objects fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4)) # plot", "If you comment out all the palette.set* lines, you will see # all", "vmax in the norm, we establish the # range to which the regular", "the # range to which the regular palette color scale is applied. #", "establish the # range to which the regular palette color scale is applied.", "5 y0, y1 = -3, 3 x = np.linspace(x0, x1, 500) y =", "1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0) # Alternatively, we could use # palette.set_bad(alpha =", "ax2.imshow(Zm, interpolation='nearest', cmap=palette, norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1], ncolors=palette.N), aspect='auto', origin='lower',", "def main(): opt = Cfgs() args = parse_args() args_dict = opt.parse_to_dict(args) cfg_file =", "'to increase the I/O speed', type=bool) parser.add_argument('--gpu', default='0,1', help=\"gpu select, eg.'0, 1, 2'\",", "Rectangle from matplotlib import colors from cfgs.base_cfgs import Cfgs from core.exec import Execution", "y1, 500) X, Y = np.meshgrid(x, y) Z1 = np.exp(-X**2 - Y**2) Z2", "for ticklabel in ax1.xaxis.get_ticklabels(): ticklabel.set_visible(False) # Plot using a small number of colors,", "global colormap instance palette = copy(plt.cm.gray) palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0) #", "- (Y - 1)**2) Z = (Z1 - Z2) * 2 # Set", "core.exec import Execution import argparse, yaml def parse_args(): \"\"\" Parse input arguments \"\"\"", "after every epoch\", type=str) parser.add_argument('--eval_every_epoch', default=False, help='set True to evaluate the ' 'val", "split)\", type=bool) parser.add_argument('--test_save_pred', help='set True to save the ' 'prediction vectors' '(only work", "import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from", "np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from matplotlib import colors", "etc. 
# set up the Axes objects fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6,", "'val', 'test', 'visualize'], type=str, default='train') parser.add_argument('--model', dest='model', choices=['small', 'large'], default='small', type=str) parser.add_argument('--split', dest='train_split',", "epoch finished' \"(only work when train with \" \"'train' split)\", type=bool) parser.add_argument('--test_save_pred', help='set", "import colors from cfgs.base_cfgs import Cfgs from core.exec import Execution import argparse, yaml", "256 help='batch size during training', type=int) parser.add_argument('--max_epoch', help='max training epoch', type=int) parser.add_argument('--preload', help='pre-load", "0.0) # to make the bad region transparent. This is the default. #", "= np.exp(-X**2 - Y**2) Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)", "Plot using a small number of colors, with unevenly spaced boundaries. im =", "dataset # -------------------------------------------------------- import os from copy import copy import numpy as np", "This is the default. # If you comment out all the palette.set* lines,", "numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from matplotlib", "choices=['train', 'val', 'test', 'visualize'], type=str, default='train') parser.add_argument('--model', dest='model', choices=['small', 'large'], default='small', type=str) parser.add_argument('--split',", "to evaluate the ' 'val split when an epoch finished' \"(only work when", "main(): opt = Cfgs() args = parse_args() args_dict = opt.parse_to_dict(args) cfg_file = \"cfgs/{}_model.yml\".format(args.model)", "Networks) # modify this to our VQA dataset # -------------------------------------------------------- import os from", "type=str) parser.add_argument('--grad_accu_steps', help='reduce gpu memory usage', type=int) parser.add_argument('--num_workers', help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use", "from matplotlib import colors from cfgs.base_cfgs import Cfgs from core.exec import Execution import", "' 'recommend that you use ' 'ckpt_version and ckpt_epoch ' 'instead', type=str) parser.add_argument('--grad_accu_steps',", "help=\"gpu select, eg.'0, 1, 2'\", type=str) parser.add_argument('--seed', default=444, help='fix random seed', type=int) parser.add_argument('--version',", "Loader=yaml.FullLoader) args_dict = {**yaml_dict, **args_dict} opt.add_args(args_dict) opt.proc() print('Hyper Parameters:') print(opt) opt.check_path() execution =", "parser.add_argument('--ckpt_path', help='load checkpoint path, we ' 'recommend that you use ' 'ckpt_version and", "colormap: # use copy so that we do not mutate the global colormap", "region transparent. This is the default. 
# If you comment out all the", "extent=[x0, x1, y0, y1]) ax1.set_title('Green=low, Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1)", "args_dict = opt.parse_to_dict(args) cfg_file = \"cfgs/{}_model.yml\".format(args.model) with open(cfg_file, 'r') as f: yaml_dict =", "save the ' 'prediction vectors' '(only work in testing)', type=bool) parser.add_argument('--batch_size', default=1, #", "that you use ' 'ckpt_version and ckpt_epoch ' 'instead', type=str) parser.add_argument('--grad_accu_steps', help='reduce gpu", "default='0,1', help=\"gpu select, eg.'0, 1, 2'\", type=str) parser.add_argument('--seed', default=444, help='fix random seed', type=int)", "use copy so that we do not mutate the global colormap instance palette", "with the # first and last colors in the palette, respectively. Zm =", "type=int) parser.add_argument('--num_workers', help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use pin memory', type=bool) parser.add_argument('--verbose', help='verbose print',", "Z) # By setting vmin and vmax in the norm, we establish the", "parser.add_argument('--max_epoch', help='max training epoch', type=int) parser.add_argument('--preload', help='pre-load the features into memory' 'to increase", "default='train') parser.add_argument('--model', dest='model', choices=['small', 'large'], default='small', type=str) parser.add_argument('--split', dest='train_split', choices=['train', 'train+val', 'train+val+vg'], help=\"set", "-3, 3 x = np.linspace(x0, x1, 500) y = np.linspace(y0, y1, 500) X,", "\" \"'train' split)\", type=bool) parser.add_argument('--test_save_pred', help='set True to save the ' 'prediction vectors'", "type=int) parser.add_argument('--ckpt_path', help='load checkpoint path, we ' 'recommend that you use ' 'ckpt_version", "\"(only work when train with \" \"'train' split)\", type=bool) parser.add_argument('--test_save_pred', help='set True to", "help='set True to evaluate the ' 'val split when an epoch finished' \"(only", "parser.add_argument('--preload', help='pre-load the features into memory' 'to increase the I/O speed', type=bool) parser.add_argument('--gpu',", "you use ' 'ckpt_version and ckpt_epoch ' 'instead', type=str) parser.add_argument('--grad_accu_steps', help='reduce gpu memory", "we do not mutate the global colormap instance palette = copy(plt.cm.gray) palette.set_over('r', 1.0)", "\" \"eg.'train', 'train+val+vg'\" \"set 'train' can trigger the \" \"eval after every epoch\",", "in the norm, we establish the # range to which the regular palette", "type=bool) parser.add_argument('--ckpt_version', help='checkpoint version', type=str) parser.add_argument('--ckpt_epoch', help='checkpoint epoch', type=int) parser.add_argument('--ckpt_path', help='load checkpoint path,", "we establish the # range to which the regular palette color scale is", "we could use # palette.set_bad(alpha = 0.0) # to make the bad region", "parser.add_argument('--batch_size', default=1, # was 256 help='batch size during training', type=int) parser.add_argument('--max_epoch', help='max training", "colored based on palette.set_over, etc. 
# set up the Axes objects fig, (ax1,", "loading', type=int) parser.add_argument('--pin_mem', help='use pin memory', type=bool) parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path', help='vqav2", "# -------------------------------------------------------- # mcan-vqa (Deep Modular Co-Attention Networks) # modify this to our", "0.5, 1], ncolors=palette.N), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax2.set_title('With BoundaryNorm') cbar =", "' 'val split when an epoch finished' \"(only work when train with \"", "' 'ckpt_version and ckpt_epoch ' 'instead', type=str) parser.add_argument('--grad_accu_steps', help='reduce gpu memory usage', type=int)", "= 0.0) # to make the bad region transparent. This is the default.", "parser.add_argument('--num_workers', help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use pin memory', type=bool) parser.add_argument('--verbose', help='verbose print', type=bool)", "yaml def parse_args(): \"\"\" Parse input arguments \"\"\" parser = argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run',", "as f: yaml_dict = yaml.load(f, Loader=yaml.FullLoader) args_dict = {**yaml_dict, **args_dict} opt.add_args(args_dict) opt.proc() print('Hyper", "palette.set* lines, you will see # all the defaults; under and over will", "parser.add_argument('--test_save_pred', help='set True to save the ' 'prediction vectors' '(only work in testing)',", "help='reduce gpu memory usage', type=int) parser.add_argument('--num_workers', help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use pin memory',", "size during training', type=int) parser.add_argument('--max_epoch', help='max training epoch', type=int) parser.add_argument('--preload', help='pre-load the features", "color map im = ax1.imshow(Zm, interpolation='bilinear', cmap=palette, norm=colors.Normalize(vmin=-1.0, vmax=1.0), aspect='auto', origin='lower', extent=[x0, x1,", "and masked data') f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg') plt.savefig(f1) plt.close() if __name__ == '__main__':", "into memory' 'to increase the I/O speed', type=bool) parser.add_argument('--gpu', default='0,1', help=\"gpu select, eg.'0,", "copy(plt.cm.gray) palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0) # Alternatively, we could use #", "dest='model', choices=['small', 'large'], default='small', type=str) parser.add_argument('--split', dest='train_split', choices=['train', 'train+val', 'train+val+vg'], help=\"set training split,", "y1 = -3, 3 x = np.linspace(x0, x1, 500) y = np.linspace(y0, y1,", "parser.add_argument('--split', dest='train_split', choices=['train', 'train+val', 'train+val+vg'], help=\"set training split, \" \"eg.'train', 'train+val+vg'\" \"set 'train'", "# first and last colors in the palette, respectively. 
Zm = np.ma.masked_where(Z >", "parse_args(): \"\"\" Parse input arguments \"\"\" parser = argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run', dest='run_mode', choices=['train',", "= opt.parse_to_dict(args) cfg_file = \"cfgs/{}_model.yml\".format(args.model) with open(cfg_file, 'r') as f: yaml_dict = yaml.load(f,", "copy so that we do not mutate the global colormap instance palette =", "this to our VQA dataset # -------------------------------------------------------- import os from copy import copy", "default=444, help='fix random seed', type=int) parser.add_argument('--version', help='version control', type=str) parser.add_argument('--resume', help='resume training', type=bool)", "copy import copy import numpy as np import matplotlib.pyplot as plt from matplotlib.patches", "parser.add_argument('--grad_accu_steps', help='reduce gpu memory usage', type=int) parser.add_argument('--num_workers', help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use pin", "a small number of colors, with unevenly spaced boundaries. im = ax2.imshow(Zm, interpolation='nearest',", "and vmax in the norm, we establish the # range to which the", "to make the bad region transparent. This is the default. # If you", "parse_args() args_dict = opt.parse_to_dict(args) cfg_file = \"cfgs/{}_model.yml\".format(args.model) with open(cfg_file, 'r') as f: yaml_dict", "= {**yaml_dict, **args_dict} opt.add_args(args_dict) opt.proc() print('Hyper Parameters:') print(opt) opt.check_path() execution = Execution(opt) execution.run(opt.run_mode)", "that we do not mutate the global colormap instance palette = copy(plt.cm.gray) palette.set_over('r',", "type=str) parser.add_argument('--seed', default=444, help='fix random seed', type=int) parser.add_argument('--version', help='version control', type=str) parser.add_argument('--resume', help='resume", "origin='lower', extent=[x0, x1, y0, y1]) ax2.set_title('With BoundaryNorm') cbar = fig.colorbar(im, extend='both', spacing='proportional', shrink=0.9,", "Z2) * 2 # Set up a colormap: # use copy so that", "norm=colors.Normalize(vmin=-1.0, vmax=1.0), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax1.set_title('Green=low, Red=high, Blue=masked') cbar =", "ax1.imshow(Zm, interpolation='bilinear', cmap=palette, norm=colors.Normalize(vmin=-1.0, vmax=1.0), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax1.set_title('Green=low, Red=high,", "the # first and last colors in the palette, respectively. Zm = np.ma.masked_where(Z", "'val split when an epoch finished' \"(only work when train with \" \"'train'", "yaml_dict = yaml.load(f, Loader=yaml.FullLoader) args_dict = {**yaml_dict, **args_dict} opt.add_args(args_dict) opt.proc() print('Hyper Parameters:') print(opt)", "input arguments \"\"\" parser = argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test', 'visualize'],", "ax1.xaxis.get_ticklabels(): ticklabel.set_visible(False) # Plot using a small number of colors, with unevenly spaced", "2 # Set up a colormap: # use copy so that we do", "that range is colored based on palette.set_over, etc. # set up the Axes", "1], ncolors=palette.N), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax2.set_title('With BoundaryNorm') cbar = fig.colorbar(im,", "the global colormap instance palette = copy(plt.cm.gray) palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0)", "in the palette, respectively. 
Zm = np.ma.masked_where(Z > 1.2, Z) # By setting", "y = np.linspace(y0, y1, 500) X, Y = np.meshgrid(x, y) Z1 = np.exp(-X**2", "(Deep Modular Co-Attention Networks) # modify this to our VQA dataset # --------------------------------------------------------", "np.exp(-(X - 1)**2 - (Y - 1)**2) Z = (Z1 - Z2) *", "x1 = -5, 5 y0, y1 = -3, 3 x = np.linspace(x0, x1,", "parser = argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test', 'visualize'], type=str, default='train') parser.add_argument('--model',", "data') f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg') plt.savefig(f1) plt.close() if __name__ == '__main__': main() #", "-0.2, 0, 0.2, 0.5, 1], ncolors=palette.N), aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax2.set_title('With", "map im = ax1.imshow(Zm, interpolation='bilinear', cmap=palette, norm=colors.Normalize(vmin=-1.0, vmax=1.0), aspect='auto', origin='lower', extent=[x0, x1, y0,", "can trigger the \" \"eval after every epoch\", type=str) parser.add_argument('--eval_every_epoch', default=False, help='set True", "'continuous' color map im = ax1.imshow(Zm, interpolation='bilinear', cmap=palette, norm=colors.Normalize(vmin=-1.0, vmax=1.0), aspect='auto', origin='lower', extent=[x0,", "boundaries. im = ax2.imshow(Zm, interpolation='nearest', cmap=palette, norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],", "with \" \"'train' split)\", type=bool) parser.add_argument('--test_save_pred', help='set True to save the ' 'prediction", "(ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4)) # plot using 'continuous' color map im", "norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1], ncolors=palette.N), aspect='auto', origin='lower', extent=[x0, x1, y0,", "x1, y0, y1]) ax2.set_title('With BoundaryNorm') cbar = fig.colorbar(im, extend='both', spacing='proportional', shrink=0.9, ax=ax2) cbar.set_label('proportional')", "type=int) parser.add_argument('--pin_mem', help='use pin memory', type=bool) parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset", "from core.exec import Execution import argparse, yaml def parse_args(): \"\"\" Parse input arguments", "= Cfgs() args = parse_args() args_dict = opt.parse_to_dict(args) cfg_file = \"cfgs/{}_model.yml\".format(args.model) with open(cfg_file,", "np.ma.masked_where(Z > 1.2, Z) # By setting vmin and vmax in the norm,", "small number of colors, with unevenly spaced boundaries. im = ax2.imshow(Zm, interpolation='nearest', cmap=palette,", "an epoch finished' \"(only work when train with \" \"'train' split)\", type=bool) parser.add_argument('--test_save_pred',", "respectively. 
Zm = np.ma.masked_where(Z > 1.2, Z) # By setting vmin and vmax", "palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0) # Alternatively, we could use # palette.set_bad(alpha", "default=1, # was 256 help='batch size during training', type=int) parser.add_argument('--max_epoch', help='max training epoch',", "use ' 'ckpt_version and ckpt_epoch ' 'instead', type=str) parser.add_argument('--grad_accu_steps', help='reduce gpu memory usage',", "help='checkpoint epoch', type=int) parser.add_argument('--ckpt_path', help='load checkpoint path, we ' 'recommend that you use", "features root path', type=str) args = parser.parse_args() return args def main(): opt =", "\"'train' split)\", type=bool) parser.add_argument('--test_save_pred', help='set True to save the ' 'prediction vectors' '(only", "True to save the ' 'prediction vectors' '(only work in testing)', type=bool) parser.add_argument('--batch_size',", "Cfgs() args = parse_args() args_dict = opt.parse_to_dict(args) cfg_file = \"cfgs/{}_model.yml\".format(args.model) with open(cfg_file, 'r')", "with unevenly spaced boundaries. im = ax2.imshow(Zm, interpolation='nearest', cmap=palette, norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0,", "dataset root path', type=str) parser.add_argument('--feature_path', help='bottom up features root path', type=str) args =", "range to which the regular palette color scale is applied. # Anything above", "gpu memory usage', type=int) parser.add_argument('--num_workers', help='multithreaded loading', type=int) parser.add_argument('--pin_mem', help='use pin memory', type=bool)", "opt = Cfgs() args = parse_args() args_dict = opt.parse_to_dict(args) cfg_file = \"cfgs/{}_model.yml\".format(args.model) with", "# was 256 help='batch size during training', type=int) parser.add_argument('--max_epoch', help='max training epoch', type=int)", "type=str) parser.add_argument('--resume', help='resume training', type=bool) parser.add_argument('--ckpt_version', help='checkpoint version', type=str) parser.add_argument('--ckpt_epoch', help='checkpoint epoch', type=int)", "unevenly spaced boundaries. im = ax2.imshow(Zm, interpolation='nearest', cmap=palette, norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2,", "cmap=palette, norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1], ncolors=palette.N), aspect='auto', origin='lower', extent=[x0, x1,", "is the default. # If you comment out all the palette.set* lines, you", "= fig.colorbar(im, extend='both', spacing='proportional', shrink=0.9, ax=ax2) cbar.set_label('proportional') fig.suptitle('imshow, with out-of-range and masked data')", "modify this to our VQA dataset # -------------------------------------------------------- import os from copy import", "under and over will be colored with the # first and last colors", "be colored with the # first and last colors in the palette, respectively.", "# to make the bad region transparent. This is the default. 
# If", "= plt.subplots(nrows=2, figsize=(6, 5.4)) # plot using 'continuous' color map im = ax1.imshow(Zm,", "execution = Execution(opt) execution.run(opt.run_mode) def text_layout(): # compute some interesting data x0, x1", "see # all the defaults; under and over will be colored with the", "' 'prediction vectors' '(only work in testing)', type=bool) parser.add_argument('--batch_size', default=1, # was 256", "choices=['train', 'train+val', 'train+val+vg'], help=\"set training split, \" \"eg.'train', 'train+val+vg'\" \"set 'train' can trigger", "execution.run(opt.run_mode) def text_layout(): # compute some interesting data x0, x1 = -5, 5", "our VQA dataset # -------------------------------------------------------- import os from copy import copy import numpy", "= np.exp(-(X - 1)**2 - (Y - 1)**2) Z = (Z1 - Z2)", "interesting data x0, x1 = -5, 5 y0, y1 = -3, 3 x", "y0, y1]) ax1.set_title('Green=low, Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform') for", "path', type=str) args = parser.parse_args() return args def main(): opt = Cfgs() args", "args def main(): opt = Cfgs() args = parse_args() args_dict = opt.parse_to_dict(args) cfg_file", "cbar = fig.colorbar(im, extend='both', spacing='proportional', shrink=0.9, ax=ax2) cbar.set_label('proportional') fig.suptitle('imshow, with out-of-range and masked", "Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2) Z = (Z1 -", "from copy import copy import numpy as np import matplotlib.pyplot as plt from", "aspect='auto', origin='lower', extent=[x0, x1, y0, y1]) ax1.set_title('Green=low, Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both',", "work in testing)', type=bool) parser.add_argument('--batch_size', default=1, # was 256 help='batch size during training',", "y) Z1 = np.exp(-X**2 - Y**2) Z2 = np.exp(-(X - 1)**2 - (Y", "Parse input arguments \"\"\" parser = argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test',", "to our VQA dataset # -------------------------------------------------------- import os from copy import copy import", "during training', type=int) parser.add_argument('--max_epoch', help='max training epoch', type=int) parser.add_argument('--preload', help='pre-load the features into", "def text_layout(): # compute some interesting data x0, x1 = -5, 5 y0,", "palette, respectively. Zm = np.ma.masked_where(Z > 1.2, Z) # By setting vmin and", "X, Y = np.meshgrid(x, y) Z1 = np.exp(-X**2 - Y**2) Z2 = np.exp(-(X", "# Set up a colormap: # use copy so that we do not", "Anything above that range is colored based on palette.set_over, etc. # set up", "args_dict = {**yaml_dict, **args_dict} opt.add_args(args_dict) opt.proc() print('Hyper Parameters:') print(opt) opt.check_path() execution = Execution(opt)", "# set up the Axes objects fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4))", "1.0) # Alternatively, we could use # palette.set_bad(alpha = 0.0) # to make", "and last colors in the palette, respectively. Zm = np.ma.masked_where(Z > 1.2, Z)", "the regular palette color scale is applied. 
# Anything above that range is", "'large'], default='small', type=str) parser.add_argument('--split', dest='train_split', choices=['train', 'train+val', 'train+val+vg'], help=\"set training split, \" \"eg.'train',", "shrink=0.9, ax=ax1) cbar.set_label('uniform') for ticklabel in ax1.xaxis.get_ticklabels(): ticklabel.set_visible(False) # Plot using a small", "objects fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4)) # plot using 'continuous' color", "training', type=bool) parser.add_argument('--ckpt_version', help='checkpoint version', type=str) parser.add_argument('--ckpt_epoch', help='checkpoint epoch', type=int) parser.add_argument('--ckpt_path', help='load checkpoint", "'train+val+vg'\" \"set 'train' can trigger the \" \"eval after every epoch\", type=str) parser.add_argument('--eval_every_epoch',", "copy import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Rectangle", "root path', type=str) parser.add_argument('--feature_path', help='bottom up features root path', type=str) args = parser.parse_args()", "vmin and vmax in the norm, we establish the # range to which", "Alternatively, we could use # palette.set_bad(alpha = 0.0) # to make the bad", "last colors in the palette, respectively. Zm = np.ma.masked_where(Z > 1.2, Z) #", "the norm, we establish the # range to which the regular palette color", "type=str) parser.add_argument('--eval_every_epoch', default=False, help='set True to evaluate the ' 'val split when an", "'train+val+vg'], help=\"set training split, \" \"eg.'train', 'train+val+vg'\" \"set 'train' can trigger the \"", "checkpoint path, we ' 'recommend that you use ' 'ckpt_version and ckpt_epoch '", "set up the Axes objects fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4)) #", "default. # If you comment out all the palette.set* lines, you will see", "cbar.set_label('proportional') fig.suptitle('imshow, with out-of-range and masked data') f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg') plt.savefig(f1) plt.close()", "every epoch\", type=str) parser.add_argument('--eval_every_epoch', default=False, help='set True to evaluate the ' 'val split", "'r') as f: yaml_dict = yaml.load(f, Loader=yaml.FullLoader) args_dict = {**yaml_dict, **args_dict} opt.add_args(args_dict) opt.proc()", "arguments \"\"\" parser = argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test', 'visualize'], type=str,", "type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset root path', type=str) parser.add_argument('--feature_path', help='bottom up features root path',", "os from copy import copy import numpy as np import matplotlib.pyplot as plt", "= Execution(opt) execution.run(opt.run_mode) def text_layout(): # compute some interesting data x0, x1 =", "f1 = os.path.join(os.getcwd(), f'results/val_imgs/dark_mask.jpg') plt.savefig(f1) plt.close() if __name__ == '__main__': main() # text_layout()", "palette.set_over, etc. 
# set up the Axes objects fig, (ax1, ax2) = plt.subplots(nrows=2,", "default='small', type=str) parser.add_argument('--split', dest='train_split', choices=['train', 'train+val', 'train+val+vg'], help=\"set training split, \" \"eg.'train', 'train+val+vg'\"", "Y**2) Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2) Z = (Z1", "-------------------------------------------------------- import os from copy import copy import numpy as np import matplotlib.pyplot", "= yaml.load(f, Loader=yaml.FullLoader) args_dict = {**yaml_dict, **args_dict} opt.add_args(args_dict) opt.proc() print('Hyper Parameters:') print(opt) opt.check_path()", "from cfgs.base_cfgs import Cfgs from core.exec import Execution import argparse, yaml def parse_args():", "ax=ax1) cbar.set_label('uniform') for ticklabel in ax1.xaxis.get_ticklabels(): ticklabel.set_visible(False) # Plot using a small number", "import Rectangle from matplotlib import colors from cfgs.base_cfgs import Cfgs from core.exec import", "= parser.parse_args() return args def main(): opt = Cfgs() args = parse_args() args_dict", "the palette.set* lines, you will see # all the defaults; under and over", "np.meshgrid(x, y) Z1 = np.exp(-X**2 - Y**2) Z2 = np.exp(-(X - 1)**2 -", "up features root path', type=str) args = parser.parse_args() return args def main(): opt", "color scale is applied. # Anything above that range is colored based on", "will be colored with the # first and last colors in the palette,", "the palette, respectively. Zm = np.ma.masked_where(Z > 1.2, Z) # By setting vmin", "instance palette = copy(plt.cm.gray) palette.set_over('r', 1.0) palette.set_under('g', 1.0) palette.set_bad('b', 1.0) # Alternatively, we", "matplotlib.patches import Rectangle from matplotlib import colors from cfgs.base_cfgs import Cfgs from core.exec", "\"\"\" parser = argparse.ArgumentParser(description='MCAN Args') parser.add_argument('--run', dest='run_mode', choices=['train', 'val', 'test', 'visualize'], type=str, default='train')", "# use copy so that we do not mutate the global colormap instance", "help='set True to save the ' 'prediction vectors' '(only work in testing)', type=bool)", "setting vmin and vmax in the norm, we establish the # range to", "matplotlib.pyplot as plt from matplotlib.patches import Rectangle from matplotlib import colors from cfgs.base_cfgs", "help='load checkpoint path, we ' 'recommend that you use ' 'ckpt_version and ckpt_epoch", "import matplotlib.pyplot as plt from matplotlib.patches import Rectangle from matplotlib import colors from", "-5, 5 y0, y1 = -3, 3 x = np.linspace(x0, x1, 500) y", "using a small number of colors, with unevenly spaced boundaries. 
im = ax2.imshow(Zm,", "seed', type=int) parser.add_argument('--version', help='version control', type=str) parser.add_argument('--resume', help='resume training', type=bool) parser.add_argument('--ckpt_version', help='checkpoint version',", "with open(cfg_file, 'r') as f: yaml_dict = yaml.load(f, Loader=yaml.FullLoader) args_dict = {**yaml_dict, **args_dict}", "x1, 500) y = np.linspace(y0, y1, 500) X, Y = np.meshgrid(x, y) Z1", "parser.add_argument('--version', help='version control', type=str) parser.add_argument('--resume', help='resume training', type=bool) parser.add_argument('--ckpt_version', help='checkpoint version', type=str) parser.add_argument('--ckpt_epoch',", "Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform') for ticklabel in ax1.xaxis.get_ticklabels():", "epoch\", type=str) parser.add_argument('--eval_every_epoch', default=False, help='set True to evaluate the ' 'val split when", "x1, y0, y1]) ax1.set_title('Green=low, Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform')", "pin memory', type=bool) parser.add_argument('--verbose', help='verbose print', type=bool) parser.add_argument('--dataset_path', help='vqav2 dataset root path', type=str)", "# palette.set_bad(alpha = 0.0) # to make the bad region transparent. This is", "parser.add_argument('--seed', default=444, help='fix random seed', type=int) parser.add_argument('--version', help='version control', type=str) parser.add_argument('--resume', help='resume training',", "500) y = np.linspace(y0, y1, 500) X, Y = np.meshgrid(x, y) Z1 =", "' 'instead', type=str) parser.add_argument('--grad_accu_steps', help='reduce gpu memory usage', type=int) parser.add_argument('--num_workers', help='multithreaded loading', type=int)", "ax1.set_title('Green=low, Red=high, Blue=masked') cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1) cbar.set_label('uniform') for ticklabel in", "spaced boundaries. 
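For reference, a minimal sketch of the same argparse pattern, checked against an explicit argv list so it runs without a real command line. It reproduces only two of the flags defined above; the shell invocation in the comment assumes the file is saved as run.py, which the source does not confirm.

import argparse

# Sketch only: same pattern as parse_args() above, with an explicit argv list.
# Assumed shell equivalent (the filename 'run.py' is a guess):
#   python run.py --run test --batch_size 64
p = argparse.ArgumentParser(description='MCAN Args (sketch)')
p.add_argument('--run', dest='run_mode',
               choices=['train', 'val', 'test', 'visualize'],
               type=str, default='train')
p.add_argument('--batch_size', default=1, type=int)

ns = p.parse_args(['--run', 'test', '--batch_size', '64'])
assert ns.run_mode == 'test' and ns.batch_size == 64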
def main():
    opt = Cfgs()
    args = parse_args()
    args_dict = opt.parse_to_dict(args)

    cfg_file = "cfgs/{}_model.yml".format(args.model)
    with open(cfg_file, 'r') as f:
        yaml_dict = yaml.load(f, Loader=yaml.FullLoader)

    args_dict = {**yaml_dict, **args_dict}
    opt.add_args(args_dict)
    opt.proc()

    print('Hyper Parameters:')
    print(opt)

    opt.check_path()

    execution = Execution(opt)
    execution.run(opt.run_mode)
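One subtlety in main() worth spelling out: because args_dict is unpacked after yaml_dict, command-line values override the YAML defaults on key clashes (this assumes opt.parse_to_dict drops flags the user did not set, which the source does not show). A minimal sketch with illustrative keys:

# Illustrative keys only; not taken from the real cfgs/*_model.yml files.
yaml_dict = {'batch_size': 256, 'max_epoch': 13}
args_dict = {'batch_size': 64}              # e.g. --batch_size 64 on the CLI
merged = {**yaml_dict, **args_dict}         # later unpacking wins on clashes
assert merged == {'batch_size': 64, 'max_epoch': 13}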
def text_layout():
    # compute some interesting data
    x0, x1 = -5, 5
    y0, y1 = -3, 3
    x = np.linspace(x0, x1, 500)
    y = np.linspace(y0, y1, 500)
    X, Y = np.meshgrid(x, y)
    Z1 = np.exp(-X**2 - Y**2)
    Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2)
    Z = (Z1 - Z2) * 2

    # Set up a colormap:
    # use copy so that we do not mutate the global colormap instance
    palette = copy(plt.cm.gray)
    palette.set_over('r', 1.0)
    palette.set_under('g', 1.0)
    palette.set_bad('b', 1.0)
    # Alternatively, we could use
    #   palette.set_bad(alpha=0.0)
    # to make the bad region transparent. This is the default.
    # If you comment out all the palette.set* lines, you will see
    # all the defaults: under and over will be colored with the
    # first and last colors in the palette, respectively.
    Zm = np.ma.masked_where(Z > 1.2, Z)

    # By setting vmin and vmax in the norm, we establish the
    # range to which the regular palette color scale is applied.
    # Anything above that range is colored based on palette.set_over, etc.

    # set up the Axes objects
    fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 5.4))

    # plot using 'continuous' color map
    im = ax1.imshow(Zm, interpolation='bilinear',
                    cmap=palette,
                    norm=colors.Normalize(vmin=-1.0, vmax=1.0),
                    aspect='auto', origin='lower',
                    extent=[x0, x1, y0, y1])
    ax1.set_title('Green=low, Red=high, Blue=masked')
    cbar = fig.colorbar(im, extend='both', shrink=0.9, ax=ax1)
    cbar.set_label('uniform')
    for ticklabel in ax1.xaxis.get_ticklabels():
        ticklabel.set_visible(False)

    # Plot using a small number of colors, with unevenly spaced boundaries.
    im = ax2.imshow(Zm, interpolation='nearest',
                    cmap=palette,
                    norm=colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
                                             ncolors=palette.N),
                    aspect='auto', origin='lower',
                    extent=[x0, x1, y0, y1])
    ax2.set_title('With BoundaryNorm')
    cbar = fig.colorbar(im, extend='both', spacing='proportional',
                        shrink=0.9, ax=ax2)
    cbar.set_label('proportional')

    fig.suptitle('imshow, with out-of-range and masked data')
    f1 = os.path.join(os.getcwd(), 'results/val_imgs/dark_mask.jpg')
    plt.savefig(f1)
    plt.close()


if __name__ == '__main__':
    main()
    # text_layout()
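To make the masking and BoundaryNorm behavior in text_layout() concrete, here is a small standalone sketch on a toy array (illustrative values only): masked entries are the ones palette.set_bad would color, and BoundaryNorm maps each unmasked value to a discrete color index by the bin it falls into.

import numpy as np
from matplotlib import colors

z = np.array([-0.9, -0.3, 0.1, 0.7, 1.5])
zm = np.ma.masked_where(z > 1.2, z)     # last entry becomes masked ('bad')
print(zm)

# Same boundaries as the ax2 plot above: six uneven bins over [-1, 1].
bnorm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1], ncolors=256)
print(bnorm(zm))                        # increasing integer color indices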
[ "and authenticate the user\"\"\" email = attrs.get('email') password = attrs.get('password') user = authenticate(", "= Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta: model = EstatusUsuario fields", "= '__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False) lng =", "user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno fields = '__all__'", "Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta: model = EstatusUsuario fields =", "Idioma, Puesto, Scope, Tipo_Rol, Rol from .models import Departamento_Turno, Turno, Puesto, Usuario class", "class Meta: model = Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model", "'__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno fields =", "Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno fields = '__all__' depth = 1 class", "= '__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model = Rol fields = '__all__' depth", "Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40, allow_blank=False) class Meta: fields =", "model = Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model = Rol", "the user\"\"\" email = attrs.get('email') password = attrs.get('password') user = authenticate( request=self.context.get('request'), username=email,", "with provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer):", "_ from rest_framework import serializers from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto,", "model = Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol", "depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno fields = '__all__'", "'__all__' depth = 1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto fields =", "class Meta: model = Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model", "user\"\"\" email = attrs.get('email') password = attrs.get('password') user = authenticate( request=self.context.get('request'), username=email, password=password", "= Rol fields = '__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model", "Meta: model = Historial_Turno fields = '__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat", "authenticate( request=self.context.get('request'), username=email, password=password ) if not user: msg = _('Unable to authenticate", "= '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma fields = '__all__' class", "fields = ('email', 'password', 'username') extra_kwargs = {'password': {'write_only': True, 'min_length': 5}} def", "Scope, Tipo_Rol, Rol from .models import Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class", "request=self.context.get('request'), username=email, password=password ) if not 
user: msg = _('Unable to authenticate with", "authenticate with provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user return attrs class", "PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto fields = '__all__' depth = 2 class", "= Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope fields", "= 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno fields = '__all__' depth", "Meta: model = get_user_model() fields = ('email', 'password', 'username') extra_kwargs = {'password': {'write_only':", "IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer): class Meta:", "model = get_user_model() fields = ('email', 'password', 'username') extra_kwargs = {'password': {'write_only': True,", "Meta: model = EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model =", "import Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento fields", "RolSerializer(serializers.ModelSerializer): class Meta: model = Rol fields = '__all__' depth = 1 class", "credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta:", "Meta: model = Departamento_Turno fields = '__all__' depth = 1 class PuestoSerializer(serializers.ModelSerializer): class", "Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol fields =", "Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope fields =", "Puesto fields = '__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model =", "class Meta: model = EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model", "code='authorization') attrs['user'] = user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno", "attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno fields = '__all__' depth =", "Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model = Rol fields =", "EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol from .models import Departamento_Turno, Turno, Puesto,", "= get_user_model() fields = ('email', 'password', 'username') extra_kwargs = {'password': {'write_only': True, 'min_length':", ".models import Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento", "validate(self, attrs): \"\"\"Validate and authenticate the user\"\"\" email = attrs.get('email') password = attrs.get('password')", "Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno fields =", "\"\"\"Validate and authenticate the user\"\"\" email = attrs.get('email') password = attrs.get('password') user =", "'username') extra_kwargs = {'password': {'write_only': True, 'min_length': 5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data)", "class Meta: model = get_user_model() fields = ('email', 
'password', 'username') extra_kwargs = {'password':", "'__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40,", "= serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40, allow_blank=False) class Meta: fields = ('Lat', 'lng')", "style={'input_type': 'password'}, trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate and authenticate the user\"\"\" email", "import ugettext_lazy as _ from rest_framework import serializers from .models import Departamento, EstatusUsuario,", "= ('email', 'password', 'username') extra_kwargs = {'password': {'write_only': True, 'min_length': 5}} def create(self,", "msg = _('Unable to authenticate with provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] =", "class Meta: model = Departamento_Turno fields = '__all__' depth = 1 class PuestoSerializer(serializers.ModelSerializer):", "= 2 class UserSerializer(serializers.ModelSerializer): class Meta: model = get_user_model() fields = ('email', 'password',", "fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol fields = '__all__'", "Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento fields = '__all__'", "Rol fields = '__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model =", "if not user: msg = _('Unable to authenticate with provided credentials') raise serializers.ValidationError(msg,", "Historial_Turno fields = '__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False)", "depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields = '__all__'", "= '__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields", "attrs.get('email') password = attrs.get('password') user = authenticate( request=self.context.get('request'), username=email, password=password ) if not", "'__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model = Rol fields = '__all__' depth =", "class RolSerializer(serializers.ModelSerializer): class Meta: model = Rol fields = '__all__' depth = 1", "django.utils.translation import ugettext_lazy as _ from rest_framework import serializers from .models import Departamento,", "Puesto, Scope, Tipo_Rol, Rol from .models import Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer):", "return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno fields = '__all__' depth", "= Departamento_Turno fields = '__all__' depth = 1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model", "'__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer):", "rest_framework import serializers from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol,", "email = attrs.get('email') password = attrs.get('password') user = authenticate( request=self.context.get('request'), username=email, password=password )", "model = Departamento_Turno fields = '__all__' depth = 1 class 
PuestoSerializer(serializers.ModelSerializer): class Meta:", "Meta: model = Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta: model =", "= Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model = Rol fields", "= '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno fields = '__all__' class", "from django.utils.translation import ugettext_lazy as _ from rest_framework import serializers from .models import", "= '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta: model = EstatusUsuario fields = '__all__' class", "email = serializers.CharField() password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def validate(self, attrs):", "Departamento_Turno fields = '__all__' depth = 1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model =", "Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno fields = '__all__' depth = 4 class", "model = Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope", "fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta: model = EstatusUsuario fields = '__all__'", "fields = '__all__' depth = 2 class UserSerializer(serializers.ModelSerializer): class Meta: model = get_user_model()", "get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False )", "django.contrib.auth import authenticate from django.utils.translation import ugettext_lazy as _ from rest_framework import serializers", "username=email, password=password ) if not user: msg = _('Unable to authenticate with provided", "from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol from .models", "Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento fields = '__all__' class", "ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta:", "fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma fields = '__all__'", "= 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields = '__all__' depth", "class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields = '__all__' depth = 2", "create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password = serializers.CharField( style={'input_type':", "model = Usuario fields = '__all__' depth = 2 class UserSerializer(serializers.ModelSerializer): class Meta:", "= serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate and authenticate the", "class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40, allow_blank=False) class Meta: fields", "= serializers.CharField() password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate", "'__all__' class 
EstatusSerializer(serializers.ModelSerializer): class Meta: model = EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer):", "Tipo_Rol, Rol from .models import Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta:", "authenticate the user\"\"\" email = attrs.get('email') password = attrs.get('password') user = authenticate( request=self.context.get('request'),", "fields = '__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False) lng", "import authenticate from django.utils.translation import ugettext_lazy as _ from rest_framework import serializers from", "password = attrs.get('password') user = authenticate( request=self.context.get('request'), username=email, password=password ) if not user:", "depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40, allow_blank=False)", "class Meta: model = Historial_Turno fields = '__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer):", "serializers from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol from", "True, 'min_length': 5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField()", "def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password = serializers.CharField(", "class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def", "UserSerializer(serializers.ModelSerializer): class Meta: model = get_user_model() fields = ('email', 'password', 'username') extra_kwargs =", "('email', 'password', 'username') extra_kwargs = {'password': {'write_only': True, 'min_length': 5}} def create(self, validated_data):", "ugettext_lazy as _ from rest_framework import serializers from .models import Departamento, EstatusUsuario, Historial_Turno,", "from .models import Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model =", "class UserSerializer(serializers.ModelSerializer): class Meta: model = get_user_model() fields = ('email', 'password', 'username') extra_kwargs", "fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope fields = '__all__'", "from rest_framework import serializers from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope,", "as _ from rest_framework import serializers from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma,", "from django.contrib.auth import authenticate from django.utils.translation import ugettext_lazy as _ from rest_framework import", "Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol from .models import Departamento_Turno, Turno,", "Usuario fields = '__all__' depth = 2 class UserSerializer(serializers.ModelSerializer): class Meta: model =", "get_user_model() fields = ('email', 'password', 'username') extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}", "password=password ) if not user: msg = 
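The depth option used on several serializers above tells DRF to auto-generate read-only nested serializers for relations up to that many levels, instead of rendering primary keys. As an illustration only (the 'puesto' field name on Usuario is an assumption, not confirmed by .models), an explicit equivalent of depth = 1 would look like:

class UsuarioNestedSerializer(serializers.ModelSerializer):
    # Hypothetical explicit version of what 'depth = 1' generates for a
    # 'puesto' relation; DRF's auto-generated nested serializers are read-only.
    puesto = PuestoSerializer(read_only=True)

    class Meta:
        model = Usuario
        fields = '__all__'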
_('Unable to authenticate with provided credentials')", "= user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno fields =", ".models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol from .models import", "= Historial_Turno fields = '__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40,", "django.contrib.auth import get_user_model from django.contrib.auth import authenticate from django.utils.translation import ugettext_lazy as _", "Meta: model = Puesto fields = '__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class", "model = Rol fields = '__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta:", "= '__all__' depth = 1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto fields", "class Meta: model = Puesto fields = '__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer):", "lat = serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40, allow_blank=False) class Meta: fields = ('Lat',", "'__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields =", "AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def validate(self,", "depth = 1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto fields = '__all__'", "class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class", "= Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol fields", "Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer):", "Meta: model = Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model =", "password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate and authenticate", "_('Unable to authenticate with provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user return", "= _('Unable to authenticate with provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user", "from django.contrib.auth import get_user_model from django.contrib.auth import authenticate from django.utils.translation import ugettext_lazy as", "{'password': {'write_only': True, 'min_length': 5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email", "attrs.get('password') user = authenticate( request=self.context.get('request'), username=email, password=password ) if not user: msg =", "1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto fields = '__all__' depth =", ") if not user: msg = _('Unable to authenticate with provided credentials') raise", "fields = '__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario", "= '__all__' depth = 2 class UserSerializer(serializers.ModelSerializer): class Meta: model = 
get_user_model() fields", "= Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno fields", "5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password =", "= '__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno fields", "Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento fields =", "import get_user_model from django.contrib.auth import authenticate from django.utils.translation import ugettext_lazy as _ from", "= authenticate( request=self.context.get('request'), username=email, password=password ) if not user: msg = _('Unable to", "Meta: model = Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model =", "fields = '__all__' depth = 1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto", "Meta: model = Usuario fields = '__all__' depth = 2 class UserSerializer(serializers.ModelSerializer): class", "extra_kwargs = {'password': {'write_only': True, 'min_length': 5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class", "user: msg = _('Unable to authenticate with provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user']", "class TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class", "Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol from .models import Departamento_Turno, Turno, Puesto, Usuario", "{'write_only': True, 'min_length': 5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email =", "serializers.ValidationError(msg, code='authorization') attrs['user'] = user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model =", "def validate(self, attrs): \"\"\"Validate and authenticate the user\"\"\" email = attrs.get('email') password =", "class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class", "fields = '__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno", "class Meta: model = Usuario fields = '__all__' depth = 2 class UserSerializer(serializers.ModelSerializer):", "to authenticate with provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user return attrs", "2 class UserSerializer(serializers.ModelSerializer): class Meta: model = get_user_model() fields = ('email', 'password', 'username')", "'__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer):", "= 1 class PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto fields = '__all__' depth", "import serializers from .models import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol", "<gh_stars>0 from django.contrib.auth import get_user_model from django.contrib.auth import authenticate from django.utils.translation import 
ugettext_lazy", "class ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class", "model = Puesto fields = '__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta:", "Rol from .models import Departamento_Turno, Turno, Puesto, Usuario class DepartamentoSerializer(serializers.ModelSerializer): class Meta: model", "EstatusSerializer(serializers.ModelSerializer): class Meta: model = EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta:", "= {'password': {'write_only': True, 'min_length': 5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer):", "trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate and authenticate the user\"\"\" email = attrs.get('email')", "class Meta: model = Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model", "Meta: model = Rol fields = '__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class", "not user: msg = _('Unable to authenticate with provided credentials') raise serializers.ValidationError(msg, code='authorization')", "'__all__' depth = 2 class UserSerializer(serializers.ModelSerializer): class Meta: model = get_user_model() fields =", "serializers.CharField() password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate and", ") def validate(self, attrs): \"\"\"Validate and authenticate the user\"\"\" email = attrs.get('email') password", "class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno fields = '__all__' depth = 4", "model = Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno", "serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate and authenticate the user\"\"\"", "= '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope fields = '__all__' class", "'min_length': 5}} def create(self, validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password", "TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta:", "class Meta: model = Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model", "model = EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma", "model = Historial_Turno fields = '__all__' depth = 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat =", "EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma fields =", "= attrs.get('password') user = authenticate( request=self.context.get('request'), username=email, password=password ) if not user: msg", "provided credentials') raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class", "model = Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta: model = 
EstatusUsuario", "return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password = serializers.CharField( style={'input_type': 'password'}, trim_whitespace=False", "attrs['user'] = user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Historial_Turno fields", "depth = 2 class UserSerializer(serializers.ModelSerializer): class Meta: model = get_user_model() fields = ('email',", "= attrs.get('email') password = attrs.get('password') user = authenticate( request=self.context.get('request'), username=email, password=password ) if", "class EstatusSerializer(serializers.ModelSerializer): class Meta: model = EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class", "'__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer):", "Meta: model = Turno fields = '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model =", "authenticate from django.utils.translation import ugettext_lazy as _ from rest_framework import serializers from .models", "validated_data): return get_user_model().objects.create_user(**validated_data) class AuthTokenSerializer(serializers.Serializer): email = serializers.CharField() password = serializers.CharField( style={'input_type': 'password'},", "user = authenticate( request=self.context.get('request'), username=email, password=password ) if not user: msg = _('Unable", "class IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma fields = '__all__' class RolSerializer(serializers.ModelSerializer): class", "1 class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno fields = '__all__' depth =", "= '__all__' class Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol fields = '__all__' class", "= Puesto fields = '__all__' depth = 2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model", "import Departamento, EstatusUsuario, Historial_Turno, Idioma, Puesto, Scope, Tipo_Rol, Rol from .models import Departamento_Turno,", "= 4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40, allow_blank=False) class", "fields = '__all__' class RolSerializer(serializers.ModelSerializer): class Meta: model = Rol fields = '__all__'", "= EstatusUsuario fields = '__all__' class IdiomaSerializer(serializers.ModelSerializer): class Meta: model = Idioma fields", "raise serializers.ValidationError(msg, code='authorization') attrs['user'] = user return attrs class Historial_TurnoSerializer(serializers.ModelSerializer): class Meta: model", "'__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta: model = Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer):", "class Meta: model = Rol fields = '__all__' depth = 1 class Departamento_TurnoSerializer(serializers.ModelSerializer):", "DepartamentoSerializer(serializers.ModelSerializer): class Meta: model = Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta:", "'password'}, trim_whitespace=False ) def validate(self, attrs): \"\"\"Validate and authenticate the user\"\"\" email =", "4 class Usuario_Lat_Lng_Serializer(serializers.Serializer): lat = 
serializers.CharField(max_length=40, allow_blank=False) lng = serializers.CharField(max_length=40, allow_blank=False) class Meta:", "UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields = '__all__' depth = 2 class", "2 class UsuarioSerializer(serializers.ModelSerializer): class Meta: model = Usuario fields = '__all__' depth =", "class Departamento_TurnoSerializer(serializers.ModelSerializer): class Meta: model = Departamento_Turno fields = '__all__' depth = 1", "Tipo_RolSerializer(serializers.ModelSerializer): class Meta: model = Tipo_Rol fields = '__all__' class ScopeSerializer(serializers.ModelSerializer): class Meta:", "get_user_model from django.contrib.auth import authenticate from django.utils.translation import ugettext_lazy as _ from rest_framework", "attrs): \"\"\"Validate and authenticate the user\"\"\" email = attrs.get('email') password = attrs.get('password') user", "class PuestoSerializer(serializers.ModelSerializer): class Meta: model = Puesto fields = '__all__' depth = 2", "fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model = Turno fields = '__all__'", "Meta: model = Departamento fields = '__all__' class TurnoSerializer(serializers.ModelSerializer): class Meta: model =", "class Meta: model = Scope fields = '__all__' class EstatusSerializer(serializers.ModelSerializer): class Meta: model", "= Usuario fields = '__all__' depth = 2 class UserSerializer(serializers.ModelSerializer): class Meta: model", "'password', 'username') extra_kwargs = {'password': {'write_only': True, 'min_length': 5}} def create(self, validated_data): return" ]
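For context, a minimal sketch of how AuthTokenSerializer would typically be exposed as a token-login endpoint, following the standard DRF ObtainAuthToken pattern; the view name CreateTokenView is illustrative and not part of the file above.

# Hypothetical view (not in the source) wiring AuthTokenSerializer into DRF's
# builtin token endpoint so POSTing {email, password} returns an auth token.
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings


class CreateTokenView(ObtainAuthToken):
    # reuse the email/password validation defined in AuthTokenSerializer
    serializer_class = AuthTokenSerializer
    # render the browsable-API login form, like the builtin view does
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES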
r"""
===========
Transport laws
===========

Create a plot comparing the different transport laws.
"""

import matplotlib.pyplot as plt
import numpy as np

from PyDune.physics.sedtransport import transport_laws as TL

theta = np.linspace(0, 0.4, 1000)
theta_d = 0.035
omega = 8

plt.figure()
plt.plot(theta, TL.quadratic_transport_law(theta, theta_d, omega), label='quadratic transport law')
plt.plot(theta, TL.cubic_transport_law(theta, theta_d, omega), label='cubic transport law')
plt.plot(theta, TL.quartic_transport_law(theta, theta_d), label='quartic transport law')
plt.xlabel(r'Shield number, $\theta$')
plt.ylabel('Non dimensional saturated flux')
plt.legend()
plt.tight_layout()
plt.show()
'''
decorator

To make wrapping functions easier, Python provides what is called a decorator.

reference: https://www.youtube.com/watch?v=r7Dtus7N4pI

revised on: 26-02-2021
'''

'''
Create the function that will be called.
A decorator in Python must return the function it receives so that a value is
handed back; if nothing is returned, Python raises
TypeError: 'NoneType' object is not callable | this happens because the
decorator did not return the function.
'''


def f1(func):
    # if a variable is printed out here, it produces output at decoration time
    function_data = 'function data'
    print(function_data)
    return func


# this is the unique part of Python decorators: it is written with @ and it
# serves to call the wrapper function
@f1
def func_data():
    print('hello')


# now call the new function and see what output comes out
func_data()
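A more typical decorator replaces the function with an inner wrapper instead of returning it unchanged; a small sketch of that conventional pattern, with functools.wraps preserving the original function's metadata (the name logged is illustrative).

import functools


def logged(func):
    # wrap func so extra behaviour runs on every call, not only at decoration
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print(f'calling {func.__name__}')
        return func(*args, **kwargs)
    return wrapper


@logged
def greet():
    print('hello')


greet()  # prints "calling greet" then "hello"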
<reponame>alexoff13/QTpost-machine
from typing import Union
from time import time

from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QListWidget, QAbstractItemView, QListWidgetItem

from widgets.tape import Tape


class Data:
    test: QListWidgetItem
    state: dict
    saved_state: dict


class TestsList(QListWidget):
    DEFAULT_NAME = 'test'

    def __init__(self, tape: Tape, parent: any = None):
        super().__init__(parent)
        self.__tape = tape
        self.__tests = dict()
        self.__set_list()
        self.__last_time_dragged = float()

    @property
    def last_time_dragged(self) -> float:
        return self.__last_time_dragged

    def __set_list(self) -> None:
        # enable drag-and-drop of items
        self.setDragDropMode(QAbstractItemView.DragDrop)
        # allow selecting several items at once
        self.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.setAcceptDrops(True)
        self.setDropIndicatorShown(True)

    def get_test_name(self, name: str = DEFAULT_NAME, ignore_name: str = None) -> str:
        if not name:
            name = self.DEFAULT_NAME
        k = 1
        new_name = name
        while new_name in self.__tests and new_name != ignore_name:
            new_name = f'{name}-{k}'
            k += 1
        return new_name

    def add_test(self, test_name: str = None, state: dict = None, test: QListWidgetItem = None,
                 reset: bool = True, saved_state: dict = None) -> QListWidgetItem:
        if test_name is None:
            test_name = self.get_test_name()
        if test is None:
            test = QListWidgetItem()
        test.setText(test_name)
        test.last_name = test_name
        test.setFlags(test.flags() | Qt.ItemIsEditable)
        self.addItem(test)
        if reset:
            self.__tape.reset()
        self.__tests[test_name] = Data()
        self.__tests[test_name].test = test
        self.__tests[test_name].state = state if state is not None else Tape.get_empty_data()
        self.__tests[test_name].saved_state = saved_state
        return test

    def remove_test(self, test: QListWidgetItem, internal_remove: bool = True) -> None:
        self.takeItem(self.row(test))
        if internal_remove:
            self.__tests.pop(test.text())

    def get_last(self) -> Union[QListWidgetItem, None]:
        return self.item(self.count() - 1) if self.count() > 0 else None

    def get_state(self, test: QListWidgetItem) -> dict:
        return self.__tests[test.text()].state

    def get_saved_state(self, test: QListWidgetItem) -> dict:
        return self.__tests[test.text()].saved_state

    def rename(self, test_name: str, new_test_name: str) -> None:
        self.__tests[new_test_name] = self.__tests.pop(test_name)
        self.__tests[new_test_name].test.last_name = new_test_name

    def clear(self) -> None:
        for test_name in self.__tests:
            self.takeItem(self.row(self.__tests[test_name].test))
        self.__tests.clear()

    def save_state(self, test: QListWidgetItem, global_: bool = False) -> None:
        if test.text() in self.__tests:
            if global_:
                self.__tests[test.text()].saved_state = self.__tape.get_data()
            else:
                self.__tests[test.text()].state = self.__tape.get_data()

    def get_data(self) -> dict:
        data = dict()
        for test_name in self.__tests:
            data[test_name] = self.__tests[test_name].state
        return data

    # TODO: it may be better to offer a choice: load with replacement or append to the end
    def set_from_file(self, file: dict) -> None:
        self.clear()
        for test_name in file:
            self.add_test(test_name, file[test_name], saved_state=file[test_name])

    def has_unsaved_data(self) -> bool:
        return len(self.__tests) > 0

    def dragEnterEvent(self, event):
        if event.mimeData().hasUrls():
            event.accept()
        else:
            super().dragEnterEvent(event)

    def dragMoveEvent(self, event):
        if event.mimeData().hasUrls():
            event.setDropAction(Qt.CopyAction)
            event.accept()
        else:
            super().dragMoveEvent(event)

    def dropEvent(self, event):
        if event.mimeData().hasUrls():
            event.setDropAction(Qt.CopyAction)
            event.accept()
            # collect the local paths of the dropped files
            links = list()
            for url in event.mimeData().urls():
                links.append(str(url.toLocalFile()))
        else:
            event.setDropAction(Qt.MoveAction)
            super().dropEvent(event)

    def startDrag(self, supported_actions: Union[QtCore.Qt.DropActions, QtCore.Qt.DropAction]) -> None:
        self.__last_time_dragged = time()
        super().startDrag(supported_actions)
list()", "= test_name test.setFlags(test.flags() | Qt.ItemIsEditable) self.addItem(test) if reset: self.__tape.reset() self.__tests[test_name] = Data() self.__tests[test_name].test", "self.count() > 0 else None def get_state(self, test: QListWidgetItem) -> dict: return self.__tests[test.text()].state", "= None) -> str: if not name: name = self.DEFAULT_NAME k = 1", "def rename(self, test_name: str, new_test_name: str) -> None: self.__tests[new_test_name] = self.__tests.pop(test_name) self.__tests[new_test_name].test.last_name =", "return test def remove_test(self, test: QListWidgetItem, internal_remove: bool = True) -> None: self.takeItem(self.row(test))", "конец def set_from_file(self, file: dict) -> None: self.clear() for test_name in file: self.add_test(test_name,", "None]: return self.item(self.count() - 1) if self.count() > 0 else None def get_state(self,", "event.mimeData().hasUrls(): event.accept() else: super().dragEnterEvent(event) def dragMoveEvent(self, event): if event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() else: super().dragMoveEvent(event)", "устанавливает возможность сразу нескольких элементов self.setSelectionMode(QAbstractItemView.ExtendedSelection) self.setAcceptDrops(True) self.setDropIndicatorShown(True) def get_test_name(self, name: str =", "name while new_name in self.__tests and new_name != ignore_name: new_name = f'{name}-{k}' k", "class TestsList(QListWidget): DEFAULT_NAME = 'test' def __init__(self, tape: Tape, parent: any = None):", "1 return new_name def add_test(self, test_name: str = None, state: dict = None,", "-> dict: return self.__tests[test.text()].state def get_saved_state(self, test: QListWidgetItem) -> dict: return self.__tests[test.text()].saved_state def", "self.__tests.clear() def save_state(self, test: QListWidgetItem, global_: bool = False) -> None: if test.text()", "self.__tape.reset() self.__tests[test_name] = Data() self.__tests[test_name].test = test self.__tests[test_name].state = state if state is", "test: QListWidgetItem) -> dict: return self.__tests[test.text()].saved_state def rename(self, test_name: str, new_test_name: str) ->", "tape self.__tests = dict() self.__set_list() self.__last_time_dragged = float() @property def last_time_dragged(self) -> float:", "from typing import Union from time import time from PyQt5 import QtCore from", "элементов self.setDragDropMode(QAbstractItemView.DragDrop) # устанавливает возможность сразу нескольких элементов self.setSelectionMode(QAbstractItemView.ExtendedSelection) self.setAcceptDrops(True) self.setDropIndicatorShown(True) def get_test_name(self,", "saved_state=file[test_name]) def has_unsaved_data(self) -> bool: return len(self.__tests) > 0 def dragEnterEvent(self, event): if", "QListWidget, QAbstractItemView, QListWidgetItem from widgets.tape import Tape class Data: test: QListWidgetItem state: dict", "= self.DEFAULT_NAME k = 1 new_name = name while new_name in self.__tests and", "None: self.__tests[new_test_name] = self.__tests.pop(test_name) self.__tests[new_test_name].test.last_name = new_test_name def clear(self) -> None: for test_name", "rename(self, test_name: str, new_test_name: str) -> None: self.__tests[new_test_name] = self.__tests.pop(test_name) self.__tests[new_test_name].test.last_name = new_test_name", "= self.__tape.get_data() else: self.__tests[test.text()].state = self.__tape.get_data() def get_data(self) -> dict: data = dict()", "-> None: self.__tests[new_test_name] = self.__tests.pop(test_name) self.__tests[new_test_name].test.last_name = 
new_test_name def clear(self) -> None: for", "-> QListWidgetItem: if test_name is None: test_name = self.get_test_name() if test is None:", "= saved_state return test def remove_test(self, test: QListWidgetItem, internal_remove: bool = True) ->", "from PyQt5 import QtCore from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QListWidget, QAbstractItemView,", "global_: bool = False) -> None: if test.text() in self.__tests: if global_: self.__tests[test.text()].saved_state", "self.__tests[test.text()].saved_state = self.__tape.get_data() else: self.__tests[test.text()].state = self.__tape.get_data() def get_data(self) -> dict: data =", "is None: test = QListWidgetItem() test.setText(test_name) test.last_name = test_name test.setFlags(test.flags() | Qt.ItemIsEditable) self.addItem(test)", "for test_name in self.__tests: self.takeItem(self.row(self.__tests[test_name].test)) self.__tests.clear() def save_state(self, test: QListWidgetItem, global_: bool =", "self.DEFAULT_NAME k = 1 new_name = name while new_name in self.__tests and new_name", "new_name != ignore_name: new_name = f'{name}-{k}' k += 1 return new_name def add_test(self,", "event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() links = list() for url in event.mimeData().urls(): links.append(str(url.toLocalFile())) else: event.setDropAction(Qt.MoveAction)", "QListWidgetItem() test.setText(test_name) test.last_name = test_name test.setFlags(test.flags() | Qt.ItemIsEditable) self.addItem(test) if reset: self.__tape.reset() self.__tests[test_name]", "import Tape class Data: test: QListWidgetItem state: dict saved_state: dict class TestsList(QListWidget): DEFAULT_NAME", "new_name = f'{name}-{k}' k += 1 return new_name def add_test(self, test_name: str =", "QtCore from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QListWidget, QAbstractItemView, QListWidgetItem from widgets.tape", "self.setSelectionMode(QAbstractItemView.ExtendedSelection) self.setAcceptDrops(True) self.setDropIndicatorShown(True) def get_test_name(self, name: str = DEFAULT_NAME, ignore_name: str = None)", "import Union from time import time from PyQt5 import QtCore from PyQt5.QtCore import", "self.add_test(test_name, file[test_name], saved_state=file[test_name]) def has_unsaved_data(self) -> bool: return len(self.__tests) > 0 def dragEnterEvent(self,", "def clear(self) -> None: for test_name in self.__tests: self.takeItem(self.row(self.__tests[test_name].test)) self.__tests.clear() def save_state(self, test:", "self.__tests[test_name].state = state if state is not None else Tape.get_empty_data() self.__tests[test_name].saved_state = saved_state", "else: super().dragEnterEvent(event) def dragMoveEvent(self, event): if event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() else: super().dragMoveEvent(event) def dropEvent(self,", "= None, test: QListWidgetItem = None, reset: bool = True, saved_state: dict =", "def dropEvent(self, event): if event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() links = list() for url in", "data # TODO: возможно лучше предложить выбор: загрузка с заменой или с добавлением", "self.__tests: if global_: self.__tests[test.text()].saved_state = self.__tape.get_data() else: self.__tests[test.text()].state = self.__tape.get_data() def get_data(self) ->", "self.addItem(test) if reset: self.__tape.reset() self.__tests[test_name] = Data() self.__tests[test_name].test = test self.__tests[test_name].state = state", "-> None: for test_name in self.__tests: 
self.takeItem(self.row(self.__tests[test_name].test)) self.__tests.clear() def save_state(self, test: QListWidgetItem, global_:", "saved_state: dict class TestsList(QListWidget): DEFAULT_NAME = 'test' def __init__(self, tape: Tape, parent: any", "Drag'n'Drop элементов self.setDragDropMode(QAbstractItemView.DragDrop) # устанавливает возможность сразу нескольких элементов self.setSelectionMode(QAbstractItemView.ExtendedSelection) self.setAcceptDrops(True) self.setDropIndicatorShown(True) def", "dict = None, test: QListWidgetItem = None, reset: bool = True, saved_state: dict", "= QListWidgetItem() test.setText(test_name) test.last_name = test_name test.setFlags(test.flags() | Qt.ItemIsEditable) self.addItem(test) if reset: self.__tape.reset()", "list() for url in event.mimeData().urls(): links.append(str(url.toLocalFile())) else: event.setDropAction(Qt.MoveAction) super().dropEvent(event) def startDrag(self, supported_actions: Union[QtCore.Qt.DropActions,", "def get_state(self, test: QListWidgetItem) -> dict: return self.__tests[test.text()].state def get_saved_state(self, test: QListWidgetItem) ->", "any = None): super().__init__(parent) self.__tape = tape self.__tests = dict() self.__set_list() self.__last_time_dragged =", "возможность сразу нескольких элементов self.setSelectionMode(QAbstractItemView.ExtendedSelection) self.setAcceptDrops(True) self.setDropIndicatorShown(True) def get_test_name(self, name: str = DEFAULT_NAME,", "DEFAULT_NAME = 'test' def __init__(self, tape: Tape, parent: any = None): super().__init__(parent) self.__tape", "или с добавлением в конец def set_from_file(self, file: dict) -> None: self.clear() for", "dict) -> None: self.clear() for test_name in file: self.add_test(test_name, file[test_name], saved_state=file[test_name]) def has_unsaved_data(self)", "None, reset: bool = True, saved_state: dict = None) -> QListWidgetItem: if test_name", "time import time from PyQt5 import QtCore from PyQt5.QtCore import Qt from PyQt5.QtWidgets", "state if state is not None else Tape.get_empty_data() self.__tests[test_name].saved_state = saved_state return test", "return self.__last_time_dragged def __set_list(self) -> None: # устанавливает возможность Drag'n'Drop элементов self.setDragDropMode(QAbstractItemView.DragDrop) #", "0 else None def get_state(self, test: QListWidgetItem) -> dict: return self.__tests[test.text()].state def get_saved_state(self,", "event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() else: super().dragMoveEvent(event) def dropEvent(self, event): if event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() links", "= self.get_test_name() if test is None: test = QListWidgetItem() test.setText(test_name) test.last_name = test_name", "= float() @property def last_time_dragged(self) -> float: return self.__last_time_dragged def __set_list(self) -> None:", "self.takeItem(self.row(test)) if internal_remove: self.__tests.pop(test.text()) def get_last(self) -> Union[QListWidgetItem, None]: return self.item(self.count() - 1)", "self.__tests: data[test_name] = self.__tests[test_name].state return data # TODO: возможно лучше предложить выбор: загрузка", "PyQt5 import QtCore from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QListWidget, QAbstractItemView, QListWidgetItem", "last_time_dragged(self) -> float: return self.__last_time_dragged def __set_list(self) -> None: # устанавливает возможность Drag'n'Drop", "-> dict: return self.__tests[test.text()].saved_state def rename(self, test_name: str, new_test_name: str) -> 
None: self.__tests[new_test_name]", "if reset: self.__tape.reset() self.__tests[test_name] = Data() self.__tests[test_name].test = test self.__tests[test_name].state = state if", "return len(self.__tests) > 0 def dragEnterEvent(self, event): if event.mimeData().hasUrls(): event.accept() else: super().dragEnterEvent(event) def", "for url in event.mimeData().urls(): links.append(str(url.toLocalFile())) else: event.setDropAction(Qt.MoveAction) super().dropEvent(event) def startDrag(self, supported_actions: Union[QtCore.Qt.DropActions, QtCore.Qt.DropAction])", "= None, reset: bool = True, saved_state: dict = None) -> QListWidgetItem: if", "self.__tape.get_data() else: self.__tests[test.text()].state = self.__tape.get_data() def get_data(self) -> dict: data = dict() for", "= f'{name}-{k}' k += 1 return new_name def add_test(self, test_name: str = None,", "state: dict = None, test: QListWidgetItem = None, reset: bool = True, saved_state:", "if internal_remove: self.__tests.pop(test.text()) def get_last(self) -> Union[QListWidgetItem, None]: return self.item(self.count() - 1) if", "def last_time_dragged(self) -> float: return self.__last_time_dragged def __set_list(self) -> None: # устанавливает возможность", "= new_test_name def clear(self) -> None: for test_name in self.__tests: self.takeItem(self.row(self.__tests[test_name].test)) self.__tests.clear() def", "f'{name}-{k}' k += 1 return new_name def add_test(self, test_name: str = None, state:", "-> None: self.clear() for test_name in file: self.add_test(test_name, file[test_name], saved_state=file[test_name]) def has_unsaved_data(self) ->", "None, test: QListWidgetItem = None, reset: bool = True, saved_state: dict = None)", "in self.__tests: self.takeItem(self.row(self.__tests[test_name].test)) self.__tests.clear() def save_state(self, test: QListWidgetItem, global_: bool = False) ->", "bool: return len(self.__tests) > 0 def dragEnterEvent(self, event): if event.mimeData().hasUrls(): event.accept() else: super().dragEnterEvent(event)", "name: name = self.DEFAULT_NAME k = 1 new_name = name while new_name in", "!= ignore_name: new_name = f'{name}-{k}' k += 1 return new_name def add_test(self, test_name:", "-> float: return self.__last_time_dragged def __set_list(self) -> None: # устанавливает возможность Drag'n'Drop элементов", "None: test_name = self.get_test_name() if test is None: test = QListWidgetItem() test.setText(test_name) test.last_name", "Data() self.__tests[test_name].test = test self.__tests[test_name].state = state if state is not None else", "сразу нескольких элементов self.setSelectionMode(QAbstractItemView.ExtendedSelection) self.setAcceptDrops(True) self.setDropIndicatorShown(True) def get_test_name(self, name: str = DEFAULT_NAME, ignore_name:", "event.accept() else: super().dragEnterEvent(event) def dragMoveEvent(self, event): if event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() else: super().dragMoveEvent(event) def", "event.setDropAction(Qt.CopyAction) event.accept() links = list() for url in event.mimeData().urls(): links.append(str(url.toLocalFile())) else: event.setDropAction(Qt.MoveAction) super().dropEvent(event)", "new_name def add_test(self, test_name: str = None, state: dict = None, test: QListWidgetItem", "предложить выбор: загрузка с заменой или с добавлением в конец def set_from_file(self, file:", "None: if test.text() in self.__tests: if global_: self.__tests[test.text()].saved_state = self.__tape.get_data() else: self.__tests[test.text()].state =", "dict: return 
self.__tests[test.text()].saved_state def rename(self, test_name: str, new_test_name: str) -> None: self.__tests[new_test_name] =", "= None): super().__init__(parent) self.__tape = tape self.__tests = dict() self.__set_list() self.__last_time_dragged = float()", "лучше предложить выбор: загрузка с заменой или с добавлением в конец def set_from_file(self,", "new_name = name while new_name in self.__tests and new_name != ignore_name: new_name =", "# устанавливает возможность сразу нескольких элементов self.setSelectionMode(QAbstractItemView.ExtendedSelection) self.setAcceptDrops(True) self.setDropIndicatorShown(True) def get_test_name(self, name: str", "self.__tests.pop(test.text()) def get_last(self) -> Union[QListWidgetItem, None]: return self.item(self.count() - 1) if self.count() >", "is None: test_name = self.get_test_name() if test is None: test = QListWidgetItem() test.setText(test_name)", "from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QListWidget, QAbstractItemView, QListWidgetItem from widgets.tape import", "| Qt.ItemIsEditable) self.addItem(test) if reset: self.__tape.reset() self.__tests[test_name] = Data() self.__tests[test_name].test = test self.__tests[test_name].state", "time from PyQt5 import QtCore from PyQt5.QtCore import Qt from PyQt5.QtWidgets import QListWidget,", "event.setDropAction(Qt.CopyAction) event.accept() else: super().dragMoveEvent(event) def dropEvent(self, event): if event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() links =", "ignore_name: new_name = f'{name}-{k}' k += 1 return new_name def add_test(self, test_name: str", "import time from PyQt5 import QtCore from PyQt5.QtCore import Qt from PyQt5.QtWidgets import", "add_test(self, test_name: str = None, state: dict = None, test: QListWidgetItem = None,", "1) if self.count() > 0 else None def get_state(self, test: QListWidgetItem) -> dict:", "QListWidgetItem) -> dict: return self.__tests[test.text()].saved_state def rename(self, test_name: str, new_test_name: str) -> None:", "self.__tests[new_test_name] = self.__tests.pop(test_name) self.__tests[new_test_name].test.last_name = new_test_name def clear(self) -> None: for test_name in", "= self.__tape.get_data() def get_data(self) -> dict: data = dict() for test_name in self.__tests:", "test_name: str = None, state: dict = None, test: QListWidgetItem = None, reset:", "bool = True) -> None: self.takeItem(self.row(test)) if internal_remove: self.__tests.pop(test.text()) def get_last(self) -> Union[QListWidgetItem,", "Union from time import time from PyQt5 import QtCore from PyQt5.QtCore import Qt", "+= 1 return new_name def add_test(self, test_name: str = None, state: dict =", "self.__last_time_dragged = float() @property def last_time_dragged(self) -> float: return self.__last_time_dragged def __set_list(self) ->", "widgets.tape import Tape class Data: test: QListWidgetItem state: dict saved_state: dict class TestsList(QListWidget):", "set_from_file(self, file: dict) -> None: self.clear() for test_name in file: self.add_test(test_name, file[test_name], saved_state=file[test_name])", "= dict() for test_name in self.__tests: data[test_name] = self.__tests[test_name].state return data # TODO:", "Qt.ItemIsEditable) self.addItem(test) if reset: self.__tape.reset() self.__tests[test_name] = Data() self.__tests[test_name].test = test self.__tests[test_name].state =", "return new_name def add_test(self, test_name: str = None, state: dict = None, test:", "QListWidgetItem state: dict saved_state: dict class TestsList(QListWidget): 
DEFAULT_NAME = 'test' def __init__(self, tape:", "def dragMoveEvent(self, event): if event.mimeData().hasUrls(): event.setDropAction(Qt.CopyAction) event.accept() else: super().dragMoveEvent(event) def dropEvent(self, event): if", "= True) -> None: self.takeItem(self.row(test)) if internal_remove: self.__tests.pop(test.text()) def get_last(self) -> Union[QListWidgetItem, None]:", "def set_from_file(self, file: dict) -> None: self.clear() for test_name in file: self.add_test(test_name, file[test_name],", "def save_state(self, test: QListWidgetItem, global_: bool = False) -> None: if test.text() in", "None) -> QListWidgetItem: if test_name is None: test_name = self.get_test_name() if test is" ]
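A minimal usage sketch of the list above. It assumes TestsList is importable alongside the real widgets.tape.Tape (add_test falls back to Tape.get_empty_data()); TapeStub and the printed dict shape are illustrative stand-ins, not part of the original module.

import sys
from PyQt5.QtWidgets import QApplication

class TapeStub:
    # Hypothetical stand-in for the tape widget: only reset() and
    # get_data() are exercised by this sketch.
    def reset(self):
        pass
    def get_data(self):
        return {'head': 0, 'cells': {}}

app = QApplication(sys.argv)        # QListWidget needs a running QApplication
tests = TestsList(tape=TapeStub())
first = tests.add_test()            # auto-named 'test'
tests.add_test()                    # name collision resolved to 'test-1'
tests.save_state(first)             # snapshot the tape into the item's state
print(tests.get_data())             # {'test': {...}, 'test-1': {...}}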
[ "raise Exception('Failed to move from %s to %s. Reason %s' % (srcFilePath, dstFilePath,", "dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed", "Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\");", "% (filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename", "raise e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder =", "gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\");", "= \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames): print(\"\\n\" +", "dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except", "in fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath) or", "dstFilePath); except Exception as e: raise Exception('Failed to copy from %s to %s.", "filename); dst = os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath);", "% (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for", "clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as e:", "fname=gui.filenames; else: fname=filenames; for f in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \"", "os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def", "except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\");", "try: shutil.move(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to move from %s", "= \"./STAGING\"; def printAction(action, fileNames): print(\"\\n\" + action + \" \" + str(len(fileNames))", "+ \" files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted", "% (srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder", "BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames): print(\"\\n\" + action + \" \" +", "= []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f in fname: fileNameOnly = getFileNameOnly(f);", "\"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\");", "as e: gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname", "os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in fileNames: src = 
os.path.join(srcFolder, filename); dst =", "Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging", "e)); def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in fileNames:", "+ SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e;", "filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception as e:", "os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e: raise Exception('Failed", "copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to copy", "shutil.copyfile(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to copy from %s to", "%s' % (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames);", "\"./STAGING\"; def printAction(action, fileNames): print(\"\\n\" + action + \" \" + str(len(fileNames)) +", "shutil.rmtree(filePath) except Exception as e: raise Exception('Failed to delete %s. Reason: %s' %", "= getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f + \" to \" + SOURCE_FOLDER);", "+ action + \" \" + str(len(fileNames)) + \" files\"); def countFiles(pathToFolder): fileNames", "+ str(len(fileNames)) + \" files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames); def", "\"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\");", "try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e;", "= os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try:", "to %s. 
Reason %s' % (srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\");", "os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e: raise Exception('Failed to delete %s.", "e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception", "e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames; else:", "TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames): print(\"\\n\" + action +", "if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up", "fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f + \" to \"", "%s' % (srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp", "moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception as e: raise", "fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename);", "%s. Reason %s' % (srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try:", "def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as", "move from %s to %s. Reason %s' % (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder,", "+ \" \" + str(len(fileNames)) + \" files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder);", "as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER);", "str(len(fileNames)) + \" files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath):", "dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in fileNames: src = os.path.join(srcFolder,", "deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in fileNames: file_path = os.path.join(pathToFolder,", "except Exception as e: raise Exception('Failed to move from %s to %s. 
Reason", "Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\");", "os, shutil from datetime import datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER", "shutil from datetime import datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER =", "fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in fileNames: file_path = os.path.join(pathToFolder, filename);", "datetime import datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def", "f + \" to \" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as", "e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER,", "backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for", "for f in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f +", "gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')),", "SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup files\"); def", "printAction(\"Copying\", fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder,", "filename in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); copyFile(src, dst);", "except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\");", "SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish", "\"red\"); raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname = []; if(len(filenames)<1):", "+ f + \" to \" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception", "deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as", "dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to copy from", "fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename);", "in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); moveFile(src, dst); def", "fileNames): print(\"\\n\" + action + \" \" + str(len(fileNames)) + \" files\"); def", "[]; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing", "Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing", "gui.consoleInsert(\"Backing up \" + f + \" to \" + SOURCE_FOLDER); try: copyFile(f,", "= \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames): 
print(\"\\n\" + action + \"", "gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder", "delete %s. Reason: %s' % (filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder);", "dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to move from", "e: raise Exception('Failed to copy from %s to %s. Reason %s' % (srcFilePath,", "from %s to %s. Reason %s' % (srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning", "clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as e:", "try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to copy from %s", "clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as e:", "gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui):", "= os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder):", "files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\");", "print(\"\\n\" + action + \" \" + str(len(fileNames)) + \" files\"); def countFiles(pathToFolder):", "getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames);", "(filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in", "dst = os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except", "shutil.move(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to move from %s to", "filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in", "except Exception as e: raise Exception('Failed to delete %s. Reason: %s' % (filePath,", "fileNames); for filename in fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try:", "fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath,", "fname = []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f in fname: fileNameOnly =", "%s to %s. Reason %s' % (srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp", "Exception('Failed to copy from %s to %s. 
Reason %s' % (srcFilePath, dstFilePath, e));", "Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning", "\" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise", "e: raise Exception('Failed to move from %s to %s. Reason %s' % (srcFilePath,", "os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath,", "os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as", "Reason: %s' % (filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames);", "as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER);", "gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui,", "e: raise Exception('Failed to delete %s. Reason: %s' % (filePath, e)); def moveAllFiles(srcFolder,", "try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup", "import datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action,", "def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as", "def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in fileNames: src", "gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e),", "gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e),", "\" \" + str(len(fileNames)) + \" files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return", "file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath);", "(srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\");", "dst = os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except", "def printAction(action, fileNames): print(\"\\n\" + action + \" \" + str(len(fileNames)) + \"", "Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try:", "splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in fileNames: file_path", "Exception('Failed to delete %s. 
Reason: %s' % (filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames", "moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to move", "def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\",", "gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui):", "e: gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result", "up files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f in fname:", "copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup files\");", "copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in fileNames: src =", "for filename in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); copyFile(src,", "\"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage result\"); if not os.path.exists(os.path.dirname(stagingFolder)): os.makedirs(stagingFolder); copyAllFiles(TARGET_FOLDER, stagingFolder);", "\" + f + \" to \" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except", "fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f + \" to \" +", "copy from %s to %s. Reason %s' % (srcFilePath, dstFilePath, e)); def clearTempFolder(gui):", "= os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try:", "deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def", "def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to", "action + \" \" + str(len(fileNames)) + \" files\"); def countFiles(pathToFolder): fileNames =", "return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in fileNames:", "or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e: raise Exception('Failed to", "from %s to %s. 
Reason %s' % (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder):", "Reason %s' % (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\",", "Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result", "files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage result\");", "Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise", "getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f + \" to \" + SOURCE_FOLDER); try:", "Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\");", "def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage result\"); if", "os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception as", "dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in", "def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return", "filename); dst = os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath);", "fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath,", "= filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename", "os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath,", "dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed", "files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f in fname: fileNameOnly", "def deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception", "def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in fileNames: src", "deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def", "countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1];", "(srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename", "if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif 
os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e: raise", "len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder);", "return len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames =", "in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); copyFile(src, dst); def", "fileNames = os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def", "= os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename); dst", "to %s. Reason %s' % (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames =", "os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e: raise Exception('Failed to delete", "= os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception", "def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as e: raise Exception('Failed to", "Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\");", "filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f", "try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e;", "= os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path);", "deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except", "raise Exception('Failed to delete %s. Reason: %s' % (filePath, e)); def moveAllFiles(srcFolder, dstFolder):", "as e: raise Exception('Failed to move from %s to %s. Reason %s' %", "+ \" to \" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e:", "raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except", "except Exception as e: raise Exception('Failed to copy from %s to %s. Reason", "e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\",", "dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in fileNames: src = os.path.join(srcFolder,", "printAction(\"Deleting\", fileNames); for filename in fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath):", "as e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging", "%s to %s. 
Reason %s' % (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames", "os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath):", "datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames):", "%s. Reason: %s' % (filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\",", "try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e:", "printAction(action, fileNames): print(\"\\n\" + action + \" \" + str(len(fileNames)) + \" files\");", "%s' % (filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for", "src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath):", "Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui, filenames=[]):", "splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for", "%s. Reason %s' % (srcFilePath, dstFilePath, e)); def copyAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder);", "backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage", "e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception", "filename); deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif os.path.isdir(filePath): shutil.rmtree(filePath)", "\" to \" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e),", "gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname = [];", "gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage result\"); if not os.path.exists(os.path.dirname(stagingFolder)): os.makedirs(stagingFolder);", "gui.consoleInsert(\"Backing up files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames; for f in", "stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage result\"); if not", "= os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename); dst", "Exception as e: raise Exception('Failed to move from %s to %s. Reason %s'", "raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except", "as e: raise Exception('Failed to copy from %s to %s. 
Reason %s' %", "= os.path.join(dstFolder, filename); moveFile(src, dst); def moveFile(srcFilePath, dstFilePath): try: shutil.move(srcFilePath, dstFilePath); except Exception", "elif os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e: raise Exception('Failed to delete %s. Reason:", "\"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames): print(\"\\n\" + action + \" \"", "Reason %s' % (srcFilePath, dstFilePath, e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER);", "e)); def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception", "gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e),", "if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage result\"); if not os.path.exists(os.path.dirname(stagingFolder)):", "\" + str(len(fileNames)) + \" files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames);", "= os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath): os.unlink(filePath); elif", "dstFilePath); except Exception as e: raise Exception('Failed to move from %s to %s.", "\" files\"); def countFiles(pathToFolder): fileNames = os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted =", "gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning Result Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder", "Exception as e: raise Exception('Failed to copy from %s to %s. Reason %s'", "as e: raise Exception('Failed to delete %s. Reason: %s' % (filePath, e)); def", "os.listdir(srcFolder); printAction(\"Copying\", fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename); dst =", "Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None):", "def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames; else: fname=filenames;", "Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearResultFolder(gui): gui.consoleInsert(\"Cleaning", "e: gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging", "for filename in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); moveFile(src,", "filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as e:", "def clearTempFolder(gui): gui.consoleInsert(\"Cleaning Temp Folder\"); try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as", "e: gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname =", "to copy from %s to %s. 
Reason %s' % (srcFilePath, dstFilePath, e)); def", "up \" + f + \" to \" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly);", "except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None):", "printAction(\"Moving\", fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder,", "e)); def moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in fileNames:", "in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f + \" to", "fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath) or os.path.islink(filePath):", "\"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames): print(\"\\n\" + action", "Exception as e: raise Exception('Failed to delete %s. Reason: %s' % (filePath, e));", "os.path.isdir(filePath): shutil.rmtree(filePath) except Exception as e: raise Exception('Failed to delete %s. Reason: %s'", "try: deleteAllFiles(SOURCE_FOLDER); gui.consoleInsert(\"Temp Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e;", "filename in fileNames: src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); moveFile(src, dst);", "for filename in fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try: if", "deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def", "raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up files...\"); fname = []; if(len(filenames)<1): fname=gui.filenames;", "import os, shutil from datetime import datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\";", "<gh_stars>0 import os, shutil from datetime import datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER =", "fname=filenames; for f in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f", "copyFile(src, dst); def copyFile(srcFilePath, dstFilePath): try: shutil.copyfile(srcFilePath, dstFilePath); except Exception as e: raise", "except Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def backupFiles(gui, filenames=[]): gui.consoleInsert(\"Backing up", "result\", \"blue\"); stagingFolder = os.path.join(BACKUP_FOLDER, str(datetime.today().strftime('%d-%m-%Y_%H%M%S')), \"stage result\"); if not os.path.exists(os.path.dirname(stagingFolder)): os.makedirs(stagingFolder); copyAllFiles(TARGET_FOLDER,", "to move from %s to %s. 
Reason %s' % (srcFilePath, dstFilePath, e)); def", "os.listdir(pathToFolder); return len(fileNames); def getFileNameOnly(filePath): splitted = filePath.split(\"/\"); return splitted[len(splitted)-1]; def deleteAllFiles(pathToFolder): fileNames", "f in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \" + f + \"", "fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in fileNames: src = os.path.join(srcFolder, filename);", "from datetime import datetime; SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\";", "def deleteAllFiles(pathToFolder): fileNames = os.listdir(pathToFolder); printAction(\"Deleting\", fileNames); for filename in fileNames: file_path =", "SOURCE_FOLDER = \"./temp\"; TARGET_FOLDER = \"./RESULT\"; BACKUP_FOLDER = \"./STAGING\"; def printAction(action, fileNames): print(\"\\n\"", "Exception as e: gui.consoleInsert(str(e), \"red\"); raise e; def clearStagingFolder(gui): gui.consoleInsert(\"Cleaning Staging Folder\"); try:", "filename in fileNames: file_path = os.path.join(pathToFolder, filename); deleteFile(file_path); def deleteFile(filePath): try: if os.path.isfile(filePath)", "to delete %s. Reason: %s' % (filePath, e)); def moveAllFiles(srcFolder, dstFolder): fileNames =", "moveAllFiles(srcFolder, dstFolder): fileNames = os.listdir(srcFolder); printAction(\"Moving\", fileNames); for filename in fileNames: src =", "\"red\"); raise e; gui.consoleInsert(\"Finish backup files\"); def stagingResult(gui=None): if(gui!=None): gui.consoleInsert(\"Staging result\", \"blue\"); stagingFolder", "Folder\"); try: deleteAllFiles(TARGET_FOLDER); gui.consoleInsert(\"Result Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise", "Exception('Failed to move from %s to %s. Reason %s' % (srcFilePath, dstFilePath, e));", "else: fname=filenames; for f in fname: fileNameOnly = getFileNameOnly(f); gui.consoleInsert(\"Backing up \" +", "src = os.path.join(srcFolder, filename); dst = os.path.join(dstFolder, filename); copyFile(src, dst); def copyFile(srcFilePath, dstFilePath):", "raise Exception('Failed to copy from %s to %s. Reason %s' % (srcFilePath, dstFilePath,", "to \" + SOURCE_FOLDER); try: copyFile(f, SOURCE_FOLDER+\"/\"+fileNameOnly); except Exception as e: gui.consoleInsert(str(e), \"red\");", "Folder\"); try: deleteAllFiles(BACKUP_FOLDER); gui.consoleInsert(\"Staging Folder Cleaned\"); except Exception as e: gui.consoleInsert(str(e), \"red\"); raise" ]
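# A minimal usage sketch for the helpers above, not part of the source: the
# `gui` object the module expects is only assumed here to expose a
# `filenames` list and consoleInsert(text, color=None), so a stub stands in.
class _StubGui:
    def __init__(self, filenames):
        self.filenames = filenames

    def consoleInsert(self, text, color=None):
        print(text if color is None else "[%s] %s" % (color, text))


if __name__ == "__main__":
    for folder in (SOURCE_FOLDER, TARGET_FOLDER, BACKUP_FOLDER):
        os.makedirs(folder, exist_ok=True)
    with open("./some_input.txt", "w") as fh:  # hypothetical input file
        fh.write("example\n")
    backupFiles(_StubGui(filenames=[]), ["./some_input.txt"])
    stagingResult()  # snapshots the (possibly empty) RESULT folder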
[ "== 'darwin': from ..lib.platform.darwin.file_system import * elif sys.platform == 'win32': from ..lib.platform.win32.file_system import", "if sys.platform == 'darwin': from ..lib.platform.darwin.file_system import * elif sys.platform == 'win32': from", "sys if sys.platform == 'darwin': from ..lib.platform.darwin.file_system import * elif sys.platform == 'win32':", "elif sys.platform == 'win32': from ..lib.platform.win32.file_system import * elif sys.platform in ('linux', 'linux2'):", "import * elif sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system import * else: from", "..lib.platform.darwin.file_system import * elif sys.platform == 'win32': from ..lib.platform.win32.file_system import * elif sys.platform", "'win32': from ..lib.platform.win32.file_system import * elif sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system import", "from ..lib.platform.darwin.file_system import * elif sys.platform == 'win32': from ..lib.platform.win32.file_system import * elif", "elif sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system import * else: from ..lib.platform.unsupported.file_system import", "sys.platform == 'darwin': from ..lib.platform.darwin.file_system import * elif sys.platform == 'win32': from ..lib.platform.win32.file_system", "sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system import * else: from ..lib.platform.unsupported.file_system import *", "* elif sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system import * else: from ..lib.platform.unsupported.file_system", "import sys if sys.platform == 'darwin': from ..lib.platform.darwin.file_system import * elif sys.platform ==", "..lib.platform.win32.file_system import * elif sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system import * else:", "'linux2'): from ..lib.platform.linux.file_system import * else: from ..lib.platform.unsupported.file_system import * def path_for_url(path): return", "from ..lib.platform.linux.file_system import * else: from ..lib.platform.unsupported.file_system import * def path_for_url(path): return _path_for_url(path)", "import * elif sys.platform == 'win32': from ..lib.platform.win32.file_system import * elif sys.platform in", "from ..lib.platform.win32.file_system import * elif sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system import *", "in ('linux', 'linux2'): from ..lib.platform.linux.file_system import * else: from ..lib.platform.unsupported.file_system import * def", "('linux', 'linux2'): from ..lib.platform.linux.file_system import * else: from ..lib.platform.unsupported.file_system import * def path_for_url(path):", "'darwin': from ..lib.platform.darwin.file_system import * elif sys.platform == 'win32': from ..lib.platform.win32.file_system import *", "* elif sys.platform == 'win32': from ..lib.platform.win32.file_system import * elif sys.platform in ('linux',", "sys.platform == 'win32': from ..lib.platform.win32.file_system import * elif sys.platform in ('linux', 'linux2'): from", "== 'win32': from ..lib.platform.win32.file_system import * elif sys.platform in ('linux', 'linux2'): from ..lib.platform.linux.file_system" ]
[ "input file from the path name was not found by loader\"\"\" pass class", "class FileIsNotFoundError(Error): \"\"\"Raised when the input file from the path name was not", "pass class FileIsNotFoundError(Error): \"\"\"Raised when the input file from the path name was", "was not found by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data", "when the loaded data set has missing one or more of the important", "\"\"\"Raised when the path name has wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when", "pass class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data set has missing one or", "when the path name has wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when the", "format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when the input file from the path name", "WrongPathNameFormatError(Error): \"\"\"Raised when the path name has wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised", "the input file from the path name was not found by loader\"\"\" pass", "class WrongPathNameFormatError(Error): \"\"\"Raised when the path name has wrong format\"\"\" pass class FileIsNotFoundError(Error):", "the path name was not found by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when", "file from the path name was not found by loader\"\"\" pass class MissingImportantColumnsError(Error):", "from the path name was not found by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised", "when the input file from the path name was not found by loader\"\"\"", "path name has wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when the input file", "the loaded data set has missing one or more of the important columns\"\"\"", "<reponame>Sale1996/automatization_of_data_mining_project<gh_stars>0 class Error(Exception): \"\"\"Base class for other exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when", "FileIsNotFoundError(Error): \"\"\"Raised when the input file from the path name was not found", "by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data set has missing", "\"\"\"Base class for other exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when the path name", "the path name has wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when the input", "has wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when the input file from the", "class for other exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when the path name has", "pass class WrongPathNameFormatError(Error): \"\"\"Raised when the path name has wrong format\"\"\" pass class", "wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when the input file from the path", "not found by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data set", "\"\"\"Raised when the input file from the path name was not found by", "class Error(Exception): \"\"\"Base class for other exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when the", "name was not found by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded", "path name was not found by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when the", "\"\"\"Raised when the loaded data set has missing one or more of the", 
"exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when the path name has wrong format\"\"\" pass", "for other exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when the path name has wrong", "found by loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data set has", "name has wrong format\"\"\" pass class FileIsNotFoundError(Error): \"\"\"Raised when the input file from", "loader\"\"\" pass class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data set has missing one", "loaded data set has missing one or more of the important columns\"\"\" pass", "other exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when the path name has wrong format\"\"\"", "class MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data set has missing one or more", "Error(Exception): \"\"\"Base class for other exceptions\"\"\" pass class WrongPathNameFormatError(Error): \"\"\"Raised when the path", "MissingImportantColumnsError(Error): \"\"\"Raised when the loaded data set has missing one or more of" ]
[ "from odoo import api, fields, models class Event(models.Model): _inherit = \"event.event\" meeting_room_ids =", "= event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True elif not", "Odoo. See LICENSE file for full copyright and licensing details. from odoo import", "LICENSE file for full copyright and licensing details. from odoo import api, fields,", "meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count } for event in", "= fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\",", "@api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At type onchange: synchronize. At website_menu update:", "= event.event_type_id.community_menu elif event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu): event.community_menu =", "!= event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu and (event.website_menu != event._origin.website_menu or not", "event.community_menu = True elif not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count", "for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation", "event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu and (event.website_menu !=", "fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let", "self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for", "\"community_menu\") def _compute_community_menu(self): \"\"\" At type onchange: synchronize. At website_menu update: synchronize. \"\"\"", "event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in self:", "Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At type onchange: synchronize. 
At", "readonly=False, store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At", "\"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for result in", "in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu", "event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True elif not event.community_menu or not", "fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow", "coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright", "= self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"]", "def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count =", "result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count } for event in self: event.meeting_room_count =", "Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At type onchange: synchronize. At website_menu", "_inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\",", "event._origin.community_menu: event.meeting_room_allow_creation = True elif not event.community_menu or not event.meeting_room_allow_creation: event.meeting_room_allow_creation = False", "if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu", "{ result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count } for event in self: event.meeting_room_count", "for event in self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self):", "and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True elif not event.community_menu or not event.meeting_room_allow_creation:", "in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu", "synchronize. 
\"\"\" for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu", "= True elif not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count =", "def _compute_meeting_room_allow_creation(self): for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation", "-*- # Part of Odoo. See LICENSE file for full copyright and licensing", "\"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room", "fields, models class Event(models.Model): _inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\")", "= fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean(", "event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True elif not event.community_menu", "} for event in self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def", "!= event._origin.community_menu: event.meeting_room_allow_creation = True elif not event.community_menu or not event.meeting_room_allow_creation: event.meeting_room_allow_creation =", "\"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in self: if event.event_type_id and event.event_type_id !=", "fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count }", "copyright and licensing details. 
from odoo import api, fields, models class Event(models.Model): _inherit", "result in meeting_room_count } for event in self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\",", "and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu != event._origin.community_menu:", "event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True elif", "meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation =", "help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At type onchange:", "self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and", "event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif", "True elif not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group(", "= \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\")", "event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"],", "models class Event(models.Model): _inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count", "groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count } for", "licensing details. from odoo import api, fields, models class Event(models.Model): _inherit = \"event.event\"", "meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]:", "string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\",", "odoo import api, fields, models class Event(models.Model): _inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\",", "# Part of Odoo. See LICENSE file for full copyright and licensing details.", "!= event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation =", "full copyright and licensing details. 
from odoo import api, fields, models class Event(models.Model):", "event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu !=", "in self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event", "Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def", "(event.website_menu != event._origin.website_menu or not event.community_menu): event.community_menu = True elif not event.website_menu: event.community_menu", "not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\",", "update: synchronize. \"\"\" for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id:", "event.community_menu = event.event_type_id.community_menu elif event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu): event.community_menu", "-*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full", "!= event._origin.website_menu or not event.community_menu): event.community_menu = True elif not event.website_menu: event.community_menu =", "event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif", "if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu and (event.website_menu", "fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\",", "self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu and", "type onchange: synchronize. At website_menu update: synchronize. \"\"\" for event in self: if", "event.event_type_id.community_menu elif event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu): event.community_menu = True", "compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create", "def _compute_community_menu(self): \"\"\" At type onchange: synchronize. At website_menu update: synchronize. \"\"\" for", "= { result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count } for event in self:", "meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in self: if event.event_type_id", "onchange: synchronize. At website_menu update: synchronize. \"\"\" for event in self: if event.event_type_id", "utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and", "Part of Odoo. 
See LICENSE file for full copyright and licensing details. from", "event._origin.website_menu or not event.community_menu): event.community_menu = True elif not event.website_menu: event.community_menu = False", "event.community_menu): event.community_menu = True elif not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self):", "False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], )", "# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for", "in meeting_room_count } for event in self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\",", "class Event(models.Model): _inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count =", "and event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu and (event.website_menu != event._origin.website_menu", "\"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id:", "\"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At type onchange: synchronize. At website_menu update: synchronize.", "event in self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for", "event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu and (event.website_menu != event._origin.website_menu or", "self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count", "0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in self: if event.event_type_id and", "and (event.website_menu != event._origin.website_menu or not event.community_menu): event.community_menu = True elif not event.website_menu:", "for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu", "See LICENSE file for full copyright and licensing details. from odoo import api,", "website_menu update: synchronize. 
\"\"\" for event in self: if event.event_type_id and event.event_type_id !=", "meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create Rooms\")", "event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu): event.community_menu = True elif not", "Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At type onchange: synchronize.", "elif event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True elif not event.community_menu or", "rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\",", "meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False,", "At type onchange: synchronize. At website_menu update: synchronize. \"\"\" for event in self:", "api, fields, models class Event(models.Model): _inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting", "<filename>addons/website_event_meet/models/event_event.py # -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file", "_compute_meeting_room_allow_creation(self): for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation =", "or not event.community_menu): event.community_menu = True elif not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\")", "_compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = {", "for full copyright and licensing details. from odoo import api, fields, models class", "details. from odoo import api, fields, models class Event(models.Model): _inherit = \"event.event\" meeting_room_ids", "file for full copyright and licensing details. from odoo import api, fields, models", "of Odoo. See LICENSE file for full copyright and licensing details. 
from odoo", "= False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"],", "\"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\")", ") meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for result in meeting_room_count } for event", "\"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation", "event.event_type_id != event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation", "\"\"\" At type onchange: synchronize. At website_menu update: synchronize. \"\"\" for event in", "result[\"event_id_count\"] for result in meeting_room_count } for event in self: event.meeting_room_count = meeting_room_count.get(event.id,", "event._origin.event_type_id: event.community_menu = event.event_type_id.community_menu elif event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu):", "At website_menu update: synchronize. \"\"\" for event in self: if event.event_type_id and event.event_type_id", "store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\" At type", "for result in meeting_room_count } for event in self: event.meeting_room_count = meeting_room_count.get(event.id, 0)", "not event.community_menu): event.community_menu = True elif not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def", "and licensing details. from odoo import api, fields, models class Event(models.Model): _inherit =", "count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors", "self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in", "elif not event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\",", "Event(models.Model): _inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\", string=\"Meeting rooms\") meeting_room_count = fields.Integer(\"Room", "synchronize. At website_menu update: synchronize. 
\"\"\" for event in self: if event.event_type_id and", "Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self):", "\"\"\" for event in self: if event.event_type_id and event.event_type_id != event._origin.event_type_id: event.community_menu =", "= fields.Integer(\"Room count\", compute=\"_compute_meeting_room_count\") meeting_room_allow_creation = fields.Boolean( \"Allow Room Creation\", compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True,", "_compute_community_menu(self): \"\"\" At type onchange: synchronize. At website_menu update: synchronize. \"\"\" for event", "@api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count", "import api, fields, models class Event(models.Model): _inherit = \"event.event\" meeting_room_ids = fields.One2many(\"event.meeting.room\", \"event_id\",", "event._origin.event_type_id: event.meeting_room_allow_creation = event.event_type_id.meeting_room_allow_creation elif event.community_menu and event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True", "elif event.website_menu and (event.website_menu != event._origin.website_menu or not event.community_menu): event.community_menu = True elif", "@api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in self: if event.event_type_id and event.event_type_id", "= meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\") def _compute_meeting_room_allow_creation(self): for event in self: if", "domain=[(\"event_id\", \"in\", self.ids)], fields=[\"id:count\"], groupby=[\"event_id\"], ) meeting_room_count = { result[\"event_id\"][0]: result[\"event_id_count\"] for result", "meeting_room_count } for event in self: event.meeting_room_count = meeting_room_count.get(event.id, 0) @api.depends(\"event_type_id\", \"community_menu\", \"meeting_room_allow_creation\")", "event.community_menu != event._origin.community_menu: event.meeting_room_allow_creation = True elif not event.community_menu or not event.meeting_room_allow_creation: event.meeting_room_allow_creation", "compute=\"_compute_meeting_room_allow_creation\", readonly=False, store=True, help=\"Let Visitors Create Rooms\") @api.depends(\"event_type_id\", \"website_menu\", \"community_menu\") def _compute_community_menu(self): \"\"\"", "event.website_menu: event.community_menu = False @api.depends(\"meeting_room_ids\") def _compute_meeting_room_count(self): meeting_room_count = self.env[\"event.meeting.room\"].sudo().read_group( domain=[(\"event_id\", \"in\", self.ids)]," ]
[ "cur.execute( \"\"\" SELECT DISTINCT dataset FROM entity WHERE geojson != \"\" \"\"\" )", "\"\"\" ) geography_datasets = [x[0] for x in cur] conn.close() return geography_datasets def", "proc.check_returncode() # raise exception on nonz-ero return code except subprocess.CalledProcessError as e: print(f\"\\n----", "argparse import sqlite3 import subprocess import multiprocessing as mp from pathlib import Path", "default=Path(\"var/cache/\"), help=\"The numbers available to use (six must be provided)\", ) cmd_args =", "capture_output=True, text=True) try: proc.check_returncode() # raise exception on nonz-ero return code except subprocess.CalledProcessError", "entity WHERE geojson != \"\" \"\"\" ) geography_datasets = [x[0] for x in", "create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script to", "proc = subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() # raise exception on nonz-ero return", "to the entity database\", ) parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers", "\"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\",", "build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script to build mbtiles databases\")", "sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\",", "\",\".join(x[0] for x in cur) results = results.rstrip(\",\") return results def create_geojson_file(features, output_path,", "help=\"The numbers available to use (six must be provided)\", ) cmd_args = parser.parse_args()", "def run(command): proc = subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() # raise exception on", "import multiprocessing as mp from pathlib import Path from itertools import repeat def", "import argparse import sqlite3 import subprocess import multiprocessing as mp from pathlib import", "def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\",", "= results.rstrip(\",\") return results def create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features", "\"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def build_dataset_tiles(output_path, dataset): build_tiles_cmd = [", "WHERE geojson != \"\" \"\"\" ) geography_datasets = [x[0] for x in cur]", "print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur", "type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available to use (six must be provided)\",", "\"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ] query =", "open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def build_dataset_tiles(output_path, dataset): 
build_tiles_cmd = [ \"tippecanoe\", \"-z15\",", "raise e return proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute(", "\"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ] query = \"\"\" SELECT json_patch(entity.geojson,", "(dataset,)) else: cur.execute(query) results = \",\".join(x[0] for x in cur) results = results.rstrip(\",\")", "SELECT DISTINCT dataset FROM entity WHERE geojson != \"\" \"\"\" ) geography_datasets =", "\"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path,", "\"IFNULL(entity.json, '{}')\" \")\", ] query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT", "== ?\" cur.execute(query, (dataset,)) else: cur.execute(query) results = \",\".join(x[0] for x in cur)", "output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as", "STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return proc def get_geography_datasets(entity_model_path): conn =", "geography_datasets = [x[0] for x in cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None):", "FROM entity WHERE geojson != \"\" \"\"\" ) geography_datasets = [x[0] for x", "conn.cursor() if dataset: query += \"AND entity.dataset == ?\" cur.execute(query, (dataset,)) else: cur.execute(query)", "nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\", ) parser.add_argument( \"--output-dir\", type=Path, nargs=1,", "= [x[0] for x in cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn", "!= '' \"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor() if dataset: query += \"AND", ") cmd_args = parser.parse_args() entity_path = cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path)", "# raise exception on nonz-ero return code except subprocess.CalledProcessError as e: print(f\"\\n---- STDERR", "dataset FROM entity WHERE geojson != \"\" \"\"\" ) geography_datasets = [x[0] for", "geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\",", "for x in cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path)", "database\", ) parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available to use", "nonz-ero return code except subprocess.CalledProcessError as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\")", "= sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset FROM entity WHERE", ") geography_datasets = [x[0] for x in cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path,", "json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN entity AS oe ON entity.organisation_entity = oe.entity", "AS oe ON 
entity.organisation_entity = oe.entity WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) )", "?\" cur.execute(query, (dataset,)) else: cur.execute(query) results = \",\".join(x[0] for x in cur) results", "f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset) features = get_dataset_features(entity_path, dataset)", "as mp from pathlib import Path from itertools import repeat def run(command): proc", "mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\",", "= get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as pool: pool.starmap( build_tiles, zip(repeat(entity_path), repeat(output_path), datasets) )", "+ \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def build_dataset_tiles(output_path, dataset): build_tiles_cmd =", "\"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ] query", "subprocess import multiprocessing as mp from pathlib import Path from itertools import repeat", "'' \"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor() if dataset: query += \"AND entity.dataset", "e return proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\"", "WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor() if dataset: query", "sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset FROM entity WHERE geojson", "\"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\",", "from pathlib import Path from itertools import repeat def run(command): proc = subprocess.run(command,", "= \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN entity AS oe ON", "with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def build_dataset_tiles(output_path, dataset): build_tiles_cmd = [ \"tippecanoe\",", "as f: f.write(geojson) def build_dataset_tiles(output_path, dataset): build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\",", "output_path, dataset): print(dataset) features = get_dataset_features(entity_path, dataset) if dataset is None: dataset =", "output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as pool: pool.starmap( build_tiles,", "= get_dataset_features(entity_path, dataset) if dataset is None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset)", "dataset) build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script to build mbtiles", "= '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def", "cur = conn.cursor() if dataset: query += \"AND entity.dataset == ?\" cur.execute(query, (dataset,))", "if dataset: query += \"AND entity.dataset == ?\" cur.execute(query, 
(dataset,)) else: cur.execute(query) results", "nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available to use (six must be provided)\", )", "\"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ] query = \"\"\" SELECT", "entity LEFT JOIN entity AS oe ON entity.organisation_entity = oe.entity WHERE entity.geojson !=", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path,", "dataset: query += \"AND entity.dataset == ?\" cur.execute(query, (dataset,)) else: cur.execute(query) results =", "\"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\",", "x in cur) results = results.rstrip(\",\") return results def create_geojson_file(features, output_path, dataset): geojson", "\"w\") as f: f.write(geojson) def build_dataset_tiles(output_path, dataset): build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\",", "f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset) features = get_dataset_features(entity_path,", "] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset) features = get_dataset_features(entity_path, dataset) if dataset", "DISTINCT dataset FROM entity WHERE geojson != \"\" \"\"\" ) geography_datasets = [x[0]", "query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN entity AS oe", "return proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\" SELECT", "None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\":", "JOIN entity AS oe ON entity.organisation_entity = oe.entity WHERE entity.geojson != '' \"\"\".format(", "conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset FROM entity WHERE geojson != \"\" \"\"\"", "\"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\"", "= conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset FROM entity WHERE geojson != \"\"", "import repeat def run(command): proc = subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() # raise", "build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ]", "proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\" SELECT DISTINCT", "import sqlite3 import subprocess import multiprocessing as mp from pathlib import Path from", "multiprocessing as mp from pathlib import Path from itertools import repeat def run(command):", "return code except subprocess.CalledProcessError as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise", "numbers available to use (six must be provided)\", ) 
cmd_args = parser.parse_args() entity_path", "parser = argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"),", "\"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset):", "exception on nonz-ero return code except subprocess.CalledProcessError as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n----", "\"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor() if dataset: query += \"AND entity.dataset ==", "x in cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties", "dataset=None): conn = sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\",", "f.write(geojson) def build_dataset_tiles(output_path, dataset): build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\",", "required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\", ) parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False,", "create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\")", "available to use (six must be provided)\", ) cmd_args = parser.parse_args() entity_path =", "raise exception on nonz-ero return code except subprocess.CalledProcessError as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\")", "\"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\",", "\"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available to use (six must be", "query += \"AND entity.dataset == ?\" cur.execute(query, (dataset,)) else: cur.execute(query) results = \",\".join(x[0]", "print(dataset) features = get_dataset_features(entity_path, dataset) if dataset is None: dataset = \"dataset_tiles\" create_geojson_file(features,", "def create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\",", "f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset) features = get_dataset_features(entity_path, dataset) if", "use (six must be provided)\", ) cmd_args = parser.parse_args() entity_path = cmd_args.entity_path[0] output_path", "features = get_dataset_features(entity_path, dataset) if dataset is None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path,", "parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\", ) parser.add_argument(", "\"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\", ) parser.add_argument( \"--output-dir\",", 
"properties=\",\".join(json_properties) ) cur = conn.cursor() if dataset: query += \"AND entity.dataset == ?\"", "geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson)", "dataset): print(dataset) features = get_dataset_features(entity_path, dataset) if dataset is None: dataset = \"dataset_tiles\"", "ON entity.organisation_entity = oe.entity WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) ) cur =", "+= \"AND entity.dataset == ?\" cur.execute(query, (dataset,)) else: cur.execute(query) results = \",\".join(x[0] for", "results = \",\".join(x[0] for x in cur) results = results.rstrip(\",\") return results def", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument( \"--entity-path\",", "datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as pool: pool.starmap( build_tiles, zip(repeat(entity_path), repeat(output_path), datasets)", "build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity", "dataset is None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__", "parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available to use (six must", "\"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\",", "cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties = [", "\"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\",", "\"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\",", "= oe.entity WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor() if", "run(command): proc = subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() # raise exception on nonz-ero", "dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f:", "= [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd)", "e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return proc def get_geography_datasets(entity_model_path):", "\"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset) features =", "else: cur.execute(query) results = \",\".join(x[0] for x in cur) results = 
results.rstrip(\",\") return", "must be provided)\", ) cmd_args = parser.parse_args() entity_path = cmd_args.entity_path[0] output_path = cmd_args.output_dir[0]", "dataset) if dataset is None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset)", "parser.parse_args() entity_path = cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count())", "FROM entity LEFT JOIN entity AS oe ON entity.organisation_entity = oe.entity WHERE entity.geojson", "results = results.rstrip(\",\") return results def create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' +", "return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer',", "\"__main__\": parser = argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False,", "'{}')\" \")\", ] query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN", "geojson != \"\" \"\"\" ) geography_datasets = [x[0] for x in cur] conn.close()", "[x[0] for x in cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn =", "= \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\": parser =", "text=True) try: proc.check_returncode() # raise exception on nonz-ero return code except subprocess.CalledProcessError as", "\"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script", "\"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\",", "json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\",", "entity.organisation_entity = oe.entity WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor()", "\"AND entity.dataset == ?\" cur.execute(query, (dataset,)) else: cur.execute(query) results = \",\".join(x[0] for x", "in cur) results = results.rstrip(\",\") return results def create_geojson_file(features, output_path, dataset): geojson =", "Path from itertools import repeat def run(command): proc = subprocess.run(command, capture_output=True, text=True) try:", "conn = sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset FROM entity", "run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset) features = get_dataset_features(entity_path, dataset) if dataset is", "\"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ] query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM", "\"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ]", "code except subprocess.CalledProcessError as 
e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e", "SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN entity AS oe ON entity.organisation_entity =", "on nonz-ero return code except subprocess.CalledProcessError as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT", "'{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def build_dataset_tiles(output_path,", "entity_path = cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as", "def build_dataset_tiles(output_path, dataset): build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\",", "\"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ] query = \"\"\"", "cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as pool: pool.starmap(", "STDOUT ----\\n{proc.stdout}\") raise e return proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur =", "[ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\",", "cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as pool: pool.starmap( build_tiles, zip(repeat(entity_path), repeat(output_path),", "cur.execute(query) results = \",\".join(x[0] for x in cur) results = results.rstrip(\",\") return results", "def build_tiles(entity_path, output_path, dataset): print(dataset) features = get_dataset_features(entity_path, dataset) if dataset is None:", "= subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() # raise exception on nonz-ero return code", "entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor() if dataset: query +=", "conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\",", "dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\": parser", "= cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as pool:", "= \",\".join(x[0] for x in cur) results = results.rstrip(\",\") return results def create_geojson_file(features,", "cur) results = results.rstrip(\",\") return results def create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":['", "for x in cur) results = results.rstrip(\",\") return results def create_geojson_file(features, output_path, dataset):", "f: f.write(geojson) def build_dataset_tiles(output_path, dataset): build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\",", "features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def 
build_dataset_tiles(output_path, dataset): build_tiles_cmd", "] query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN entity AS", "results.rstrip(\",\") return results def create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features +", "provided)\", ) cmd_args = parser.parse_args() entity_path = cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets =", "<reponame>anthonyrandell-madetech/tiles-builder<filename>build_tiles.py import argparse import sqlite3 import subprocess import multiprocessing as mp from pathlib", "import subprocess import multiprocessing as mp from pathlib import Path from itertools import", "\"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\",", "\"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json, '{}')\" \")\", ] query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties}))", "results def create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\" with", "build_tiles(entity_path, output_path, dataset): print(dataset) features = get_dataset_features(entity_path, dataset) if dataset is None: dataset", "\"\"\" SELECT DISTINCT dataset FROM entity WHERE geojson != \"\" \"\"\" ) geography_datasets", "[ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def", "required=False, default=Path(\"var/cache/\"), help=\"The numbers available to use (six must be provided)\", ) cmd_args", "except subprocess.CalledProcessError as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return", "cur.execute(query, (dataset,)) else: cur.execute(query) results = \",\".join(x[0] for x in cur) results =", "dataset) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument(", "\"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\"", "!= \"\" \"\"\" ) geography_datasets = [x[0] for x in cur] conn.close() return", "\"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset)", "print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return proc def get_geography_datasets(entity_model_path): conn", "\"\" \"\"\" ) geography_datasets = [x[0] for x in cur] conn.close() return geography_datasets", "\")\", \"IFNULL(entity.json, '{}')\" \")\", ] query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity", "json_object({properties})) FROM entity LEFT JOIN entity AS oe ON entity.organisation_entity = oe.entity WHERE", "entity.dataset == ?\" cur.execute(query, (dataset,)) else: cur.execute(query) results = \",\".join(x[0] for x in", "cmd_args = parser.parse_args() entity_path = 
cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None)", "= [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\",", "mp from pathlib import Path from itertools import repeat def run(command): proc =", "def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset", "\"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path,", "dataset): build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\",", "\"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\", f\"{output_path}/{dataset}.geojson\", ] run(build_tiles_cmd) def build_tiles(entity_path, output_path, dataset): print(dataset) features", "== \"__main__\": parser = argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1,", "----\\n{proc.stdout}\") raise e return proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur = conn.cursor()", "oe.entity WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) ) cur = conn.cursor() if dataset:", "= parser.parse_args() entity_path = cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with", "entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\" \"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\",", "repeat def run(command): proc = subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() # raise exception", "oe ON entity.organisation_entity = oe.entity WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties) ) cur", "databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\", )", "(six must be provided)\", ) cmd_args = parser.parse_args() entity_path = cmd_args.entity_path[0] output_path =", "cur = conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset FROM entity WHERE geojson !=", ") parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available to use (six", "\"'name'\", \"entity.name\", \"'type'\", \"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\",", "try: proc.check_returncode() # raise exception on nonz-ero return code except subprocess.CalledProcessError as e:", "output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__ == \"__main__\": parser = argparse.ArgumentParser(description=\"Script to build", "default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\", ) 
parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"),", "= cmd_args.output_dir[0] datasets = get_geography_datasets(entity_path) datasets.append(None) with mp.Pool(mp.cpu_count()) as pool: pool.starmap( build_tiles, zip(repeat(entity_path),", "= sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\" \"json_object(\"", "to build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the", "get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\",", "type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to the entity database\", ) parser.add_argument( \"--output-dir\", type=Path,", "pathlib import Path from itertools import repeat def run(command): proc = subprocess.run(command, capture_output=True,", "get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path) cur = conn.cursor() cur.execute( \"\"\" SELECT DISTINCT dataset FROM", ") cur = conn.cursor() if dataset: query += \"AND entity.dataset == ?\" cur.execute(query,", "is None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if __name__ ==", "itertools import repeat def run(command): proc = subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() #", "+ features + \"]}\" with open(f\"{output_path}/{dataset}.geojson\", \"w\") as f: f.write(geojson) def build_dataset_tiles(output_path, dataset):", "in cur] conn.close() return geography_datasets def get_dataset_features(entity_model_path, dataset=None): conn = sqlite3.connect(entity_model_path) json_properties =", "\")\", ] query = \"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN entity", "build_dataset_tiles(output_path, dataset): build_tiles_cmd = [ \"tippecanoe\", \"-z15\", \"-Z4\", \"-r1\", \"--no-feature-limit\", \"--no-tile-size-limit\", f\"--layer={dataset}\", f\"--output={output_path}/{dataset}.mbtiles\",", "if dataset is None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path, dataset) if", "conn = sqlite3.connect(entity_model_path) json_properties = [ \"'tippecanoe'\", \"json_object('layer', entity.dataset)\", \"'entity'\", \"entity.entity\", \"'properties'\", \"json_patch(\"", "be provided)\", ) cmd_args = parser.parse_args() entity_path = cmd_args.entity_path[0] output_path = cmd_args.output_dir[0] datasets", "entity database\", ) parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available to", "subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode() # raise exception on nonz-ero return code except", "\"\"\" SELECT json_patch(entity.geojson, json_object({properties})) FROM entity LEFT JOIN entity AS oe ON entity.organisation_entity", "as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return proc def", "= argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, 
nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path", "sqlite3 import subprocess import multiprocessing as mp from pathlib import Path from itertools", "get_dataset_features(entity_path, dataset) if dataset is None: dataset = \"dataset_tiles\" create_geojson_file(features, output_path, dataset) build_dataset_tiles(output_path,", "help=\"Path to the entity database\", ) parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The", "the entity database\", ) parser.add_argument( \"--output-dir\", type=Path, nargs=1, required=False, default=Path(\"var/cache/\"), help=\"The numbers available", "entity AS oe ON entity.organisation_entity = oe.entity WHERE entity.geojson != '' \"\"\".format( properties=\",\".join(json_properties)", "\"entity.dataset\", \"'organisation'\", \"oe.name\", \"'entity'\", \"entity.entity\", \"'entry-date'\", \"entity.entry_date\", \"'start-date'\", \"entity.start_date\", \"'end-date'\", \"entity.end_date\" \")\", \"IFNULL(entity.json,", "LEFT JOIN entity AS oe ON entity.organisation_entity = oe.entity WHERE entity.geojson != ''", "= conn.cursor() if dataset: query += \"AND entity.dataset == ?\" cur.execute(query, (dataset,)) else:", "to use (six must be provided)\", ) cmd_args = parser.parse_args() entity_path = cmd_args.entity_path[0]", "subprocess.CalledProcessError as e: print(f\"\\n---- STDERR ----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return proc", "argparse.ArgumentParser(description=\"Script to build mbtiles databases\") parser.add_argument( \"--entity-path\", type=Path, nargs=1, required=False, default=Path(\"var/cache/entity.sqlite3\"), help=\"Path to", "----\\n{proc.stderr}\") print(f\"\\n---- STDOUT ----\\n{proc.stdout}\") raise e return proc def get_geography_datasets(entity_model_path): conn = sqlite3.connect(entity_model_path)", "return results def create_geojson_file(features, output_path, dataset): geojson = '{\"type\":\"FeatureCollection\",\"features\":[' + features + \"]}\"", "from itertools import repeat def run(command): proc = subprocess.run(command, capture_output=True, text=True) try: proc.check_returncode()", "import Path from itertools import repeat def run(command): proc = subprocess.run(command, capture_output=True, text=True)" ]
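For reference, here is a minimal sketch of the GeoJSON assembly step: get_dataset_features returns its rows as one comma-joined string of GeoJSON Feature objects, and create_geojson_file wraps that string into a valid FeatureCollection. The two inline feature strings are hypothetical stand-ins for real query output, and the sketch assumes create_geojson_file from the script above is in scope.

import json
import tempfile

features = ",".join([
    '{"type":"Feature","geometry":{"type":"Point","coordinates":[0.1,51.5]},"properties":{"name":"a"}}',
    '{"type":"Feature","geometry":{"type":"Point","coordinates":[0.2,51.6]},"properties":{"name":"b"}}',
])

with tempfile.TemporaryDirectory() as out_dir:
    create_geojson_file(features, out_dir, "example-dataset")  # hypothetical dataset name
    with open(f"{out_dir}/example-dataset.geojson") as f:
        collection = json.load(f)  # the wrapper produced parseable JSON
    assert collection["type"] == "FeatureCollection"
    assert len(collection["features"]) == 2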
SPSiteManager (office365 SharePoint portal client)

from office365.runtime.client_object import ClientObject
from office365.runtime.serviceOperationQuery import ServiceOperationQuery
from office365.runtime.http.http_method import HttpMethod
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.portal.SPSiteCreationResponse import SPSiteCreationResponse


class SPSiteManager(ClientObject):

    def __init__(self, context):
        super(SPSiteManager, self).__init__(context, ResourcePath("SPSiteManager"), None)

    def create(self, request):
        """Create a modern site"""
        response = SPSiteCreationResponse()
        qry = ServiceOperationQuery(self, "Create", None, request, "request", response)
        self.context.add_query(qry)
        return response

    def delete(self, site_id):
        """Deletes a SharePoint site"""
        payload = {
            "siteId": site_id
        }
        qry = ServiceOperationQuery(self, "Delete", None, payload)
        self.context.add_query(qry)

    def get_status(self, url):
        """Get the status of a SharePoint site"""
        response = SPSiteCreationResponse()
        qry = ServiceOperationQuery(self, "Status", None, {'url': url}, None, response)
        self.context.add_query(qry)
        self.context.get_pending_request().beforeExecute += self._construct_status_request
        return response

    def _construct_status_request(self, request):
        query = self.context.get_pending_request().current_query
        request.method = HttpMethod.Get
        request.url += "?url='{0}'".format(query.parameter_type['url'])
        self.context.get_pending_request().beforeExecute -= self._construct_status_request
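The subtle piece above is get_status: the "Status" call must go out as a GET with the site url in the query string, so a handler is attached to the pending request's beforeExecute event and detaches itself after firing once. Below is a minimal, self-contained sketch of that one-shot-hook pattern; EventHandlers, PendingRequest, and the dict-based requests are simplified stand-ins for the library's real types, not its actual API.

class EventHandlers:
    """Tiny += / -= handler list, mimicking the library's beforeExecute field."""
    def __init__(self):
        self._handlers = []

    def __iadd__(self, handler):
        self._handlers.append(handler)
        return self

    def __isub__(self, handler):
        self._handlers.remove(handler)
        return self

    def notify(self, *args):
        for handler in list(self._handlers):  # copy: handlers may detach themselves
            handler(*args)


class PendingRequest:
    def __init__(self):
        self.beforeExecute = EventHandlers()

    def execute(self, request):
        self.beforeExecute.notify(request)
        print(request["method"], request["url"])


pending = PendingRequest()

def construct_status_request(request):
    # Rewrite the outgoing call as a GET carrying the url parameter...
    request["method"] = "GET"
    request["url"] += "?url='{0}'".format("https://tenant.sharepoint.com/sites/demo")
    # ...then detach, so only this one request is affected.
    pending.beforeExecute -= construct_status_request

pending.beforeExecute += construct_status_request
pending.execute({"method": "POST", "url": "/SPSiteManager/Status"})  # rewritten to GET ...?url='...'
pending.execute({"method": "POST", "url": "/SPSiteManager/Delete"})  # unaffected: stays POST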
MongoCollection (from ikumen/notas)

from bson import json_util, objectid
from pymongo import MongoClient, database
from typing import List

from backend.app import settings


class MongoCollection:
    """Generic class wrapper around Mongo DB operations around a collection.
    """
    def __init__(self, collection_name: str, db: database.Database = None):
        self.init(collection_name, db)

    def init(self, collection_name: str, db: database.Database = None):
        """Initialize this store, with the given mongo Database or create one
        if necessary, and create a reference to the underlying collection
        this store will represent.
        """
        if db is None:
            mongo_client = MongoClient(settings.MONGO_URI)
            db = mongo_client.get_database(settings.MONGO_DB_NAME)
        self.collection = db.get_collection(collection_name)

    def create(self, **kwargs) -> dict:
        """Add the given attributes to the collection as a new document.
        """
        id = self.collection.insert_one(kwargs).inserted_id
        kwargs['_id'] = id
        return kwargs

    def update(self, id, **kwargs) -> bool:
        """Update the document in the collection with the given attributes
        and identified by the given id. Return True if document was updated.
        """
        rv = self.collection.update_one({'_id': objectid.ObjectId(id)}, {'$set': kwargs})
        return rv.matched_count == 1

    def get(self, id: str) -> dict:
        """Return the document in the collection identified by the given id.
        """
        for rv in self.collection.find({"_id": objectid.ObjectId(id)}).limit(1):
            return rv
        return None

    def all(self) -> List[dict]:
        """Return all documents in collection.
        """
        rv = self.collection.find()
        return list(rv)

    def delete(self, id: str) -> bool:
        """Delete document in collection identified by the given id.
        Returns True if the document was deleted.
        """
        rv = self.collection.delete_one({'_id': objectid.ObjectId(id)})
        return rv.deleted_count == 1
\"\"\" if db is None: mongo_client = MongoClient(settings.MONGO_URI) db", "None): \"\"\"Initialize this store, with the given mongo Database or create one if", "a collection. \"\"\" def __init__(self, collection_name: str, db: database.Database = None): self.init(collection_name, db)", "\"\"\"Add the given attributes to the collection as a new document. \"\"\" id", "get(self, id: str) -> dict: \"\"\"Return the document in the collection identified by", "self.collection.insert_one(kwargs).inserted_id kwargs['_id'] = id return kwargs def update(self, id, **kwargs) -> bool: \"\"\"Update", "necessary, and create a reference to the underlying collection this store will represent.", "mongo_client = MongoClient(settings.MONGO_URI) db = mongo_client.get_database(settings.MONGO_DB_NAME) self.collection = db.get_collection(collection_name) def create(self, **kwargs) ->", "id. Return True if document was updated. \"\"\" rv = self.collection.update_one({'_id': objectid.ObjectId(id)}, {'$set':", "None: mongo_client = MongoClient(settings.MONGO_URI) db = mongo_client.get_database(settings.MONGO_DB_NAME) self.collection = db.get_collection(collection_name) def create(self, **kwargs)", "backend.app import settings class MongoCollection: \"\"\"Generic class wrapper around Mongo DB operations around", "wrapper around Mongo DB operations around a collection. \"\"\" def __init__(self, collection_name: str,", "class MongoCollection: \"\"\"Generic class wrapper around Mongo DB operations around a collection. \"\"\"", "self.collection.find() return list(rv) def delete(self, id: str) -> bool: \"\"\"Delete document in collection", "in the collection identified by the given id. \"\"\" for rv in self.collection.find({\"_id\":", "self.collection = db.get_collection(collection_name) def create(self, **kwargs) -> dict: \"\"\"Add the given attributes to", "List from backend.app import settings class MongoCollection: \"\"\"Generic class wrapper around Mongo DB", "create a reference to the underlying collection this store will represent. \"\"\" if", "all(self) -> List[dict]: \"\"\"Return all documents in collection. \"\"\" rv = self.collection.find() return", "documents in collection. \"\"\" rv = self.collection.find() return list(rv) def delete(self, id: str)", "str) -> bool: \"\"\"Delete document in collection identified by the given id. Returns", "None def all(self) -> List[dict]: \"\"\"Return all documents in collection. \"\"\" rv =", "def update(self, id, **kwargs) -> bool: \"\"\"Update the document in the collection with", "id: str) -> bool: \"\"\"Delete document in collection identified by the given id.", "from bson import json_util, objectid from pymongo import MongoClient, database from typing import", "with the given mongo Database or create one if necessary, and create a" ]
# source repo: victoriapc/HockusPockus
from VisionInterfaces.Broadcaster import Broadcaster
import cv2


class BroadcasterTEST(Broadcaster):
    def __init__(self):
        self.m_currentFrame = 0

    def broadcastCoordinatesOfPuck(self, i_xPos, i_Ypos):
        """
        This implementation does nothing, as the USB implementation does not
        need to broadcast this information.

        Args:
            i_xPos: The X position of the puck
            i_Ypos: The Y position of the puck
        """
        pass

    def broadcastVideoOfPuck(self, i_frame):
        """
        "Broadcasts" the video feed of the puck by writing each altered frame
        to a numbered image on disk, so the output can be inspected.

        Args:
            i_frame: The altered frame to publish
        """
        cv2.imwrite(r"VisionTest\outputFrames\Outframe%d.jpg" % self.m_currentFrame, i_frame)
        self.m_currentFrame += 1
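# A small usage sketch for BroadcasterTEST, assuming OpenCV and numpy are
# available and that the VisionTest\outputFrames directory already exists
# (cv2.imwrite may simply return False when the path is not writable); the
# blank test frame is made up for illustration.
import numpy as np

broadcaster = BroadcasterTEST()
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # a blank 640x480 BGR frame
broadcaster.broadcastCoordinatesOfPuck(120, 80)  # no-op in this implementation
broadcaster.broadcastVideoOfPuck(frame)          # writes VisionTest\outputFrames\Outframe0.jpg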
import numpy as np
import pandas as pd

# initial staff planning: 5 days with 30 employees each, every employee
# starting at hour 0 on an 8-hour shift, as [employee_id, start_time, duration]
staff_planning = [[[employee_id, 0, 8] for employee_id in range(30)]
                  for _ in range(5)]

# staff needed per hour of the day, one row per day (the same demand profile
# on all 5 days)
hourlystaff_needed = np.array(
    [[3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12,
      9, 9, 12, 15, 10, 10, 3, 3, 3]] * 5)

# alternate (inactive) scenarios kept from earlier iterations: an 11-employee
# plan on 10-hour shifts, with lower per-day demand profiles such as
# [0,0,0,0,0,0,4,4,4,2,2,2,6,6,2,2,2,6,6,6,2,2,2,2] and
# [0,0,0,0,0,0,6,12,12,12,6,6,18,18,6,6,6,6,18,18,18,6,6,6]
"""
Employee present: analyse whether the employee is present (yes or no) at a
given hour, based on the employee's list of 3 values (id, start time, duration).
"""
def employee_present(employee, time):
    employee_start_time = employee[1]
    employee_duration = employee[2]
    employee_end_time = employee_start_time + employee_duration
    if (time >= employee_start_time) and (time < employee_end_time):
        return True
    return False


"""
Convert a staff planning (days x employees x [id, start, duration]) into an
hourly planning: the number of staff present at every hour of every day.
"""
def staffplanning_to_hourlyplanning(staff_planning):
    hourlystaff_week = []
    for day in staff_planning:
        hourlystaff_day = []
        for employee in day:
            employee_present_hour = []
            for time in range(0, 24):
                employee_present_hour.append(employee_present(employee, time))
            hourlystaff_day.append(employee_present_hour)
        hourlystaff_week.append(hourlystaff_day)
    # summing the boolean presence matrix over the employee axis gives the
    # head count per hour for each day
    hourlystaff_week = np.array(hourlystaff_week).sum(axis=1)
    return hourlystaff_week
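# A quick sanity check of the two helpers above on a made-up one-day plan:
# one employee working 9:00-17:00 and one working 12:00-20:00, so the hourly
# head count should be 1 from 9 to 12, 2 from 12 to 17 and 1 from 17 to 20.
tiny_plan = [[[0, 9, 8], [1, 12, 8]]]
assert employee_present([0, 9, 8], 9) is True    # on shift at 9:00
assert employee_present([0, 9, 8], 17) is False  # the shift ends before 17:00
print(staffplanning_to_hourlyplanning(tiny_plan))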
hourlystaff_week \"\"\" cost is calculated as hours understaffed + hours overstaffed \"\"\"", "in selected_parents_idx] return selected_parents \"\"\" overall func \"\"\" def gen_algo(hourlystaff_needed, n_iterations): generation_size =", "hourlystaff_needed = np.array([ [0, 0, 0, 0, 0, 0, 4, 4, 4, 2,", "0, 0, 0, 6, 12, 12, 12, 6, 6, 18, 18, 6, 6,", "8], [29, 0, 8]] ] hourlystaff_needed = np.array( [ [3, 3, 3, 3,", "selected_parents_idx = list(costs_tmp.iloc[:n_best, 0]) selected_parents = [parent for idx, parent in enumerate( parent_gen)", "10], [6, 0, 10], [7, 0, 10], [8, 0, 10], [9, 0, 10],", "np import pandas as pd staff_planning = [ [[0, 0, 8], [1, 0,", "0, 8], [14, 0, 8], [ 15, 0, 8], [16, 0, 8], [17,", "= cost(parent_hourly_planning, hourlystaff_needed) costs.append([idx, parent_cost]) print('generation best is: {}, generation worst is: {}'.format(", "[27, 0, 8], [28, 0, 8], [29, 0, 8]], [[0, 0, 8], [1,", "= abs(errors[errors > 0].sum()) understaff = abs(errors[errors < 0].sum()) overstaff_cost = 1 understaff_cost", "feasibility \"\"\" def is_acceptable(parent): # work > 10 hours is not ok return", "- 1)] random_mom = parents[np.random.randint(low=0, high=n_parents - 1)] dad_mask = np.random.randint(0, 2, size=np.array(random_dad).shape)", "return selected_parents \"\"\" overall func \"\"\" def gen_algo(hourlystaff_needed, n_iterations): generation_size = 1000 parent_gen", "np.array([ [0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2, 2,", "size1 = parent.shape[0] size2 = parent.shape[1] for i in range(n_mutations): rand1 = np.random.randint(0,", "18, 6, 6, 6, 6, 18, 18, 18, 6, 6, 6], [0, 0,", "for each iteration, select randomly two parents and make a random combination of", "6] ]) \"\"\" \"\"\" staff_planning = [ [ [0, 0, 10], [1, 0,", "[9, 0, 10], [10, 0, 10] ], ] hourlystaff_needed = np.array([ [0, 0,", "n_best): costs = [] for idx, parent_staff_planning in enumerate(parent_gen): parent_hourly_planning = staffplanning_to_hourlyplanning( parent_staff_planning)", "3], ]) \"\"\" \"\"\" Employee present: analyse whether the employee is present yes", "6, 6], [0, 0, 0, 0, 0, 0, 6, 12, 12, 12, 6,", "\"\"\" staff_planning = [ [ [0, 0, 10], [1, 0, 10], [2, 0,", "0, 10], [2, 0, 10], [3, 0, 10], [4, 0, 10], [5, 0,", "2, 2], [0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2,", "18, 6, 6, 6], [0, 0, 0, 0, 0, 0, 6, 12, 12,", "select_best(parent_gen, hourlystaff_needed, n_best): costs = [] for idx, parent_staff_planning in enumerate(parent_gen): parent_hourly_planning =", "3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12,", "0, 8], [4, 0, 8], [5, 0, 8], [6, 0, 8], [7, 0,", "8]], [[0, 0, 8], [1, 0, 8], [2, 0, 8], [3, 0, 8],", "\"\"\" random_staff_planning = generate_random_staff_planning(n_days=5, n_staff=30) random_staff_planning cost(staffplanning_to_hourlyplanning(random_staff_planning), hourlystaff_needed) \"\"\" create a parent generation", "- 1)] dad_mask = np.random.randint(0, 2, size=np.array(random_dad).shape) mom_mask = np.logical_not(dad_mask) child = np.add(np.multiply(random_dad,", "hourlystaff_needed, n_best=100) parent_gen = random_combine(parent_gen, n_offspring=generation_size) parent_gen = mutate_gen(parent_gen, n_mutations=1) best_child = select_best(parent_gen,", "3, 3] ]) hourlystaff_needed = np.array( [[3, 3, 3, 3, 3, 3, 10,", "0, 10], [6, 0, 10], [7, 0, 10], [8, 0, 10], [9, 0,", "\"\"\" \"\"\" def staffplanning_to_hourlyplanning(staff_planning): hourlystaff_week = [] for day in staff_planning: hourlystaff_day =", "combination of those two parents by applying a randomly generated yes/no mask to", "things \"\"\" 
def employee_present(employee, time): employee_start_time = employee[1] employee_duration = employee[2] employee_end_time =", "8], [25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8],", "return parent def mutate_gen(parent_gen, n_mutations): mutated_parent_gen = [] for parent in parent_gen: mutated_parent_gen.append(mutate_parent(parent,", "= [parent for idx, parent in enumerate( parent_gen) if idx in selected_parents_idx] return", "8], [2, 0, 8], [3, 0, 8], [4, 0, 8], [5, 0, 8],", "\"\"\" def employee_present(employee, time): employee_start_time = employee[1] employee_duration = employee[2] employee_end_time = employee_start_time", "True return False \"\"\" \"\"\" def staffplanning_to_hourlyplanning(staff_planning): hourlystaff_week = [] for day in", "present: analyse whether the employee is present yes or no on a given", "as np import pandas as pd staff_planning = [ [[0, 0, 8], [1,", "[17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8], [21,", "{}, generation worst is: {}'.format( pd.DataFrame(costs)[1].min(), pd.DataFrame(costs)[1].max())) costs_tmp = pd.DataFrame(costs).sort_values( by=1, ascending=True).reset_index(drop=True) selected_parents_idx", "parent_gen = select_acceptable(parent_gen) parent_gen = select_best(parent_gen, hourlystaff_needed, n_best=100) parent_gen = random_combine(parent_gen, n_offspring=generation_size) parent_gen", "4, 4, 4, 2, 2, 2, 6, 6, 2, 2, 2, 6, 6,", "for employee in day: employee_present_hour = [] for time in range(0, 24): employee_present_hour.append(employee_present(employee,", "n_offspring): n_parents = len(parents) n_periods = len(parents[0]) n_employees = len(parents[0][0]) offspring = []", "for day in range(n_days): day_planning = [] for employee_id in range(n_staff): start_time =", "these 3 things \"\"\" def employee_present(employee, time): employee_start_time = employee[1] employee_duration = employee[2]", "= hourlystaff - hourlystaff_needed overstaff = abs(errors[errors > 0].sum()) understaff = abs(errors[errors <", "if is_acceptable(parent)] return parent_gen \"\"\" selection - cost (inverse fitness) \"\"\" def select_best(parent_gen,", "[6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8], [10,", "\"\"\" selection - feasibility \"\"\" def is_acceptable(parent): # work > 10 hours is", "0, 0, 0, 4, 4, 4, 2, 2, 2, 6, 6, 2, 2,", "8], [6, 0, 8], [7, 0, 8], [8, 0, 8], [9, 0, 8],", "n_mutations)) return mutated_parent_gen \"\"\" selection - feasibility \"\"\" def is_acceptable(parent): # work >", "parent_gen \"\"\" selection - cost (inverse fitness) \"\"\" def select_best(parent_gen, hourlystaff_needed, n_best): costs", "period_planning = [] for day in range(n_days): day_planning = [] for employee_id in", "random combination of those two parents by applying a randomly generated yes/no mask", "8], [10, 0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8],", "= parent.shape[0] size2 = parent.shape[1] for i in range(n_mutations): rand1 = np.random.randint(0, size1)", "8] ] ] hourlystaff_needed = np.array([ [3, 3, 3, 3, 3, 3, 10,", "8], [9, 0, 8], [10, 0, 8], [11, 0, 8], [12, 0, 8],", "= [] for day in range(n_days): day_planning = [] for employee_id in range(n_staff):", "[] for idx, parent_staff_planning in enumerate(parent_gen): parent_hourly_planning = staffplanning_to_hourlyplanning( parent_staff_planning) parent_cost = cost(parent_hourly_planning,", "[27, 0, 8], [28, 0, 8], [29, 0, 8] ] ] hourlystaff_needed =", "by applying a randomly generated yes/no mask to the two selected parents \"\"\"", "[26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8]] ]", "= np.random.randint(0, 23) 
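# A worked example of the cost function on made-up numbers: scheduling 3
# staff in both hours against a demand of [2, 4] gives one hour overstaffed
# by 1 and one understaffed by 1, so the total cost is 2.
print(cost(np.array([3, 3]), np.array([2, 4])))  # -> 2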
"""
Generate a fully random staff planning for a given number of days and staff:
random start times (0-22) and random shift durations (0-7 hours).
"""
def generate_random_staff_planning(n_days, n_staff):
    period_planning = []
    for day in range(n_days):
        day_planning = []
        for employee_id in range(n_staff):
            start_time = np.random.randint(0, 23)
            duration = np.random.randint(0, 8)
            employee = [employee_id, start_time, duration]
            day_planning.append(employee)
        period_planning.append(day_planning)
    return period_planning


# score one random planning against the demand as a baseline
random_staff_planning = generate_random_staff_planning(n_days=5, n_staff=30)
print(cost(staffplanning_to_hourlyplanning(random_staff_planning), hourlystaff_needed))
parent_gen \"\"\" selection - cost", "[10, 0, 10] ], [ [0, 0, 10], [1, 0, 10], [2, 0,", "= 1 understaff_cost = 1 cost = overstaff_cost * overstaff + understaff_cost *", "in parent_gen if is_acceptable(parent)] return parent_gen \"\"\" selection - cost (inverse fitness) \"\"\"", "6, 6, 2, 2, 2, 2] ]) \"\"\" \"\"\" staff_planning = [ [", "cost = overstaff_cost * overstaff + understaff_cost * understaff return cost \"\"\" \"\"\"", "mask to the two selected parents \"\"\" def random_combine(parents, n_offspring): n_parents = len(parents)", "9, 12, 15, 10, 10, 3, 3, 3], [3, 3, 3, 3, 3,", "ascending=True).reset_index(drop=True) selected_parents_idx = list(costs_tmp.iloc[:n_best, 0]) selected_parents = [parent for idx, parent in enumerate(", "0, 8], [11, 0, 8], [12, 0, 8], [13, 0, 8], [14, 0,", "8], [17, 0, 8], [18, 0, 8], [19, 0, 8], [20, 0, 8],", "best_child = select_best(parent_gen, hourlystaff_needed, n_best=1) return best_child best_planning = gen_algo(hourlystaff_needed, n_iterations=100) print(best_planning) print(staffplanning_to_hourlyplanning(best_planning[0]))", "n_iterations): generation_size = 1000 parent_gen = create_parent_generation( n_parents=generation_size, n_days=5, n_staff=30) for it in", "[8, 0, 10], [9, 0, 10], [10, 0, 10] ], ] hourlystaff_needed =", "3, 3, 3, 10, 7, 12, 12, 9, 9, 10, 12, 12, 9,", "start time, duration) //list these 3 things \"\"\" def employee_present(employee, time): employee_start_time =", "0, 8]] ] hourlystaff_needed = np.array( [ [3, 3, 3, 3, 3, 3,", "ok return np.logical_not((np.array(parent)[:, :, 2:] > 8).any()) def select_acceptable(parent_gen): parent_gen = [parent for", "[13, 0, 8], [14, 0, 8], [15, 0, 8], [16, 0, 8], [17,", "0, 10], [8, 0, 10], [9, 0, 10], [10, 0, 10] ], [", "parent_gen = mutate_gen(parent_gen, n_mutations=1) best_child = select_best(parent_gen, hourlystaff_needed, n_best=1) return best_child best_planning =", "two parents by applying a randomly generated yes/no mask to the two selected", "here return parent def mutate_gen(parent_gen, n_mutations): mutated_parent_gen = [] for parent in parent_gen:", "1 understaff_cost = 1 cost = overstaff_cost * overstaff + understaff_cost * understaff", "[14, 0, 8], [15, 0, 8], [16, 0, 8], [17, 0, 8], [18,", "0, 4, 4, 4, 2, 2, 2, 6, 6, 2, 2, 2, 6,", "employee_end_time = employee_start_time + employee_duration if (time >= employee_start_time) and (time < employee_end_time):", "the two selected parents \"\"\" def random_combine(parents, n_offspring): n_parents = len(parents) n_periods =", "def gen_algo(hourlystaff_needed, n_iterations): generation_size = 1000 parent_gen = create_parent_generation( n_parents=generation_size, n_days=5, n_staff=30) for", "understaff_cost = 1 cost = overstaff_cost * overstaff + understaff_cost * understaff return", "0, 8] ] ] hourlystaff_needed = np.array([ [3, 3, 3, 3, 3, 3,", "np.random.randint(0, 2, size=np.array(random_dad).shape) mom_mask = np.logical_not(dad_mask) child = np.add(np.multiply(random_dad, dad_mask), np.multiply(random_mom, mom_mask)) offspring.append(child)", "0, 6, 12, 12, 12, 6, 6, 18, 18, 6, 6, 6, 6,", "12, 12, 6, 6, 18, 18, 6, 6, 6, 6, 18, 18, 18,", "2, 6, 6, 2, 2, 2, 6, 6, 6, 2, 2, 2, 2],", "+ employee_duration if (time >= employee_start_time) and (time < employee_end_time): return True return", "12, 9, 9, 12, 15, 10, 10, 3, 3, 3], ]) \"\"\" \"\"\"", "staffplanning_to_hourlyplanning( parent_staff_planning) parent_cost = cost(parent_hourly_planning, hourlystaff_needed) costs.append([idx, parent_cost]) print('generation best is: {}, 
"""
For each offspring, select randomly two parents and make a random combination
of those two parents by applying a randomly generated yes/no mask to the two
selected parents.
"""
def random_combine(parents, n_offspring):
    n_parents = len(parents)
    n_periods = len(parents[0])
    n_employees = len(parents[0][0])
    offspring = []
    for i in range(n_offspring):
        # np.random.randint has an exclusive upper bound, so high=n_parents
        # keeps every parent eligible
        random_dad = parents[np.random.randint(low=0, high=n_parents)]
        random_mom = parents[np.random.randint(low=0, high=n_parents)]
        dad_mask = np.random.randint(0, 2, size=np.array(random_dad).shape)
        mom_mask = np.logical_not(dad_mask)
        child = np.add(np.multiply(random_dad, dad_mask),
                       np.multiply(random_mom, mom_mask))
        offspring.append(child)
    return offspring
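# A tiny illustration of the mask crossover used by random_combine, with a
# fixed mask instead of a random one: each value is taken from the dad where
# the mask is 1 and from the mom where it is 0.
_dad = np.array([[0, 9, 8], [1, 12, 8]])
_mom = np.array([[0, 7, 6], [1, 14, 4]])
_mask = np.array([[1, 0, 1], [0, 1, 0]])
print(np.add(np.multiply(_dad, _mask),
             np.multiply(_mom, np.logical_not(_mask))))  # -> [[0 7 8], [1 12 4]]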
"""
Mutation: overwrite randomly chosen start times with new random values.
"""
def mutate_parent(parent, n_mutations):
    size1 = parent.shape[0]
    size2 = parent.shape[1]
    for i in range(n_mutations):
        rand1 = np.random.randint(0, size1)  # random day
        rand2 = np.random.randint(0, size2)  # random employee
        rand3 = np.random.randint(1, 2)      # always 1: only the start time is mutated
        parent[rand1, rand2, rand3] = np.random.randint(0, 8)
    return parent


def mutate_gen(parent_gen, n_mutations):
    mutated_parent_gen = []
    for parent in parent_gen:
        mutated_parent_gen.append(mutate_parent(parent, n_mutations))
    return mutated_parent_gen
"""
Selection - feasibility.
"""
def is_acceptable(parent):
    # shifts longer than 8 hours are not acceptable
    return np.logical_not((np.array(parent)[:, :, 2:] > 8).any())


def select_acceptable(parent_gen):
    parent_gen = [parent for parent in parent_gen if is_acceptable(parent)]
    return parent_gen
period_planning = [] for day in", "2, 2, 2, 6, 6, 6, 2, 2, 2, 2] ]) \"\"\" \"\"\"", "return offspring \"\"\" mutation \"\"\" def mutate_parent(parent, n_mutations): size1 = parent.shape[0] size2 =", "> 10 hours is not ok return np.logical_not((np.array(parent)[:, :, 2:] > 8).any()) def", "2], [0, 0, 0, 0, 0, 0, 4, 4, 4, 2, 2, 2,", "\"\"\" \"\"\" Employee present: analyse whether the employee is present yes or no", "[26, 0, 8], [27, 0, 8], [28, 0, 8], [29, 0, 8] ],", "parent_gen = [parent for parent in parent_gen if is_acceptable(parent)] return parent_gen \"\"\" selection", "parents[np.random.randint(low=0, high=n_parents - 1)] dad_mask = np.random.randint(0, 2, size=np.array(random_dad).shape) mom_mask = np.logical_not(dad_mask) child", "> 0].sum()) understaff = abs(errors[errors < 0].sum()) overstaff_cost = 1 understaff_cost = 1", "offspring = [] for i in range(n_offspring): random_dad = parents[np.random.randint(low=0, high=n_parents - 1)]", "in enumerate(parent_gen): parent_hourly_planning = staffplanning_to_hourlyplanning( parent_staff_planning) parent_cost = cost(parent_hourly_planning, hourlystaff_needed) costs.append([idx, parent_cost]) print('generation", "3], [3, 3, 3, 3, 3, 3, 10, 7, 12, 12, 9, 9,", "offspring \"\"\" mutation \"\"\" def mutate_parent(parent, n_mutations): size1 = parent.shape[0] size2 = parent.shape[1]", "generate_random_staff_planning(n_days, n_staff): period_planning = [] for day in range(n_days): day_planning = [] for", "worst is: {}'.format( pd.DataFrame(costs)[1].min(), pd.DataFrame(costs)[1].max())) costs_tmp = pd.DataFrame(costs).sort_values( by=1, ascending=True).reset_index(drop=True) selected_parents_idx = list(costs_tmp.iloc[:n_best,", "2, 6, 6, 6, 2, 2, 2, 2] ]) \"\"\" \"\"\" staff_planning =", "[25, 0, 8], [26, 0, 8], [27, 0, 8], [28, 0, 8], [29,", "6, 6, 6, 18, 18, 18, 6, 6, 6], [0, 0, 0, 0,", "8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [15, 0, 8],", "employee_start_time) and (time < employee_end_time): return True return False \"\"\" \"\"\" def staffplanning_to_hourlyplanning(staff_planning):", "[] for employee_id in range(n_staff): start_time = np.random.randint(0, 23) duration = np.random.randint(0, 8)", "\"\"\" def is_acceptable(parent): # work > 10 hours is not ok return np.logical_not((np.array(parent)[:,", "0, 10], [1, 0, 10], [2, 0, 10], [3, 0, 10], [4, 0,", "parent_gen: mutated_parent_gen.append(mutate_parent(parent, n_mutations)) return mutated_parent_gen \"\"\" selection - feasibility \"\"\" def is_acceptable(parent): #", "1 cost = overstaff_cost * overstaff + understaff_cost * understaff return cost \"\"\"", "9, 10, 12, 12, 9, 9, 12, 15, 10, 10, 3, 3, 3],", "] hourlystaff_needed = np.array([ [3, 3, 3, 3, 3, 3, 10, 7, 12,", "rand1 = np.random.randint(0, size1) rand2 = np.random.randint(0, size2) rand3 = np.random.randint(1, 2) parent[rand1,", "it in range(n_iterations): parent_gen = select_acceptable(parent_gen) parent_gen = select_best(parent_gen, hourlystaff_needed, n_best=100) parent_gen =", "generation worst is: {}'.format( pd.DataFrame(costs)[1].min(), pd.DataFrame(costs)[1].max())) costs_tmp = pd.DataFrame(costs).sort_values( by=1, ascending=True).reset_index(drop=True) selected_parents_idx =", "is not ok return np.logical_not((np.array(parent)[:, :, 2:] > 8).any()) def select_acceptable(parent_gen): parent_gen =", "selected_parents = [parent for idx, parent in enumerate( parent_gen) if idx in selected_parents_idx]", "0, 8], [12, 0, 8], [13, 0, 8], [14, 0, 8], [ 15,", "def select_acceptable(parent_gen): parent_gen = [parent for parent 
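"""
A quick sanity check, added here as an illustration and not part of the
original script: one employee on a 0-8 shift in a one-day plan should yield
exactly 8 staffed hours. The names tiny_plan and tiny_hourly exist only for
this example.
"""
tiny_plan = [[[0, 0, 8]]]                    # 1 day, 1 employee, shift 0:00-8:00
tiny_hourly = staffplanning_to_hourlyplanning(tiny_plan)
assert tiny_hourly.shape == (1, 24)          # one row of 24 hourly headcounts
assert tiny_hourly[0].sum() == 8             # staffed for 8 of the 24 hours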
"""
cost is calculated as hours understaffed + hours overstaffed
"""
def cost(hourlystaff, hourlystaff_needed):
    errors = hourlystaff - hourlystaff_needed
    overstaff = abs(errors[errors > 0].sum())
    understaff = abs(errors[errors < 0].sum())
    overstaff_cost = 1
    understaff_cost = 1
    cost = overstaff_cost * overstaff + understaff_cost * understaff
    return cost

"""
Generate a fully random staff planning: each employee gets a random start
time and a random duration on every day.
"""
def generate_random_staff_planning(n_days, n_staff):
    period_planning = []
    for day in range(n_days):
        day_planning = []
        for employee_id in range(n_staff):
            start_time = np.random.randint(0, 23)   # start hour in 0..22
            duration = np.random.randint(0, 8)      # shift length in 0..7 hours
            employee = [employee_id, start_time, duration]
            day_planning.append(employee)
        period_planning.append(day_planning)
    return period_planning

random_staff_planning = generate_random_staff_planning(n_days=5, n_staff=30)
print('cost of a random planning:',
      cost(staffplanning_to_hourlyplanning(random_staff_planning), hourlystaff_needed))

"""
create a parent generation of n parent plannings
"""
def create_parent_generation(n_parents, n_days=5, n_staff=30):
    parents = []
    for i in range(n_parents):
        parent = generate_random_staff_planning(n_days=n_days, n_staff=n_staff)
        parents.append(parent)
    return parents
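"""
Illustrative check, not part of the original script: a parent generation is
just a list of independent random plannings. The name demo_parents exists
only for this example.
"""
demo_parents = create_parent_generation(n_parents=3, n_days=2, n_staff=4)
assert len(demo_parents) == 3
assert np.array(demo_parents).shape == (3, 2, 4, 3)  # parents x days x staff x triple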
"""
crossover: for each offspring, select two random parents and make a random
combination of those two parents by applying a randomly generated yes/no
mask to the two selected parents
"""
def random_combine(parents, n_offspring):
    n_parents = len(parents)
    offspring = []
    for i in range(n_offspring):
        # randint's upper bound is exclusive, so high=n_parents lets every
        # parent in the list be drawn
        random_dad = parents[np.random.randint(low=0, high=n_parents)]
        random_mom = parents[np.random.randint(low=0, high=n_parents)]
        dad_mask = np.random.randint(0, 2, size=np.array(random_dad).shape)
        mom_mask = np.logical_not(dad_mask)
        child = np.add(np.multiply(random_dad, dad_mask),
                       np.multiply(random_mom, mom_mask))
        offspring.append(child)
    return offspring

"""
mutation: randomly perturb a few entries of a plan
"""
def mutate_parent(parent, n_mutations):
    size1 = parent.shape[0]
    size2 = parent.shape[1]
    for i in range(n_mutations):
        rand1 = np.random.randint(0, size1)
        rand2 = np.random.randint(0, size2)
        # randint(1, 2) can only return 1, so only the start time (index 1)
        # is ever mutated; widen to (1, 3) to also mutate durations
        rand3 = np.random.randint(1, 2)
        parent[rand1, rand2, rand3] = np.random.randint(0, 8)
    return parent

def mutate_gen(parent_gen, n_mutations):
    mutated_parent_gen = []
    for parent in parent_gen:
        mutated_parent_gen.append(mutate_parent(parent, n_mutations))
    return mutated_parent_gen
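"""
Illustrative check, not part of the original script: with the yes/no mask,
every entry of a child comes from one of its two parents. The seed is only
here to make the example repeatable.
"""
np.random.seed(0)
p1 = np.array(generate_random_staff_planning(n_days=2, n_staff=3))
p2 = np.array(generate_random_staff_planning(n_days=2, n_staff=3))
child = random_combine([p1, p2], n_offspring=1)[0]
assert np.logical_or(child == p1, child == p2).all()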
"""
selection - feasibility
"""
def is_acceptable(parent):
    # shifts longer than 8 hours are not acceptable
    return np.logical_not((np.array(parent)[:, :, 2:] > 8).any())

def select_acceptable(parent_gen):
    parent_gen = [parent for parent in parent_gen if is_acceptable(parent)]
    return parent_gen

"""
selection - cost (inverse fitness)
"""
def select_best(parent_gen, hourlystaff_needed, n_best):
    costs = []
    for idx, parent_staff_planning in enumerate(parent_gen):
        parent_hourly_planning = staffplanning_to_hourlyplanning(parent_staff_planning)
        parent_cost = cost(parent_hourly_planning, hourlystaff_needed)
        costs.append([idx, parent_cost])
    print('generation best is: {}, generation worst is: {}'.format(
        pd.DataFrame(costs)[1].min(), pd.DataFrame(costs)[1].max()))
    costs_tmp = pd.DataFrame(costs).sort_values(by=1, ascending=True).reset_index(drop=True)
    selected_parents_idx = list(costs_tmp.iloc[:n_best, 0])
    selected_parents = [parent for idx, parent in enumerate(parent_gen)
                        if idx in selected_parents_idx]
    return selected_parents
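"""
Illustrative check, not part of the original script: the feasibility filter
drops any plan containing a shift longer than 8 hours.
"""
ok_plan = np.array([[[0, 6, 8]]])        # one 6:00-14:00 shift: acceptable
too_long = np.array([[[0, 6, 12]]])      # a 12-hour shift: rejected
kept = select_acceptable([ok_plan, too_long])
assert len(kept) == 1 and kept[0] is ok_plan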
"""
overall loop: filter to feasible plans, keep the 100 cheapest, breed a new
generation of 1000 by crossover, then mutate
"""
def gen_algo(hourlystaff_needed, n_iterations):
    generation_size = 1000
    parent_gen = create_parent_generation(
        n_parents=generation_size, n_days=5, n_staff=30)
    for it in range(n_iterations):
        parent_gen = select_acceptable(parent_gen)
        parent_gen = select_best(parent_gen, hourlystaff_needed, n_best=100)
        parent_gen = random_combine(parent_gen, n_offspring=generation_size)
        parent_gen = mutate_gen(parent_gen, n_mutations=1)
    best_child = select_best(parent_gen, hourlystaff_needed, n_best=1)
    return best_child

best_planning = gen_algo(hourlystaff_needed, n_iterations=100)
print(best_planning)
print(staffplanning_to_hourlyplanning(best_planning[0]))
print(hourlystaff_needed)
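"""
Illustrative follow-up, not part of the original script: score the winning
plan explicitly. gen_algo already prints per-generation best and worst costs
via select_best, so this only restates the final number.
"""
best = best_planning[0]
print('best cost:', cost(staffplanning_to_hourlyplanning(best), hourlystaff_needed))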
[ "Team' # The full version, including alpha/beta/rc tags. release = __version__ # --", "html_baseurl = 'https://brazil-data-cube.github.io/' html_context = { 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated':", "-- Options for HTML output ------------------------------------------------- # The theme to use for HTML", "3, 'includehidden': True, 'titles_only': False } html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context", "Client Library for Web Time Series Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. -------------------", "2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version import __version__ # -- Project information -----------------------------------------------------", "'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png' html_favicon =", "terms of the GNU General Public License as published by the Free Software", "language = 'en_US' # List of patterns, relative to source directory, that match", "# List of patterns, relative to source directory, that match files and #", "the License, or (at your option) any later version. WTSS QGIS Plugin documentation", "False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink = False html_logo =", "The language for content autogenerated by Sphinx. language = 'en_US' # List of", "content autogenerated by Sphinx. language = 'en_US' # List of patterns, relative to", "'sphinx_rtd_theme', ] # Paths that contain templates, relative to this directory. templates_path =", "The full version, including alpha/beta/rc tags. release = __version__ # -- General configuration", "= 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context = { 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo':", "either version 2 of the License, or (at your option) any later version.", "pages. html_theme = 'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True,", "# The theme to use for HTML and HTML Help pages. html_theme =", "= 'en_US' # List of patterns, relative to source directory, that match files", "'section': 'Section %s.' } copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.:", "or (at your option) any later version. WTSS QGIS Plugin documentation build configuration", "output ------------------------------------------------- # The theme to use for HTML and HTML Help pages.", "is free software. You can redistribute it and/or modify it under the terms", "%s -', 'code-block': 'Code snippet %s -', 'section': 'Section %s.' 
} copybutton_prompt_text =", "html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path = [ '_static', ] html_css_files =", "'Thumbs.db', '.DS_Store', ] # -- Options for HTML output ------------------------------------------------- # The theme", "directory, that match files and # directories to ignore when looking for source", "\"\"\" import sphinx_rtd_theme from wtss_plugin.version import __version__ # -- Project information ----------------------------------------------------- project", "sha : $Format:%H$ copyright : (C) 2020 by INPE email : <EMAIL> This", "'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation':", "of patterns, relative to source directory, that match files and # directories to", "theme to use for HTML and HTML Help pages. html_theme = 'sphinx_rtd_theme' html_theme_options", "False, } html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path =", "Data Cube Team' # The full version, including alpha/beta/rc tags. release = __version__", "-', 'code-block': 'Code snippet %s -', 'section': 'Section %s.' } copybutton_prompt_text = r'>>>", "email : <EMAIL> This program is free software. You can redistribute it and/or", "'#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False } html_title", "= './assets/img/favicon.ico' html_static_path = [ '_static', ] html_css_files = [ ] html_last_updated_fmt =", "by Sphinx. language = 'en_US' # List of patterns, relative to source directory,", "to ignore when looking for source files. # This pattern also affects html_static_path", ": (C) 2020 by INPE email : <EMAIL> This program is free software.", "] html_css_files = [ ] html_last_updated_fmt = '%b %d, %Y' html_show_sphinx = False", "'Brazil Data Cube Team' # The full version, including alpha/beta/rc tags. release =", "snippet %s -', 'section': 'Section %s.' } copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$ |In", "by sphinx-quickstart on Sun Feb 12 17:11:03 2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version", "when looking for source files. # This pattern also affects html_static_path and html_extra_path.", "this directory. templates_path = ['_templates'] # The language for content autogenerated by Sphinx.", "html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True,", "] # -- Options for HTML output ------------------------------------------------- # The theme to use", "software. You can redistribute it and/or modify it under the terms of the", "the Free Software Foundation; either version 2 of the License, or (at your", "by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04 git sha : $Format:%H$ copyright", "= '%b %d, %Y' html_show_sphinx = False html_search_language = 'en' numfig = True", "= __version__ # -- General configuration --------------------------------------------------- # Enabled Sphinx extensions. extensions =", "This program is free software. You can redistribute it and/or modify it under", "%s -', 'section': 'Section %s.' } copybutton_prompt_text = r'>>> |\\.\\.\\. 
|\\$ |In \\[\\d*\\]:", "from wtss_plugin.version import __version__ # -- Project information ----------------------------------------------------- project = 'WTSS-QGIS' copyright", "False } html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context = { 'display_github': False,", "and # directories to ignore when looking for source files. # This pattern", "(C) 2020 by INPE email : <EMAIL> This program is free software. You", "numfig_format = { 'figure': 'Figure %s -', 'table': 'Table %s -', 'code-block': 'Code", "= { 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink =", "'last_updated': False, } html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path", "] # Paths that contain templates, relative to this directory. templates_path = ['_templates']", "Enabled Sphinx extensions. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ]", "html_theme = 'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location':", "# -- General configuration --------------------------------------------------- # Enabled Sphinx extensions. extensions = [ 'sphinx.ext.autodoc',", "use for HTML and HTML Help pages. html_theme = 'sphinx_rtd_theme' html_theme_options = {", "{ 'figure': 'Figure %s -', 'table': 'Table %s -', 'code-block': 'Code snippet %s", "'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True, 'titles_only':", "ignore when looking for source files. # This pattern also affects html_static_path and", "'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False } html_title =", "'wtss-qgis', 'last_updated': False, } html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico'", "as published by the Free Software Foundation; either version 2 of the License,", "License, or (at your option) any later version. WTSS QGIS Plugin documentation build", "%Y' html_show_sphinx = False html_search_language = 'en' numfig = True numfig_format = {", "under the terms of the GNU General Public License as published by the", "'./assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path = [ '_static', ] html_css_files = [ ]", "'code-block': 'Code snippet %s -', 'section': 'Section %s.' } copybutton_prompt_text = r'>>> |\\.\\.\\.", "2020 by INPE email : <EMAIL> This program is free software. You can", "import __version__ # -- Project information ----------------------------------------------------- project = 'WTSS-QGIS' copyright = '2020,", "begin : 2019-05-04 git sha : $Format:%H$ copyright : (C) 2020 by INPE", "= ['_templates'] # The language for content autogenerated by Sphinx. language = 'en_US'", "WTSS QGIS Plugin documentation build configuration file, created by sphinx-quickstart on Sun Feb", "-', 'section': 'Section %s.' } copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: |", "also affects html_static_path and html_extra_path. exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store', ] #", "INPE.' 
author = 'Brazil Data Cube Team' # The full version, including alpha/beta/rc", "that contain templates, relative to this directory. templates_path = ['_templates'] # The language", "files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [ '_build',", "'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context = { 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis',", "can redistribute it and/or modify it under the terms of the GNU General", "'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png'", "|\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: ' copybutton_prompt_is_regexp = True master_doc", "License as published by the Free Software Foundation; either version 2 of the", "'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that contain templates, relative to this directory.", "QGIS Plugin. Python Client Library for Web Time Series Service. Generated by Plugin", "build configuration file, created by sphinx-quickstart on Sun Feb 12 17:11:03 2012. \"\"\"", "templates, relative to this directory. templates_path = ['_templates'] # The language for content", "directories to ignore when looking for source files. # This pattern also affects", "for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns =", "{ 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink = False", "copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: ' copybutton_prompt_is_regexp", "# This pattern also affects html_static_path and html_extra_path. exclude_patterns = [ '_build', 'Thumbs.db',", "'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png' html_favicon", "it and/or modify it under the terms of the GNU General Public License", "# Enabled Sphinx extensions. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme',", "Sphinx. language = 'en_US' # List of patterns, relative to source directory, that", ": <EMAIL> This program is free software. You can redistribute it and/or modify", "= False html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path = [ '_static', ]", "'.DS_Store', ] # -- Options for HTML output ------------------------------------------------- # The theme to", "documentation build configuration file, created by sphinx-quickstart on Sun Feb 12 17:11:03 2012.", "to this directory. templates_path = ['_templates'] # The language for content autogenerated by", "'navigation_depth': 3, 'includehidden': True, 'titles_only': False } html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/'", "'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False } html_title = 'WTSS-QGIS' html_baseurl", "= [ ] html_last_updated_fmt = '%b %d, %Y' html_show_sphinx = False html_search_language =", "'_static', ] html_css_files = [ ] html_last_updated_fmt = '%b %d, %Y' html_show_sphinx =", ": $Format:%H$ copyright : (C) 2020 by INPE email : <EMAIL> This program", "Paths that contain templates, relative to this directory. 
templates_path = ['_templates'] # The", "'./assets/img/favicon.ico' html_static_path = [ '_static', ] html_css_files = [ ] html_last_updated_fmt = '%b", "__version__ # -- General configuration --------------------------------------------------- # Enabled Sphinx extensions. extensions = [", "General configuration --------------------------------------------------- # Enabled Sphinx extensions. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon',", "extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that", "sphinx-quickstart on Sun Feb 12 17:11:03 2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version import", "= 'Brazil Data Cube Team' # The full version, including alpha/beta/rc tags. release", "# Paths that contain templates, relative to this directory. templates_path = ['_templates'] #", "HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help", "General Public License as published by the Free Software Foundation; either version 2", "autogenerated by Sphinx. language = 'en_US' # List of patterns, relative to source", "'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False }", "|\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: ' copybutton_prompt_is_regexp = True master_doc =", "# -- Options for HTML output ------------------------------------------------- # The theme to use for", "'%b %d, %Y' html_show_sphinx = False html_search_language = 'en' numfig = True numfig_format", "published by the Free Software Foundation; either version 2 of the License, or", "the terms of the GNU General Public License as published by the Free", "GNU General Public License as published by the Free Software Foundation; either version", "False, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False } html_title = 'WTSS-QGIS' html_baseurl =", "of the GNU General Public License as published by the Free Software Foundation;", "extensions. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths", "information ----------------------------------------------------- project = 'WTSS-QGIS' copyright = '2020, INPE.' author = 'Brazil Data", "html_favicon = './assets/img/favicon.ico' html_static_path = [ '_static', ] html_css_files = [ ] html_last_updated_fmt", "$Format:%H$ copyright : (C) 2020 by INPE email : <EMAIL> This program is", "directory. templates_path = ['_templates'] # The language for content autogenerated by Sphinx. language", "for content autogenerated by Sphinx. language = 'en_US' # List of patterns, relative", "= { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background':", "Help pages. 
html_theme = 'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version':", "= 'https://brazil-data-cube.github.io/' html_context = { 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False,", "import sphinx_rtd_theme from wtss_plugin.version import __version__ # -- Project information ----------------------------------------------------- project =", "= False html_search_language = 'en' numfig = True numfig_format = { 'figure': 'Figure", "'2020, INPE.' author = 'Brazil Data Cube Team' # The full version, including", "Time Series Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04 git", "{ 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9',", "= 'en' numfig = True numfig_format = { 'figure': 'Figure %s -', 'table':", "Free Software Foundation; either version 2 of the License, or (at your option)", "by INPE email : <EMAIL> This program is free software. You can redistribute", "html_context = { 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink", "|In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: ' copybutton_prompt_is_regexp = True master_doc = 'index'", "# The language for content autogenerated by Sphinx. language = 'en_US' # List", "contain templates, relative to this directory. templates_path = ['_templates'] # The language for", "----------------------------------------------------- project = 'WTSS-QGIS' copyright = '2020, INPE.' author = 'Brazil Data Cube", "WTSS QGIS Plugin. Python Client Library for Web Time Series Service. Generated by", "author = 'Brazil Data Cube Team' # The full version, including alpha/beta/rc tags.", "'Code snippet %s -', 'section': 'Section %s.' } copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$", "exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store', ] # -- Options for HTML output", "by the Free Software Foundation; either version 2 of the License, or (at", "created by sphinx-quickstart on Sun Feb 12 17:11:03 2012. \"\"\" import sphinx_rtd_theme from", "= r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: ' copybutton_prompt_is_regexp =", "configuration --------------------------------------------------- # Enabled Sphinx extensions. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo',", "looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns", "for HTML output ------------------------------------------------- # The theme to use for HTML and HTML", "2019-05-04 git sha : $Format:%H$ copyright : (C) 2020 by INPE email :", "= [ '_static', ] html_css_files = [ ] html_last_updated_fmt = '%b %d, %Y'", "patterns, relative to source directory, that match files and # directories to ignore", "%s.' } copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}:", "it under the terms of the GNU General Public License as published by", "file, created by sphinx-quickstart on Sun Feb 12 17:11:03 2012. 
\"\"\" import sphinx_rtd_theme", "'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, } html_show_sourcelink = False html_logo", "= True numfig_format = { 'figure': 'Figure %s -', 'table': 'Table %s -',", "True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False } html_title = 'WTSS-QGIS'", "html_last_updated_fmt = '%b %d, %Y' html_show_sphinx = False html_search_language = 'en' numfig =", "for HTML and HTML Help pages. html_theme = 'sphinx_rtd_theme' html_theme_options = { 'analytics_id':", "version, including alpha/beta/rc tags. release = __version__ # -- General configuration --------------------------------------------------- #", "True numfig_format = { 'figure': 'Figure %s -', 'table': 'Table %s -', 'code-block':", "for Web Time Series Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin :", "True, 'titles_only': False } html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context = {", "= [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that contain", "wtss_plugin.version import __version__ # -- Project information ----------------------------------------------------- project = 'WTSS-QGIS' copyright =", "Library for Web Time Series Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin", "__version__ # -- Project information ----------------------------------------------------- project = 'WTSS-QGIS' copyright = '2020, INPE.'", "} html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path = [", "= { 'figure': 'Figure %s -', 'table': 'Table %s -', 'code-block': 'Code snippet", "True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3,", "r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: ' copybutton_prompt_is_regexp = True", "(at your option) any later version. WTSS QGIS Plugin documentation build configuration file,", "-- General configuration --------------------------------------------------- # Enabled Sphinx extensions. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest',", "'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links':", "option) any later version. WTSS QGIS Plugin documentation build configuration file, created by", "[ '_build', 'Thumbs.db', '.DS_Store', ] # -- Options for HTML output ------------------------------------------------- #", "Foundation; either version 2 of the License, or (at your option) any later", "'_build', 'Thumbs.db', '.DS_Store', ] # -- Options for HTML output ------------------------------------------------- # The", "sphinx_rtd_theme from wtss_plugin.version import __version__ # -- Project information ----------------------------------------------------- project = 'WTSS-QGIS'", "to source directory, that match files and # directories to ignore when looking", "Project information ----------------------------------------------------- project = 'WTSS-QGIS' copyright = '2020, INPE.' author = 'Brazil", "source files. 
# This pattern also affects html_static_path and html_extra_path. exclude_patterns = [", "and html_extra_path. exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store', ] # -- Options for", "the GNU General Public License as published by the Free Software Foundation; either", "html_show_sourcelink = False html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path = [ '_static',", "including alpha/beta/rc tags. release = __version__ # -- General configuration --------------------------------------------------- # Enabled", "['_templates'] # The language for content autogenerated by Sphinx. language = 'en_US' #", "} copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: | {5,8}: '", "relative to this directory. templates_path = ['_templates'] # The language for content autogenerated", "This pattern also affects html_static_path and html_extra_path. exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store',", "Series Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04 git sha", "'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True,", "] html_last_updated_fmt = '%b %d, %Y' html_show_sphinx = False html_search_language = 'en' numfig", "version 2 of the License, or (at your option) any later version. WTSS", "copyright : (C) 2020 by INPE email : <EMAIL> This program is free", "Plugin documentation build configuration file, created by sphinx-quickstart on Sun Feb 12 17:11:03", "'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that contain templates, relative", "relative to source directory, that match files and # directories to ignore when", "Software Foundation; either version 2 of the License, or (at your option) any", "HTML and HTML Help pages. html_theme = 'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX',", "'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth':", "Options for HTML output ------------------------------------------------- # The theme to use for HTML and", "html_extra_path. exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store', ] # -- Options for HTML", "Python Client Library for Web Time Series Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/.", ": 2019-05-04 git sha : $Format:%H$ copyright : (C) 2020 by INPE email", "html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context = { 'display_github': False, 'github_user': 'brazil-data-cube',", "'WTSS-QGIS' copyright = '2020, INPE.' author = 'Brazil Data Cube Team' # The", "--------------------------------------------------- # Enabled Sphinx extensions. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton',", "QGIS Plugin documentation build configuration file, created by sphinx-quickstart on Sun Feb 12", "# directories to ignore when looking for source files. # This pattern also", "Sphinx extensions. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] #", "full version, including alpha/beta/rc tags. release = __version__ # -- General configuration ---------------------------------------------------", "templates_path = ['_templates'] # The language for content autogenerated by Sphinx. language =", "free software. You can redistribute it and/or modify it under the terms of", "redistribute it and/or modify it under the terms of the GNU General Public", "of the License, or (at your option) any later version. WTSS QGIS Plugin", "'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden':", "12 17:11:03 2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version import __version__ # -- Project", "'titles_only': False } html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context = { 'display_github':", "html_static_path = [ '_static', ] html_css_files = [ ] html_last_updated_fmt = '%b %d,", "'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that contain templates, relative to this", "Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04 git sha :", "# -- Project information ----------------------------------------------------- project = 'WTSS-QGIS' copyright = '2020, INPE.' author", "any later version. WTSS QGIS Plugin documentation build configuration file, created by sphinx-quickstart", "source directory, that match files and # directories to ignore when looking for", "# The full version, including alpha/beta/rc tags. release = __version__ # -- General", "The theme to use for HTML and HTML Help pages. html_theme = 'sphinx_rtd_theme'", "True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False, 'navigation_depth': 3, 'includehidden': True, 'titles_only': False", "'includehidden': True, 'titles_only': False } html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context =", "HTML Help pages. html_theme = 'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False,", "and/or modify it under the terms of the GNU General Public License as", "= './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path = [ '_static', ] html_css_files = [", "<EMAIL> This program is free software. You can redistribute it and/or modify it", "Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04 git sha : $Format:%H$ copyright :", "files and # directories to ignore when looking for source files. # This", "False html_search_language = 'en' numfig = True numfig_format = { 'figure': 'Figure %s", "Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04 git sha : $Format:%H$ copyright : (C)", "http://g-sherman.github.io/Qgis-Plugin-Builder/. 
------------------- begin : 2019-05-04 git sha : $Format:%H$ copyright : (C) 2020", "[ '_static', ] html_css_files = [ ] html_last_updated_fmt = '%b %d, %Y' html_show_sphinx", "List of patterns, relative to source directory, that match files and # directories", "} html_title = 'WTSS-QGIS' html_baseurl = 'https://brazil-data-cube.github.io/' html_context = { 'display_github': False, 'github_user':", "Plugin. Python Client Library for Web Time Series Service. Generated by Plugin Builder:", "Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04 git sha : $Format:%H$", "match files and # directories to ignore when looking for source files. #", "and HTML Help pages. html_theme = 'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only':", "'Figure %s -', 'table': 'Table %s -', 'code-block': 'Code snippet %s -', 'section':", "'en_US' # List of patterns, relative to source directory, that match files and", "\"\"\" WTSS QGIS Plugin. Python Client Library for Web Time Series Service. Generated", "'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True,", "configuration file, created by sphinx-quickstart on Sun Feb 12 17:11:03 2012. \"\"\" import", "pattern also affects html_static_path and html_extra_path. exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store', ]", "your option) any later version. WTSS QGIS Plugin documentation build configuration file, created", "Feb 12 17:11:03 2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version import __version__ # --", "-- Project information ----------------------------------------------------- project = 'WTSS-QGIS' copyright = '2020, INPE.' author =", "[ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that contain templates,", "'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that contain templates, relative to this directory. templates_path", "Sun Feb 12 17:11:03 2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version import __version__ #", "------------------------------------------------- # The theme to use for HTML and HTML Help pages. html_theme", "modify it under the terms of the GNU General Public License as published", "numfig = True numfig_format = { 'figure': 'Figure %s -', 'table': 'Table %s", "= 'WTSS-QGIS' copyright = '2020, INPE.' author = 'Brazil Data Cube Team' #", "'Table %s -', 'code-block': 'Code snippet %s -', 'section': 'Section %s.' } copybutton_prompt_text", "Web Time Series Service. Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/. ------------------- begin : 2019-05-04", "tags. release = __version__ # -- General configuration --------------------------------------------------- # Enabled Sphinx extensions.", "%d, %Y' html_show_sphinx = False html_search_language = 'en' numfig = True numfig_format =", "= 'sphinx_rtd_theme' html_theme_options = { 'analytics_id': 'XXXXXXXXXX', 'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both',", "html_static_path and html_extra_path. 
exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store', ] # -- Options", "'logo_only': False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation':", "Cube Team' # The full version, including alpha/beta/rc tags. release = __version__ #", "'Section %s.' } copybutton_prompt_text = r'>>> |\\.\\.\\. |\\$ |In \\[\\d*\\]: | {2,5}\\.\\.\\.: |", "2 of the License, or (at your option) any later version. WTSS QGIS", "False html_logo = './assets/img/logo-bdc.png' html_favicon = './assets/img/favicon.ico' html_static_path = [ '_static', ] html_css_files", "'table': 'Table %s -', 'code-block': 'Code snippet %s -', 'section': 'Section %s.' }", "project = 'WTSS-QGIS' copyright = '2020, INPE.' author = 'Brazil Data Cube Team'", "%s -', 'table': 'Table %s -', 'code-block': 'Code snippet %s -', 'section': 'Section", "version. WTSS QGIS Plugin documentation build configuration file, created by sphinx-quickstart on Sun", "'en' numfig = True numfig_format = { 'figure': 'Figure %s -', 'table': 'Table", "------------------- begin : 2019-05-04 git sha : $Format:%H$ copyright : (C) 2020 by", "that match files and # directories to ignore when looking for source files.", "language for content autogenerated by Sphinx. language = 'en_US' # List of patterns,", "Public License as published by the Free Software Foundation; either version 2 of", "later version. WTSS QGIS Plugin documentation build configuration file, created by sphinx-quickstart on", "copyright = '2020, INPE.' author = 'Brazil Data Cube Team' # The full", "'https://brazil-data-cube.github.io/' html_context = { 'display_github': False, 'github_user': 'brazil-data-cube', 'github_repo': 'wtss-qgis', 'last_updated': False, }", "html_search_language = 'en' numfig = True numfig_format = { 'figure': 'Figure %s -',", "html_css_files = [ ] html_last_updated_fmt = '%b %d, %Y' html_show_sphinx = False html_search_language", "release = __version__ # -- General configuration --------------------------------------------------- # Enabled Sphinx extensions. extensions", "INPE email : <EMAIL> This program is free software. You can redistribute it", "html_show_sphinx = False html_search_language = 'en' numfig = True numfig_format = { 'figure':", "-', 'table': 'Table %s -', 'code-block': 'Code snippet %s -', 'section': 'Section %s.'", "'figure': 'Figure %s -', 'table': 'Table %s -', 'code-block': 'Code snippet %s -',", "17:11:03 2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version import __version__ # -- Project information", "[ ] html_last_updated_fmt = '%b %d, %Y' html_show_sphinx = False html_search_language = 'en'", "You can redistribute it and/or modify it under the terms of the GNU", "alpha/beta/rc tags. release = __version__ # -- General configuration --------------------------------------------------- # Enabled Sphinx", "= [ '_build', 'Thumbs.db', '.DS_Store', ] # -- Options for HTML output -------------------------------------------------", "False, 'display_version': True, 'prev_next_buttons_location': 'both', 'style_external_links': True, 'style_nav_header_background': '#2980B9', 'collapse_navigation': True, 'sticky_navigation': False,", "on Sun Feb 12 17:11:03 2012. \"\"\" import sphinx_rtd_theme from wtss_plugin.version import __version__", "= '2020, INPE.' author = 'Brazil Data Cube Team' # The full version,", "program is free software. 
You can redistribute it and/or modify it under the", "'sphinx.ext.doctest', 'sphinx.ext.napoleon', 'sphinx.ext.todo', 'sphinx_copybutton', 'sphinx_rtd_theme', ] # Paths that contain templates, relative to", "affects html_static_path and html_extra_path. exclude_patterns = [ '_build', 'Thumbs.db', '.DS_Store', ] # --", "to use for HTML and HTML Help pages. html_theme = 'sphinx_rtd_theme' html_theme_options =", "git sha : $Format:%H$ copyright : (C) 2020 by INPE email : <EMAIL>" ]
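As a quick sanity check of the copybutton_prompt_text pattern above, a standalone snippet (the sample lines are invented for illustration) shows which console prompts sphinx-copybutton would strip when a reader copies a snippet:

import re

# Same pattern as copybutton_prompt_text above.
prompt = re.compile(r'>>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: ')

for line in ['>>> import wtss', '... ts.plot()', '$ pip install wtss', 'In [1]: 1 + 1']:
    print(prompt.sub('', line, count=1))
# prints: import wtss / ts.plot() / pip install wtss / 1 + 1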
[ "self.y def save_signal(self): with open (self.filename, 'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) +", "self.level * 10) / 10 - self.level self.y[i] = self.level * 10 *", "import random import numpy as np import time class Signalgenerator(): def __init__(self): self.Fs", "+ str(random.randint(1, self.level * 10)) + '\\n') f.write('Spectrum:\\n') for i in range(0, self.sample):", "'\\n') f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\\n') f.write('Spectrum:\\n') for i in", "range(0, self.sample): delta = random.randint(1, self.level * 10) / 10 - self.level self.y[i]", "'' def set_filename(self, name): self.filename = name def configure_device(self, level): self.level = level", "self.level * 10 * np.cos(2* np.pi * self.f * i / self.Fs) +", "get_signal(self): return self.y def save_signal(self): with open (self.filename, 'w') as f: f.write('Time=' +", "configure_device(self, level): self.level = level def measure_signal(self): for i in range(0, self.sample): delta", "10) / 10 - self.level self.y[i] = self.level * 10 * np.cos(2* np.pi", "* i / self.Fs) + delta def get_signal(self): return self.y def save_signal(self): with", "self.f = 2 self.sample = 8000 self.x = np.arange(1, self.sample+1) self.y = np.empty(self.sample)", "= 8000 self.f = 2 self.sample = 8000 self.x = np.arange(1, self.sample+1) self.y", "f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1, self.level * 10)) +", "self.sample+1) self.y = np.empty(self.sample) self.level = 0 self.filename = '' def set_filename(self, name):", "* self.f * i / self.Fs) + delta def get_signal(self): return self.y def", "def get_signal(self): return self.y def save_signal(self): with open (self.filename, 'w') as f: f.write('Time='", "= self.level * 10 * np.cos(2* np.pi * self.f * i / self.Fs)", "as np import time class Signalgenerator(): def __init__(self): self.Fs = 8000 self.f =", "np.empty(self.sample) self.level = 0 self.filename = '' def set_filename(self, name): self.filename = name", "self.f * i / self.Fs) + delta def get_signal(self): return self.y def save_signal(self):", "self.x = np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level = 0 self.filename = ''", "= np.empty(self.sample) self.level = 0 self.filename = '' def set_filename(self, name): self.filename =", "- self.level self.y[i] = self.level * 10 * np.cos(2* np.pi * self.f *", "/ self.Fs) + delta def get_signal(self): return self.y def save_signal(self): with open (self.filename,", "with open (self.filename, 'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' +", "random.randint(1, self.level * 10) / 10 - self.level self.y[i] = self.level * 10", "'\\n') f.write('Spectrum:\\n') for i in range(0, self.sample): f.write(str(self.x[i]) + '\\t' + str(self.y[i]) +", "f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\\n')", "8000 self.x = np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level = 0 self.filename =", "measure_signal(self): for i in range(0, self.sample): delta = random.randint(1, self.level * 10) /", "as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1, self.level * 10))", "'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1, 
self.level *", "class Signalgenerator(): def __init__(self): self.Fs = 8000 self.f = 2 self.sample = 8000", "2 self.sample = 8000 self.x = np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level =", "level): self.level = level def measure_signal(self): for i in range(0, self.sample): delta =", "__init__(self): self.Fs = 8000 self.f = 2 self.sample = 8000 self.x = np.arange(1,", "self.level * 10)) + '\\n') f.write('Spectrum:\\n') for i in range(0, self.sample): f.write(str(self.x[i]) +", "f.write('Spectrum:\\n') for i in range(0, self.sample): f.write(str(self.x[i]) + '\\t' + str(self.y[i]) + '\\n')", "= random.randint(1, self.level * 10) / 10 - self.level self.y[i] = self.level *", "8000 self.f = 2 self.sample = 8000 self.x = np.arange(1, self.sample+1) self.y =", "np.pi * self.f * i / self.Fs) + delta def get_signal(self): return self.y", "random import numpy as np import time class Signalgenerator(): def __init__(self): self.Fs =", "+ str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\\n') f.write('Spectrum:\\n')", "numpy as np import time class Signalgenerator(): def __init__(self): self.Fs = 8000 self.f", "def configure_device(self, level): self.level = level def measure_signal(self): for i in range(0, self.sample):", "def __init__(self): self.Fs = 8000 self.f = 2 self.sample = 8000 self.x =", "Signalgenerator(): def __init__(self): self.Fs = 8000 self.f = 2 self.sample = 8000 self.x", "10 * np.cos(2* np.pi * self.f * i / self.Fs) + delta def", "save_signal(self): with open (self.filename, 'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity='", "time class Signalgenerator(): def __init__(self): self.Fs = 8000 self.f = 2 self.sample =", "= level def measure_signal(self): for i in range(0, self.sample): delta = random.randint(1, self.level", "import time class Signalgenerator(): def __init__(self): self.Fs = 8000 self.f = 2 self.sample", "self.Fs = 8000 self.f = 2 self.sample = 8000 self.x = np.arange(1, self.sample+1)", "for i in range(0, self.sample): delta = random.randint(1, self.level * 10) / 10", "+ delta def get_signal(self): return self.y def save_signal(self): with open (self.filename, 'w') as", "self.filename = name def configure_device(self, level): self.level = level def measure_signal(self): for i", "import numpy as np import time class Signalgenerator(): def __init__(self): self.Fs = 8000", "delta = random.randint(1, self.level * 10) / 10 - self.level self.y[i] = self.level", "f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\\n') f.write('Spectrum:\\n') for i in range(0,", "* 10)) + '\\n') f.write('Spectrum:\\n') for i in range(0, self.sample): f.write(str(self.x[i]) + '\\t'", "* 10 * np.cos(2* np.pi * self.f * i / self.Fs) + delta", "10 - self.level self.y[i] = self.level * 10 * np.cos(2* np.pi * self.f", "np.cos(2* np.pi * self.f * i / self.Fs) + delta def get_signal(self): return", "def save_signal(self): with open (self.filename, 'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n')", "def set_filename(self, name): self.filename = name def configure_device(self, level): self.level = level def", "np import time class Signalgenerator(): def __init__(self): self.Fs = 8000 self.f = 2", "name): self.filename = name def configure_device(self, level): self.level = level def measure_signal(self): for", "* 10) / 10 - self.level self.y[i] = self.level * 10 * np.cos(2*", "level def 
measure_signal(self): for i in range(0, self.sample): delta = random.randint(1, self.level *", "self.sample): delta = random.randint(1, self.level * 10) / 10 - self.level self.y[i] =", "np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level = 0 self.filename = '' def set_filename(self,", "return self.y def save_signal(self): with open (self.filename, 'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time())))", "str(random.randint(1, self.level * 10)) + '\\n') f.write('Spectrum:\\n') for i in range(0, self.sample): f.write(str(self.x[i])", "set_filename(self, name): self.filename = name def configure_device(self, level): self.level = level def measure_signal(self):", "10)) + '\\n') f.write('Spectrum:\\n') for i in range(0, self.sample): f.write(str(self.x[i]) + '\\t' +", "open (self.filename, 'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1,", "delta def get_signal(self): return self.y def save_signal(self): with open (self.filename, 'w') as f:", "self.y[i] = self.level * 10 * np.cos(2* np.pi * self.f * i /", "= 0 self.filename = '' def set_filename(self, name): self.filename = name def configure_device(self,", "+ '\\n') f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\\n') f.write('Spectrum:\\n') for i", "self.y = np.empty(self.sample) self.level = 0 self.filename = '' def set_filename(self, name): self.filename", "def measure_signal(self): for i in range(0, self.sample): delta = random.randint(1, self.level * 10)", "* np.cos(2* np.pi * self.f * i / self.Fs) + delta def get_signal(self):", "name def configure_device(self, level): self.level = level def measure_signal(self): for i in range(0,", "= 8000 self.x = np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level = 0 self.filename", "str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1, self.level * 10)) + '\\n') f.write('Spectrum:\\n') for", "= np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level = 0 self.filename = '' def", "+ '\\n') f.write('Spectrum:\\n') for i in range(0, self.sample): f.write(str(self.x[i]) + '\\t' + str(self.y[i])", "self.level = level def measure_signal(self): for i in range(0, self.sample): delta = random.randint(1,", "= '' def set_filename(self, name): self.filename = name def configure_device(self, level): self.level =", "self.level = 0 self.filename = '' def set_filename(self, name): self.filename = name def", "= name def configure_device(self, level): self.level = level def measure_signal(self): for i in", "0 self.filename = '' def set_filename(self, name): self.filename = name def configure_device(self, level):", "/ 10 - self.level self.y[i] = self.level * 10 * np.cos(2* np.pi *", "= 2 self.sample = 8000 self.x = np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level", "i / self.Fs) + delta def get_signal(self): return self.y def save_signal(self): with open", "(self.filename, 'w') as f: f.write('Time=' + str(time.asctime(time.localtime(time.time()))) + '\\n') f.write('Intensity=' + str(random.randint(1, self.level", "self.sample = 8000 self.x = np.arange(1, self.sample+1) self.y = np.empty(self.sample) self.level = 0", "self.level self.y[i] = self.level * 10 * np.cos(2* np.pi * self.f * i", "in range(0, self.sample): delta = random.randint(1, self.level * 10) / 10 - self.level", "self.Fs) + delta def get_signal(self): return self.y def save_signal(self): with open (self.filename, 'w')", 
"i in range(0, self.sample): delta = random.randint(1, self.level * 10) / 10 -", "self.filename = '' def set_filename(self, name): self.filename = name def configure_device(self, level): self.level" ]
[ "user is not None assert user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading", "self.flaskapp.test_request_context(): user = load_user(1) assert user is not None assert user.username == 'ganemone'", "@fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a single valid user\"\"\" with self.flaskapp.test_request_context(): user =", "test_load_existing_user(self): \"\"\"Test loading a single valid user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1) assert", "import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a single valid", "user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a user not in the", "def test_load_existing_user(self): \"\"\"Test loading a single valid user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1)", "not in the database\"\"\" with self.flaskapp.test_request_context(): user = load_user(50) assert user is None", "user not in the database\"\"\" with self.flaskapp.test_request_context(): user = load_user(50) assert user is", "loading a user not in the database\"\"\" with self.flaskapp.test_request_context(): user = load_user(50) assert", "single valid user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1) assert user is not None", "loading a single valid user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1) assert user is", "assert user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a user not in", "register, login from server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test", "valid user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1) assert user is not None assert", "# , register, login from server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def", "fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a single valid user\"\"\" with", "a user not in the database\"\"\" with self.flaskapp.test_request_context(): user = load_user(50) assert user", "\"\"\"Test loading a single valid user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1) assert user", "login from server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading", "test_load_nonexisting_user(self): \"\"\"Test loading a user not in the database\"\"\" with self.flaskapp.test_request_context(): user =", "from server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a", "def test_load_nonexisting_user(self): \"\"\"Test loading a user not in the database\"\"\" with self.flaskapp.test_request_context(): user", "= load_user(1) assert user is not None assert user.username == 'ganemone' @fixtures('base.json') def", "not None assert user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a user", "@fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a user not in the database\"\"\" with 
self.flaskapp.test_request_context():", "user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1) assert user is not None assert user.username", "is not None assert user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a", "'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a user not in the database\"\"\" with", "import load_user # , register, login from server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase):", "FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a single valid user\"\"\"", "None assert user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a user not", "with self.flaskapp.test_request_context(): user = load_user(1) assert user is not None assert user.username ==", ", register, login from server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self):", "class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a single valid user\"\"\" with self.flaskapp.test_request_context():", "a single valid user\"\"\" with self.flaskapp.test_request_context(): user = load_user(1) assert user is not", "\"\"\"Test loading a user not in the database\"\"\" with self.flaskapp.test_request_context(): user = load_user(50)", "user = load_user(1) assert user is not None assert user.username == 'ganemone' @fixtures('base.json')", "TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a single valid user\"\"\" with self.flaskapp.test_request_context(): user", "assert user is not None assert user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test", "from server.mod_auth.auth import load_user # , register, login from server.tests.helpers import FlaskTestCase, fixtures", "== 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self): \"\"\"Test loading a user not in the database\"\"\"", "server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json') def test_load_existing_user(self): \"\"\"Test loading a single", "load_user # , register, login from server.tests.helpers import FlaskTestCase, fixtures class TestAuth(FlaskTestCase): @fixtures('single_user.json')", "server.mod_auth.auth import load_user # , register, login from server.tests.helpers import FlaskTestCase, fixtures class", "load_user(1) assert user is not None assert user.username == 'ganemone' @fixtures('base.json') def test_load_nonexisting_user(self):" ]
[ "elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for coverage of virus", "bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args = parser.parse_args() # this can", "for human reads with open(args.s) as statFH: for line in statFH: f =", "#!/usr/bin/env python3 import argparse parser = argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\")", "[\"chr\" + str(num) for num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\",", "{} # stat file for human reads with open(args.s) as statFH: for line", "else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for key in virDict: cov = virDict[key][0]", "= cov/bases else: avgCov = 0 normCp = (avgCov) / (huReads/1000) # viral", "bedtools and bamindexstat then processing those outputs nums = list(range(1, 23)) chroms =", "outputs nums = list(range(1, 23)) chroms = [\"chr\" + str(num) for num in", "those outputs nums = list(range(1, 23)) chroms = [\"chr\" + str(num) for num", "with open(args.s) as statFH: for line in statFH: f = line.split() if str(f[0])", "# mcpyv.cov file for coverage of virus virDict = {} with open(args.g) as", "virus virDict = {} with open(args.g) as covFH: for line in covFH: f", "key in virDict: cov = virDict[key][0] bases = virDict[key][1] if (cov > 0):", "file and then running bedtools and bamindexstat then processing those outputs nums =", "str(num) for num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num)", "= parser.parse_args() # this can be simplified by just importing the bam file", "file for coverage of virus virDict = {} with open(args.g) as covFH: for", "= 0 path = args.s.split(\"/\") ext = path[-1].split(\".\") lengths = {} # stat", "virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) -", "then running bedtools and bamindexstat then processing those outputs nums = list(range(1, 23))", "= path[-1].split(\".\") lengths = {} # stat file for human reads with open(args.s)", "\"MT\"]) chroms.extend(str(num) for num in nums) huReads = 0 path = args.s.split(\"/\") ext", "int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for key in virDict:", "int(f[2])}) # mcpyv.cov file for coverage of virus virDict = {} with open(args.g)", "f = line.split() if str(f[0]) in chroms: huReads += int(f[4]) elif str(f[0]) !=", "!= 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for coverage of virus virDict =", "f = line.split() if str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2])", "chroms = [\"chr\" + str(num) for num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\",", "+= int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]})", "int(f[1])]}) for key in virDict: cov = virDict[key][0] bases = virDict[key][1] if (cov", "bases = virDict[key][1] if (cov > 0): avgCov = cov/bases else: avgCov =", "(cov > 0): avgCov = cov/bases else: avgCov = 0 normCp = (avgCov)", "\"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num in nums) huReads = 0", "coverage per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key, ext[0], normCp, huReads, bases, avgCov, cov, lengths[key]))", "ext = path[-1].split(\".\") lengths = {} # stat file 
for human reads with", "stat file for human reads with open(args.s) as statFH: for line in statFH:", "chroms: huReads += int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file", "line.split() if str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1])", "path = args.s.split(\"/\") ext = path[-1].split(\".\") lengths = {} # stat file for", "file for human reads with open(args.s) as statFH: for line in statFH: f", "statFH: for line in statFH: f = line.split() if str(f[0]) in chroms: huReads", "str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for coverage of virus virDict", "covFH: for line in covFH: f = line.split() if str(f[0]) in virDict: virDict[str(f[0])][0]", "num in nums) huReads = 0 path = args.s.split(\"/\") ext = path[-1].split(\".\") lengths", "> 0): avgCov = cov/bases else: avgCov = 0 normCp = (avgCov) /", "normCp = (avgCov) / (huReads/1000) # viral genome coverage per 1000 human reads", "(huReads/1000) # viral genome coverage per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key, ext[0], normCp, huReads,", "for num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for", "\"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num in nums) huReads =", "# viral genome coverage per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key, ext[0], normCp, huReads, bases,", "for coverage of virus virDict = {} with open(args.g) as covFH: for line", "viral genome coverage per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key, ext[0], normCp, huReads, bases, avgCov,", "python3 import argparse parser = argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\",", "= 0 normCp = (avgCov) / (huReads/1000) # viral genome coverage per 1000", "virDict[key][0] bases = virDict[key][1] if (cov > 0): avgCov = cov/bases else: avgCov", "int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for coverage of", "line.split() if str(f[0]) in chroms: huReads += int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]):", "int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for", "help=\"the bamindexstat file\") args = parser.parse_args() # this can be simplified by just", "huReads = 0 path = args.s.split(\"/\") ext = path[-1].split(\".\") lengths = {} #", "if str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else:", "simplified by just importing the bam file and then running bedtools and bamindexstat", "type=str, help=\"the bamindexstat file\") args = parser.parse_args() # this can be simplified by", "avgCov = cov/bases else: avgCov = 0 normCp = (avgCov) / (huReads/1000) #", "parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args = parser.parse_args() # this can be simplified", "- int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for key in virDict: cov", "path[-1].split(\".\") lengths = {} # stat file for human reads with open(args.s) as", "in covFH: f = line.split() if str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1]", "\"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num in nums) 
huReads = 0 path =", "str(f[0]) in chroms: huReads += int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) #", "\"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num in nums) huReads = 0 path", "line in covFH: f = line.split() if str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3])", "file\") args = parser.parse_args() # this can be simplified by just importing the", "int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for key in virDict: cov =", "'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for coverage of virus virDict = {}", "in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]),", "help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args = parser.parse_args() #", "argparse parser = argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the", "can be simplified by just importing the bam file and then running bedtools", "0): avgCov = cov/bases else: avgCov = 0 normCp = (avgCov) / (huReads/1000)", "genome coverage per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key, ext[0], normCp, huReads, bases, avgCov, cov,", "+ str(num) for num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"])", "virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2])", "reads with open(args.s) as statFH: for line in statFH: f = line.split() if", "cov = virDict[key][0] bases = virDict[key][1] if (cov > 0): avgCov = cov/bases", "then processing those outputs nums = list(range(1, 23)) chroms = [\"chr\" + str(num)", "be simplified by just importing the bam file and then running bedtools and", "int(f[2]) - int(f[1])]}) for key in virDict: cov = virDict[key][0] bases = virDict[key][1]", "bam file and then running bedtools and bamindexstat then processing those outputs nums", "= args.s.split(\"/\") ext = path[-1].split(\".\") lengths = {} # stat file for human", "= [\"chr\" + str(num) for num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\",", "as covFH: for line in covFH: f = line.split() if str(f[0]) in virDict:", "virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args = parser.parse_args() # this", "parser.parse_args() # this can be simplified by just importing the bam file and", "0 normCp = (avgCov) / (huReads/1000) # viral genome coverage per 1000 human", "parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args =", "lengths = {} # stat file for human reads with open(args.s) as statFH:", "# stat file for human reads with open(args.s) as statFH: for line in", "avgCov = 0 normCp = (avgCov) / (huReads/1000) # viral genome coverage per", "in chroms: huReads += int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov", "for line in statFH: f = line.split() if str(f[0]) in chroms: huReads +=", "= line.split() if str(f[0]) in chroms: huReads += int(f[4]) elif str(f[0]) != 'NoCoordinateCount=':", "in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num in", "= (avgCov) / (huReads/1000) # viral genome coverage 
per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key,", "0 path = args.s.split(\"/\") ext = path[-1].split(\".\") lengths = {} # stat file", "= list(range(1, 23)) chroms = [\"chr\" + str(num) for num in nums] chroms.extend([\"chrX\",", "with open(args.g) as covFH: for line in covFH: f = line.split() if str(f[0])", "covFH: f = line.split() if str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] +=", "open(args.s) as statFH: for line in statFH: f = line.split() if str(f[0]) in", "argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args", "\"M\", \"MT\"]) chroms.extend(str(num) for num in nums) huReads = 0 path = args.s.split(\"/\")", "this can be simplified by just importing the bam file and then running", "as statFH: for line in statFH: f = line.split() if str(f[0]) in chroms:", "bamindexstat file\") args = parser.parse_args() # this can be simplified by just importing", "human reads with open(args.s) as statFH: for line in statFH: f = line.split()", "for num in nums) huReads = 0 path = args.s.split(\"/\") ext = path[-1].split(\".\")", "= virDict[key][1] if (cov > 0): avgCov = cov/bases else: avgCov = 0", "huReads += int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for", "= line.split() if str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2]) -", "virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for key", "parser = argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat", "and bamindexstat then processing those outputs nums = list(range(1, 23)) chroms = [\"chr\"", "running bedtools and bamindexstat then processing those outputs nums = list(range(1, 23)) chroms", "str(f[0]) in virDict: virDict[str(f[0])][0] += int(f[3]) virDict[str(f[0])][1] += int(f[2]) - int(f[1]) else: virDict.update({str(f[0]):", "by just importing the bam file and then running bedtools and bamindexstat then", "import argparse parser = argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str,", "nums) huReads = 0 path = args.s.split(\"/\") ext = path[-1].split(\".\") lengths = {}", "the bam file and then running bedtools and bamindexstat then processing those outputs", "[int(f[3]), int(f[2]) - int(f[1])]}) for key in virDict: cov = virDict[key][0] bases =", "(avgCov) / (huReads/1000) # viral genome coverage per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key, ext[0],", "/ (huReads/1000) # viral genome coverage per 1000 human reads print(\"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\".format(key, ext[0], normCp,", "processing those outputs nums = list(range(1, 23)) chroms = [\"chr\" + str(num) for", "nums = list(range(1, 23)) chroms = [\"chr\" + str(num) for num in nums]", "statFH: f = line.split() if str(f[0]) in chroms: huReads += int(f[4]) elif str(f[0])", "lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for coverage of virus virDict = {} with", "virDict: cov = virDict[key][0] bases = virDict[key][1] if (cov > 0): avgCov =", "just importing the bam file and then running bedtools and bamindexstat then processing", "list(range(1, 23)) chroms = [\"chr\" + str(num) for num in nums] 
chroms.extend([\"chrX\", \"chrY\",", "+= int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])}) # mcpyv.cov file for coverage", "chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num in nums) huReads", "mcpyv.cov file for coverage of virus virDict = {} with open(args.g) as covFH:", "nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num in nums)", "for key in virDict: cov = virDict[key][0] bases = virDict[key][1] if (cov >", "+= int(f[2]) - int(f[1]) else: virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for key in", "<gh_stars>1-10 #!/usr/bin/env python3 import argparse parser = argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph", "of virus virDict = {} with open(args.g) as covFH: for line in covFH:", "- int(f[1])]}) for key in virDict: cov = virDict[key][0] bases = virDict[key][1] if", "args = parser.parse_args() # this can be simplified by just importing the bam", "23)) chroms = [\"chr\" + str(num) for num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\",", "virDict = {} with open(args.g) as covFH: for line in covFH: f =", "in virDict: cov = virDict[key][0] bases = virDict[key][1] if (cov > 0): avgCov", "type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args = parser.parse_args()", "num in nums] chroms.extend([\"chrX\", \"chrY\", \"chrM\", \"X\", \"Y\", \"M\", \"MT\"]) chroms.extend(str(num) for num", "virDict.update({str(f[0]): [int(f[3]), int(f[2]) - int(f[1])]}) for key in virDict: cov = virDict[key][0] bases", "else: avgCov = 0 normCp = (avgCov) / (huReads/1000) # viral genome coverage", "= argparse.ArgumentParser() parser.add_argument(\"g\", type=str, help=\"The virus bedgraph file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\")", "in statFH: f = line.split() if str(f[0]) in chroms: huReads += int(f[4]) elif", "bamindexstat then processing those outputs nums = list(range(1, 23)) chroms = [\"chr\" +", "= {} with open(args.g) as covFH: for line in covFH: f = line.split()", "in nums) huReads = 0 path = args.s.split(\"/\") ext = path[-1].split(\".\") lengths =", "= {} # stat file for human reads with open(args.s) as statFH: for", "if str(f[0]) in chroms: huReads += int(f[4]) elif str(f[0]) != 'NoCoordinateCount=': lengths.update({str(f[0]): int(f[2])})", "file\") parser.add_argument(\"s\", type=str, help=\"the bamindexstat file\") args = parser.parse_args() # this can be", "coverage of virus virDict = {} with open(args.g) as covFH: for line in", "cov/bases else: avgCov = 0 normCp = (avgCov) / (huReads/1000) # viral genome", "{} with open(args.g) as covFH: for line in covFH: f = line.split() if", "and then running bedtools and bamindexstat then processing those outputs nums = list(range(1,", "chroms.extend(str(num) for num in nums) huReads = 0 path = args.s.split(\"/\") ext =", "for line in covFH: f = line.split() if str(f[0]) in virDict: virDict[str(f[0])][0] +=", "virDict[key][1] if (cov > 0): avgCov = cov/bases else: avgCov = 0 normCp", "= virDict[key][0] bases = virDict[key][1] if (cov > 0): avgCov = cov/bases else:", "if (cov > 0): avgCov = cov/bases else: avgCov = 0 normCp =", "line in statFH: f = line.split() if str(f[0]) in chroms: huReads += int(f[4])", "importing the bam file and then running bedtools and bamindexstat then processing those", "# this can be simplified by just importing the bam 
file and then", "open(args.g) as covFH: for line in covFH: f = line.split() if str(f[0]) in", "args.s.split(\"/\") ext = path[-1].split(\".\") lengths = {} # stat file for human reads" ]
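To make the normalisation concrete with invented numbers: a summed depth of 50,000 over 5,000 covered bases, against 2,000,000 human reads, gives a mean viral coverage of 10x and a normalised value of 0.005:

cov, bases, huReads = 50_000, 5_000, 2_000_000   # illustrative values only
avgCov = cov / bases                 # 10.0x mean depth across the viral genome
normCp = avgCov / (huReads / 1000)   # 10.0 / 2000 = 0.005 coverage per 1000 human reads
print(avgCov, normCp)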
[ "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel(", "primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "[ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()),", "models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ]", "('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name',", "name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')),", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects',", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user',", "serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[ ('id',", "serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel( name='Log', fields=[", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel( name='Log',", "migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'), ), migrations.AddField( model_name='job',", "name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)),", "('number', models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), ],", "primary_key=True)), ('name', models.CharField(max_length=100)), ('number', 
models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID',", "migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)),", "serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ],", "('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel(", "models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student',", "migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField(", "), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'), ), migrations.AddField(", "('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ),", "('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher',", "models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID',", "name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student',", "auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), 
], ), migrations.CreateModel( name='Log', fields=[ ('id',", "] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group',", "serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs',", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')),", "migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job',", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID',", "migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "blank=True)), ], ), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()),", "models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject',", "('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject',", "('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID',", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), ], ),", "), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)),", "models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField(", "('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField(", "fields=[ ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ],", "models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'),", "-*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations", "), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()),", "], ), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic',", "models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ),", "from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations =", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True,", "class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Group', fields=[", "], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ],", "import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL),", "coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from", "model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'), ), migrations.AddField( model_name='job', name='subject',", "], ), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ],", "models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, 
primary_key=True)), ('name', models.CharField(max_length=100)),", "name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)),", "models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[", "('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ],", "), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student',", "to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[", "model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log',", "serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL,", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel(", "models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id',", "primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),", "('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ),", 
"('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job',", "from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings", "utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf", "django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [", "models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark',", "from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies =", "), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ),", "null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ),", "dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID',", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[", "name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'), ), migrations.AddField( model_name='job', name='subject', field=models.ForeignKey(to='home.Subject'),", "auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID',", "Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id',", "<reponame>Andrew0701/web-coursework # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import", "name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher',", "-*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import", "django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [", "], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', 
field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField(", "), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)),", "unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies", "], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number',", "primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')),", "= [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course',", "), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ),", "name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ),", "models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)),", "('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'),", "auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('group', models.CharField(max_length=5)),", "migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel(", "models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ),", "name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject',", "('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ],", 
"model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject',", "primary_key=True)), ('group', models.CharField(max_length=5)), ('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "('course', models.IntegerField()), ], ), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name',", "auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[ ('id',", "models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)),", "migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations", "('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name',", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel( name='Subject', fields=[ ('id',", "name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL,", "= [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "], ), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic',", "through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')),", "migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname',", "('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID',", "auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), 
('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)),", "primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[ ('id',", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ),", "('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job', through='home.Log')), ], ), migrations.CreateModel( name='Student_Subject', fields=[", "models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject',", "through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'), ),", "('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name',", "[ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "migrations.CreateModel( name='Student_Subject', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('student', models.ForeignKey(to='home.Student')), ], ), migrations.CreateModel(", "field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'),", "('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel(", "import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel(", "fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group',", "models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'),", "migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', 
models.CharField(max_length=100)), ('surname',", "auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')), ('jobs', models.ManyToManyField(to='home.Job',", "serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False,", "models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('subjects', models.ManyToManyField(to='home.Subject')), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True)), ], ), migrations.AddField( model_name='student_subject', name='subject',", "settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Group',", "migrations.AddField( model_name='student_subject', name='subject', field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects',", "migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student',", "models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('patronymic', models.CharField(max_length=100)), ('surname', models.CharField(max_length=100)), ('group', models.ForeignKey(null=True, to='home.Group')),", "], ), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date',", "serialize=False, auto_created=True, primary_key=True)), ('mark', models.IntegerField()), ('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student',", "import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration):", "models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)),", "models.CharField(max_length=100)), ('number', models.IntegerField(null=True, blank=True)), ], ), migrations.CreateModel( name='Log', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "('date', models.DateField()), ('job', models.ForeignKey(to='home.Job')), ], ), migrations.CreateModel( name='Student', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user',", "null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'), ), 
migrations.AddField( model_name='job', name='subject', field=models.ForeignKey(to='home.Subject'), ), ]", "), migrations.CreateModel( name='Job', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('number', models.IntegerField(null=True,", "field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True), ), migrations.AddField( model_name='log', name='student', field=models.ForeignKey(to='home.Student'), ), migrations.AddField( model_name='job', name='subject', field=models.ForeignKey(to='home.Subject'), ),", "__future__ import unicode_literals from django.db import models, migrations from django.conf import settings class", "field=models.ForeignKey(to='home.Subject'), ), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ),", "field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField( model_name='student', name='user', field=models.OneToOneField(to=settings.AUTH_USER_MODEL, null=True),", "auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ], ), migrations.CreateModel( name='Teacher', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True,", "), migrations.AddField( model_name='student_subject', name='teacher', field=models.ForeignKey(to='home.Teacher'), ), migrations.AddField( model_name='student', name='subjects', field=models.ManyToManyField(to='home.Subject', through='home.Student_Subject'), ), migrations.AddField(", "# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models," ]
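For orientation, here is a back-construction (not the project's actual source) of the `home/models.py` these operations imply, in the same Django 1.7/1.8-era field syntax; field order, `Meta` options, and `__str__` methods are unknowable from the migration alone.

from django.conf import settings
from django.db import models


class Group(models.Model):
    group = models.CharField(max_length=5)
    course = models.IntegerField()


class Subject(models.Model):
    name = models.CharField(max_length=100)


class Job(models.Model):
    name = models.CharField(max_length=100)
    number = models.IntegerField(null=True, blank=True)
    subject = models.ForeignKey(Subject)


class Teacher(models.Model):
    name = models.CharField(max_length=100)
    patronymic = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    subjects = models.ManyToManyField(Subject)
    user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True)


class Student(models.Model):
    name = models.CharField(max_length=100)
    patronymic = models.CharField(max_length=100)
    surname = models.CharField(max_length=100)
    group = models.ForeignKey(Group, null=True)
    # through-models below link students to jobs (with marks) and to subjects
    jobs = models.ManyToManyField(Job, through='Log')
    subjects = models.ManyToManyField(Subject, through='Student_Subject')
    user = models.OneToOneField(settings.AUTH_USER_MODEL, null=True)


class Student_Subject(models.Model):
    student = models.ForeignKey(Student)
    subject = models.ForeignKey(Subject)
    teacher = models.ForeignKey(Teacher)


class Log(models.Model):
    mark = models.IntegerField()
    date = models.DateField()
    job = models.ForeignKey(Job)
    student = models.ForeignKey(Student)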
[ "GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta, check_doublet=True) ID_prob = ID_prob2[:, :n_donor]", "function to cluster the cells into donors. \"\"\" if random_seed is not None:", "import sys import itertools import numpy as np from scipy.stats import entropy from", "distribution. The doublet probability can be created by doublet genotypes \"\"\" if check_doublet:", "RV['theta_shapes'] = theta_shapes RV['LB_list'] = LB[: it+1] RV['LB_doublet'] = LB_doublet return RV def", "if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2: print(\"Error: no n_donor and GT_prior", "max_iter - 1: if verbose: print(\"Warning: VB did not converge!\\n\") elif LB[it] -", "the parameters of each component of the variantional distribution. The doublet probability can", "theta_shapes.copy() ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both, Psi_both) ID_prob = ID_prob2[:, :GT_prob.shape[1]]", "ID_prob SS_gt = DP * ID_prob S2_gt = SS_gt - S1_gt logLik_GT =", "Psi is None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD = DP - AD", "has three genotypes: 0, 1, 2; n_gt = GT_prob.shape[2] GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0],", "if LB[it] < LB[it - 1]: if verbose: print(\"Warning: Lower bound decreases!\\n\") elif", "## initialize GT if GT_prior is None: GT_prior = normalize(np.ones((n_var, n_donor, n_gt))) GT_prob,", "RV['GT_prob'] = GT_prob RV['doublet_prob'] = doublet_prob RV['theta_shapes'] = theta_shapes RV['LB_list'] = LB[: it+1]", ":, ig] * _digmmas) logLik_ID += (S1 + S2 - SS) Psi_norm =", "* GT_prob[:, s_idx2, :]) GT_prob2[:, :, n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1]", "add_doublet_theta(theta_shapes) n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1] if doublet_prior is None: doublet_prior = min(0.5,", "print(\"Warning: n_donor is smaller than samples in GT_prior, hence we \" \"ignore n_donor.\")", "theta_shapes, theta_prior, GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): \"\"\" Update the parameters of", "not complete, we change learn_GT to True.\") learn_GT = True elif GT_prior.shape[1] >", "LB[it] < LB[it - 1]: if verbose: print(\"Warning: Lower bound decreases!\\n\") elif it", "GT_both, theta_both, Psi_both) ID_prob = ID_prob2[:, :GT_prob.shape[1]] if learn_GT: GT_prob, logLik_GT = get_GT_prob(AD,", "None: doublet_prior = min(0.5, AD.shape[1] / 100000) Psi_both = np.append(Psi * (1 -", "Psi = np.ones(n_donor) / n_donor else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init", "n_donor - GT_prior.shape[1] GT_prior = np.append(GT_prior, normalize(np.ones((n_var, n_gt, _add_n)), axis=1)) GT_prob = GT_prior.copy()", "digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 = AD.transpose() * (GT_prob[:, :, ig] * _digmma1) S2", "# TODO: support reduced GT for relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx =", "GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): \"\"\" Update the parameters of each component", "np.log(Psi / np.sum(Psi)) LB_p = np.sum(logLik_ID * ID_prob) KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1))", "if it > min_iter: if LB[it] < LB[it - 1]: if verbose: print(\"Warning:", "check_doublet=True, min_iter=20, max_iter=100, min_GP=0.00001, epsilon_conv=1e-2, random_seed=None, verbose=False): \"\"\" Vireo core function to cluster", "ID_prob2[:, :n_donor] doublet_prob = ID_prob2[:, n_donor:] else: LB_doublet = LB[it] n_donor_doublt = int(n_donor", "\"\"\" 
Vireo core function to cluster the cells into donors. \"\"\" if random_seed", "-np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return", "* (GT_prob[:, :, ig] * _digmmas) logLik_ID += (S1 + S2 - SS)", "int(n_donor * (n_donor - 1) / 2) doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt)) RV =", "GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior) ### check how", "in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas", "Psi=None): \"\"\" \"\"\" if Psi is None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD", "= np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi)) LB_p", "n_donor: _add_n = n_donor - GT_prior.shape[1] GT_prior = np.append(GT_prior, normalize(np.ones((n_var, n_gt, _add_n)), axis=1))", "it in range(max_iter): ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior,", "(n_donor - 1) / 2) doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt)) RV = {} RV['ID_prob']", "sp_idx[:, 1] ## GT_prob has three genotypes: 0, 1, 2; n_gt = GT_prob.shape[2]", "_add_n = n_donor - GT_prior.shape[1] GT_prior = np.append(GT_prior, normalize(np.ones((n_var, n_gt, _add_n)), axis=1)) GT_prob", "for relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x in combn_iter])", "1: if verbose: print(\"Warning: VB did not converge!\\n\") elif LB[it] - LB[it -", "1] ## GT_prob has three genotypes: 0, 1, 2; n_gt = GT_prob.shape[2] GT_prob2", "= digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 = AD.transpose() *", "+ gt_idx.shape[0])) GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1, :] * GT_prob[:, s_idx2, :])", "get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None): \"\"\" \"\"\" if Psi is None: Psi =", "LB[: it+1] RV['LB_doublet'] = LB_doublet return RV def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior,", "is smaller than samples in GT_prior, hence we \" \"ignore n_donor.\") n_donor =", "in range(GT_prob.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas", "decreases!\\n\") elif it == max_iter - 1: if verbose: print(\"Warning: VB did not", "of GT categories not matched: theta and GT_prior\") sys.exit(1) ## VB interations LB", "np.sum(Psi)) LB_p = np.sum(logLik_ID * ID_prob) KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1)) KL_GT =", "logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1)", "is None: if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2: print(\"Error: no n_donor", "Vireo core function to cluster the cells into donors. 
\"\"\" if random_seed is", "ID_prob2[:, n_donor:] else: LB_doublet = LB[it] n_donor_doublt = int(n_donor * (n_donor - 1)", "theta_shapes.shape[0] S1_gt = AD * ID_prob SS_gt = DP * ID_prob S2_gt =", "detecting doublets LB_val = VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes, theta_prior, GT_prior, Psi_both) return ID_prob2,", "AD.shape[1] / 100000) Psi_both = np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair", "normalize, loglik_amplify, beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None,", "Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior): \"\"\"", "(S1_gt * _digmma1 + S2_gt * _digmma2 - SS_gt * _digmmas) # +=", "Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor))", "GT_prior.shape[1] > n_donor: print(\"Warning: n_donor is smaller than samples in GT_prior, hence we", ":, :n_gt] = (GT_prob[:, s_idx1, :] * GT_prob[:, s_idx2, :]) GT_prob2[:, :, n_gt:]", "Psi_norm = np.log(Psi / np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1)) ID_prob =", "GT_prior.shape[1] GT_prior = np.append(GT_prior, normalize(np.ones((n_var, n_gt, _add_n)), axis=1)) GT_prob = GT_prior.copy() if learn_GT", "sys.exit(1) else: n_donor = GT_prior.shape[1] n_var = AD.shape[0] # n_variants ## initialize thete", "1] < epsilon_conv: break ## one-off check doublet if check_doublet: ID_prob2, GT_prob, theta_shapes,", "ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1)) ID_prob = normalize(ID_prob, axis=1) return ID_prob, logLik_ID", "GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\" if GT_prior is None: GT_prior", "n_donor:] else: LB_doublet = LB[it] n_donor_doublt = int(n_donor * (n_donor - 1) /", "GT_prob[:, :, ig], axis=_axis) theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :, ig], axis=_axis)", "False: print(\"As GT_prior is not given, we change learn_GT to True.\") learn_GT =", "doublet probability can be created by doublet genotypes \"\"\" if check_doublet: GT_both =", "GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\" if", "if len(theta_shapes.shape) == 3 else None theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :,", "DP - AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]): _digmma1 =", "matched to GT_prior if GT_prior.shape[2] != n_gt: print(\"Error: number of GT categories not", ":, ig] * _digmma1) S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2)", "GT_prob, theta_shapes, LB_val def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior): \"\"\" \"\"\" S1_gt =", "is None: Psi = np.ones(n_donor) / n_donor else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor])", "else: GT_prob = GT_prior.copy() GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior > 1 -", "= np.zeros(max_iter) for it in range(max_iter): ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP,", "max_iter=100, min_GP=0.00001, epsilon_conv=1e-2, random_seed=None, verbose=False): \"\"\" Vireo core function to cluster the cells", "= normalize(GT_prior) #TODO: check if there is a better way to deal with", "GT_prior\") sys.exit(1) ## VB interations LB = np.zeros(max_iter) for it in range(max_iter): ID_prob,", "- 1) / 2) doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt)) RV = {} 
RV['ID_prob'] =", "+ S2_gt * _digmma2 - SS_gt * _digmmas) # += np.log(GT_prior) GT_prob =", "else: n_donor = GT_prior.shape[1] n_var = AD.shape[0] # n_variants ## initialize thete if", "[29.7, 0.3]]) theta_prior = np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]]) theta_shapes = theta_prior.copy()", "theta_shapes, Psi=None): \"\"\" \"\"\" if Psi is None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1]", "is None: ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi", "matched: theta and GT_prior\") sys.exit(1) ## VB interations LB = np.zeros(max_iter) for it", "- 1] < epsilon_conv: break ## one-off check doublet if check_doublet: ID_prob2, GT_prob,", "digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) logLik_GT[:, :, ig] = (S1_gt", "LB[it] = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet)", "= np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]]) theta_shapes = theta_prior.copy() if ASE_mode and", "verbose: print(\"Warning: Lower bound decreases!\\n\") elif it == max_iter - 1: if verbose:", "theta_shapes, LB_val def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior): \"\"\" \"\"\" S1_gt = AD", "= min(0.5, AD.shape[1] / 100000) Psi_both = np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair)", "update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta, check_doublet=True) ID_prob =", ":, ig], axis=_axis) return theta_shapes def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None): \"\"\" \"\"\"", "import entropy from scipy.special import digamma from .vireo_base import normalize, loglik_amplify, beta_entropy def", "DP, ID_prob, theta_shapes, GT_prior) if learn_GT is False: print(\"As GT_prior is not given,", "logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\" if GT_prior", "RV = {} RV['ID_prob'] = ID_prob RV['GT_prob'] = GT_prob RV['doublet_prob'] = doublet_prob RV['theta_shapes']", "= (GT_prob[:, s_idx1, :][:, :, g_idx1] * GT_prob[:, s_idx2, :][:, :, g_idx2] +", "= GT_prob RV['doublet_prob'] = doublet_prob RV['theta_shapes'] = theta_shapes RV['LB_list'] = LB[: it+1] RV['LB_doublet']", "3 or GT_prior.shape[1] < 2: print(\"Error: no n_donor and GT_prior has < 2", "1]: if verbose: print(\"Warning: Lower bound decreases!\\n\") elif it == max_iter - 1:", "combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x in combn_iter]) _theta_p1 =", "s_idx1 = sp_idx[:, 0] s_idx2 = sp_idx[:, 1] ## GT_prob has three genotypes:", "np.zeros(max_iter) for it in range(max_iter): ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob,", "categories: 0, 1, 2, 1.5, 2.5 TODO: New GT has six categories: 0,", "* GT_prob[:, :, ig], axis=_axis) return theta_shapes def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None):", "return RV def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True,", "it+1] RV['LB_doublet'] = LB_doublet return RV def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior,", "for ig in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1,", "range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 
= digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas =", "calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by averaging thire beta", "GT_prior is not complete, we change learn_GT to True.\") learn_GT = True elif", "check_doublet=False): \"\"\" Update the parameters of each component of the variantional distribution. The", "add_doublet_GT(GT_prob): \"\"\" Add doublet genotype by summarizing their probability: New GT has five", "n_donor is smaller than samples in GT_prior, hence we \" \"ignore n_donor.\") n_donor", "DP * ID_prob S2_gt = SS_gt - S1_gt logLik_GT = np.zeros(GT_prior.shape) for ig", "http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import itertools import numpy as np from scipy.stats", "GT for relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x in", "GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1, :] * GT_prob[:, s_idx2, :]) GT_prob2[:, :,", "(GT_prob[:, s_idx1, :][:, :, g_idx1] * GT_prob[:, s_idx2, :][:, :, g_idx2] + GT_prob[:,", "AD.shape[0] # n_variants ## initialize thete if theta_prior is None: #theta_prior = np.array([[0.3,", "min_GP] = 1 - min_GP GT_prior = normalize(GT_prior) #TODO: check if there is", "KL_GT, KL_theta) return LB_p - KL_ID - KL_GT - KL_theta def add_doublet_theta(theta_shapes): \"\"\"", "\"ignore n_donor.\") n_donor = GT_prior.shape[1] # check if n_gt is matched to GT_prior", "created by doublet genotypes \"\"\" if check_doublet: GT_both = add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes)", "np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis) theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :,", "for ig in range(GT_prob.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1,", "* _digmma2) SS = DP.transpose() * (GT_prob[:, :, ig] * _digmmas) logLik_ID +=", "_digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 = AD.transpose()", "normalize(np.ones(GT_prob.shape), axis=2) if Psi is None: ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior", "< epsilon_conv: break ## one-off check doublet if check_doublet: ID_prob2, GT_prob, theta_shapes, LB_doublet", "Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import itertools import numpy as", "ID_prob S2_gt = SS_gt - S1_gt theta_shapes = theta_prior.copy() for ig in range(theta_shapes.shape[0]):", "= int(n_donor * (n_donor - 1) / 2) doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt)) RV", "paramters Example ------- theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) add_doublet_theta(theta_shapes) \"\"\"", "== max_iter - 1: if verbose: print(\"Warning: VB did not converge!\\n\") elif LB[it]", "GT_prob, theta_prior) ### check how to calculate lower bound for when detecting doublets", "initialize Psi if Psi is None: Psi = np.ones(n_donor) / n_donor else: Psi", "3 else None theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis) theta_shapes[ig,", "= itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x in combn_iter]) _theta_p1 = theta_shapes[db_idx[:,", "= VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes, theta_prior, GT_prior, Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val", "# check if n_gt is matched to GT_prior if GT_prior.shape[2] != n_gt: print(\"Error:", "29.7], [3, 3], [29.7, 0.3]]) 
theta_prior = np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]])", "= gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x for x in", "(np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior)) else: Psi_both = Psi.copy() GT_both = GT_prob.copy() theta_both", "def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): \"\"\"", "ID_prob) KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1)) KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta =", "theta_shapes.shape[0] # number of genotype categories ## initialize Psi if Psi is None:", "= True else: GT_prob = GT_prior.copy() GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior >", "ID_prob = normalize(ID_prob, axis=1) return ID_prob, logLik_ID def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None):", "not converge!\\n\") elif LB[it] - LB[it - 1] < epsilon_conv: break ## one-off", "[50, 50], [99.9, 0.1]]) theta_shapes = theta_prior.copy() if ASE_mode and len(theta_prior.shape) == 2:", "logLik_GT[:, :, ig] = (S1_gt * _digmma1 + S2_gt * _digmma2 - SS_gt", "for x in combn_iter]) # GT combination g_idx1 = gt_idx[:, 0] g_idx2 =", "loglik_amplify, beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None, ID_prob_init=None,", "GT categories not matched: theta and GT_prior\") sys.exit(1) ## VB interations LB =", "SS_gt - S1_gt logLik_GT = np.zeros(GT_prior.shape) for ig in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig,", "theta_prior.copy() for ig in range(theta_shapes.shape[0]): _axis = 1 if len(theta_shapes.shape) == 3 else", "learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if LB[it] < LB[it - 1]: if", "not given, we change learn_GT to True.\") learn_GT = True else: GT_prob =", "than samples in GT_prior, hence we \" \"ignore n_donor.\") n_donor = GT_prior.shape[1] #", "None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD = DP - AD logLik_ID =", "Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD = DP - AD logLik_ID = np.zeros((AD.shape[1],", "KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1)) KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes,", "how to calculate lower bound for when detecting doublets LB_val = VB_lower_bound(logLik_ID, GT_prob,", "if n_donor is None: if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2: print(\"Error:", "is None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD = DP - AD logLik_ID", "sp_idx = np.array([x for x in combn_iter]) # sample combination s_idx1 = sp_idx[:,", "db_idx = np.array([x for x in combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 =", "1, 2; n_gt = GT_prob.shape[2] GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0])) GT_prob2[:,", "Add doublet genotype by summarizing their probability: New GT has five categories: 0,", "> min_iter: if LB[it] < LB[it - 1]: if verbose: print(\"Warning: Lower bound", "+ Psi_norm, axis=1)) ID_prob = normalize(ID_prob, axis=1) return ID_prob, logLik_ID def get_GT_prob(AD, DP,", "gt_idx = np.array([x for x in combn_iter]) # GT combination g_idx1 = gt_idx[:,", "from .vireo_base import normalize, loglik_amplify, beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None,", "# += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2) GT_prob = 
normalize(np.exp(GT_prob), axis=2)", "get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior) ### check how to calculate lower bound for", "GT_prob[:, :, ig], axis=_axis) return theta_shapes def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None): \"\"\"", "hence we \" \"ignore n_donor.\") n_donor = GT_prior.shape[1] # check if n_gt is", "LB[it] - LB[it - 1] < epsilon_conv: break ## one-off check doublet if", "= np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt =", "parameters of each component of the variantional distribution. The doublet probability can be", "axis=2) GT_prob = normalize(np.exp(GT_prob), axis=2) return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes,", "theta_shapes_db = _theta_mean * _theta_sum return np.append(theta_shapes, theta_shapes_db, axis=0) def add_doublet_GT(GT_prob): \"\"\" Add", "= np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi)) LB_p = np.sum(logLik_ID * ID_prob) KL_ID =", "for Vireo model # Author: <NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic", "= -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p - KL_ID -", "= itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x for x in combn_iter]) # sample combination", "from scipy.special import digamma from .vireo_base import normalize, loglik_amplify, beta_entropy def vireo_core(AD, DP,", "and GT=1&2 by averaging thire beta paramters Example ------- theta_shapes = np.array([[0.3, 29.7],", "= theta_prior.copy() if ASE_mode and len(theta_prior.shape) == 2: theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var,", "True.\") learn_GT = True elif GT_prior.shape[1] > n_donor: print(\"Warning: n_donor is smaller than", "np.sum(Psi[:n_donor]) if ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob = normalize(ID_prob_init.copy())", "theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None, ID_prob_init=None, doublet_prior=None, check_doublet=True, min_iter=20, max_iter=100, min_GP=0.00001, epsilon_conv=1e-2, random_seed=None, verbose=False):", "+ np.log(GT_prior), axis=2) GT_prob = normalize(np.exp(GT_prob), axis=2) return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob,", "np.zeros(GT_prior.shape) for ig in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig,", "get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT is False: print(\"As GT_prior is not", "as np from scipy.stats import entropy from scipy.special import digamma from .vireo_base import", "theta_both = theta_shapes.copy() ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both, Psi_both) ID_prob =", "RV['doublet_prob'] = doublet_prob RV['theta_shapes'] = theta_shapes RV['LB_list'] = LB[: it+1] RV['LB_doublet'] = LB_doublet", "is False: print(\"As GT_prior is not complete, we change learn_GT to True.\") learn_GT", "GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0])) GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1,", "learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if LB[it] < LB[it - 1]:", "GT_prior, Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if LB[it] <", "ID_prob, theta_shapes, GT_prior=None): \"\"\" \"\"\" if GT_prior is None: GT_prior = 
np.ones((AD.shape[0], ID_prob.shape[1],", "if Psi is None: Psi = np.ones(n_donor) / n_donor else: Psi = Psi[:n_donor]", "g_idx2] * GT_prob[:, s_idx2, :][:, :, g_idx1]) GT_prob2 = normalize(GT_prob2, axis=2) GT_prob1 =", "2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt = theta_shapes.shape[0] #", "S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2) SS = DP.transpose() *", "GT_prob = normalize(np.exp(GT_prob), axis=2) return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior,", "KL_ID - KL_GT - KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate theta for doublet genotype:", "_theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1, axis=1) +", "for x in combn_iter]) # sample combination s_idx1 = sp_idx[:, 0] s_idx2 =", "gt_idx[:, 0] g_idx2 = gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x", "check_doublet=check_doublet) if it > min_iter: if LB[it] < LB[it - 1]: if verbose:", "None: GT_prior = normalize(np.ones(GT_prob.shape), axis=2) if Psi is None: ID_prior = np.ones(ID_prob.shape) /", "s_idx2, :][:, :, g_idx1]) GT_prob2 = normalize(GT_prob2, axis=2) GT_prob1 = np.append(GT_prob, np.zeros((GT_prob.shape[0], GT_prob.shape[1],", "GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p", "\"\"\" if random_seed is not None: np.random.seed(random_seed) if n_donor is None: if len(GT_prior.shape)", "GT_prior.shape[1] # check if n_gt is matched to GT_prior if GT_prior.shape[2] != n_gt:", "np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 =", "RV['LB_doublet'] = LB_doublet return RV def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi,", "* np.sum(_theta_p2, axis=1, keepdims=True)) theta_shapes_db = _theta_mean * _theta_sum return np.append(theta_shapes, theta_shapes_db, axis=0)", ":, ig] * _digmma2) SS = DP.transpose() * (GT_prob[:, :, ig] * _digmmas)", "= np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) theta_prior = np.array([[0.1, 99.9], [50, 50],", "n_gt))) GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT is False:", "in GT_prior, hence we \" \"ignore n_donor.\") n_donor = GT_prior.shape[1] # check if", "/ theta_shapes.shape[0] S1_gt = AD * ID_prob SS_gt = DP * ID_prob S2_gt", "theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis) theta_shapes[ig, 1] += np.sum(S2_gt", "beta paramters Example ------- theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) add_doublet_theta(theta_shapes)", "else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi)) LB_p = np.sum(logLik_ID * ID_prob)", "GT_prior.shape[1] < n_donor: _add_n = n_donor - GT_prior.shape[1] GT_prior = np.append(GT_prior, normalize(np.ones((n_var, n_gt,", "learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None, ID_prob_init=None, doublet_prior=None, check_doublet=True, min_iter=20, max_iter=100, min_GP=0.00001, epsilon_conv=1e-2, random_seed=None,", "genotype categories ## initialize Psi if Psi is None: Psi = np.ones(n_donor) /", "True.\") learn_GT = True else: GT_prob = GT_prior.copy() GT_prior[GT_prior < min_GP] = min_GP", "n_donor = GT_prior.shape[1] n_var = AD.shape[0] # n_variants ## initialize thete if theta_prior", "return LB_p - KL_ID - KL_GT - KL_theta def 
add_doublet_theta(theta_shapes): \"\"\" calculate theta", "LB_p - KL_ID - KL_GT - KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate theta for", "GT=0&1, GT=0&2, and GT=1&2 by averaging thire beta paramters Example ------- theta_shapes =", "Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if LB[it] < LB[it", "len(theta_shapes.shape) == 3 else None theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig],", "theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p - KL_ID - KL_GT -", "\"\"\" combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x in combn_iter]) #", "= np.array([x for x in combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:,", "= GT_prob.copy() theta_both = theta_shapes.copy() ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both, Psi_both)", "\"\"\" \"\"\" S1_gt = AD * ID_prob SS_gt = DP * ID_prob S2_gt", "learn_GT is False: print(\"As GT_prior is not complete, we change learn_GT to True.\")", "update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): \"\"\" Update", "GT_both = add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes) n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1] if doublet_prior", "n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1] if doublet_prior is None: doublet_prior = min(0.5, AD.shape[1]", "= LB[: it+1] RV['LB_doublet'] = LB_doublet return RV def update_VB(AD, DP, GT_prob, theta_shapes,", "axis=2) if Psi is None: ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior =", "https://github.com/allentran/pca-magic import sys import itertools import numpy as np from scipy.stats import entropy", "np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi)) LB_p =", ":].sum(axis=0)).reshape(-1, 1) logLik_GT[:, :, ig] = (S1_gt * _digmma1 + S2_gt * _digmma2", "GT=1&2 by averaging thire beta paramters Example ------- theta_shapes = np.array([[0.3, 29.7], [3,", "change learn_GT to True.\") learn_GT = True else: GT_prob = GT_prior.copy() GT_prior[GT_prior <", "(GT_prob[:, :, ig] * _digmma2) SS = DP.transpose() * (GT_prob[:, :, ig] *", "- AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]): _digmma1 = digamma(theta_shapes[ig,", "Psi_norm, axis=1)) ID_prob = normalize(ID_prob, axis=1) return ID_prob, logLik_ID def get_GT_prob(AD, DP, ID_prob,", "2, 0_1, 0_2, 1_2 \"\"\" combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for", "* np.log(Psi / np.sum(Psi)) LB_p = np.sum(logLik_ID * ID_prob) KL_ID = -np.sum(entropy(ID_prob, ID_prior,", "= get_ID_prob(AD, DP, GT_both, theta_both, Psi_both) ID_prob = ID_prob2[:, :GT_prob.shape[1]] if learn_GT: GT_prob,", "LB_p = np.sum(logLik_ID * ID_prob) KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1)) KL_GT = -np.sum(entropy(GT_prob,", "axis=1)) KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID,", "Vireo model # Author: <NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import", "np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi)) LB_p = np.sum(logLik_ID * ID_prob) KL_ID = -np.sum(entropy(ID_prob,", "1) / 2) doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt)) RV = {} RV['ID_prob'] = ID_prob", "= 
np.zeros((ID_prob.shape[0], n_donor_doublt)) RV = {} RV['ID_prob'] = ID_prob RV['GT_prob'] = GT_prob RV['doublet_prob']", "_theta_mean * _theta_sum return np.append(theta_shapes, theta_shapes_db, axis=0) def add_doublet_GT(GT_prob): \"\"\" Add doublet genotype", "* (GT_prob[:, :, ig] * _digmma2) SS = DP.transpose() * (GT_prob[:, :, ig]", "GT_prior = normalize(GT_prior) #TODO: check if there is a better way to deal", "for it in range(max_iter): ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob, theta_shapes,", "= normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob = normalize(ID_prob_init.copy()) ## initialize GT if GT_prior is", "= normalize(GT_prob2, axis=2) GT_prob1 = np.append(GT_prob, np.zeros((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])), axis=2) return np.append(GT_prob1, GT_prob2,", "if random_seed is not None: np.random.seed(random_seed) if n_donor is None: if len(GT_prior.shape) <", "Lower bound decreases!\\n\") elif it == max_iter - 1: if verbose: print(\"Warning: VB", "samples in GT_prior, hence we \" \"ignore n_donor.\") n_donor = GT_prior.shape[1] # check", "vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None, ID_prob_init=None, doublet_prior=None, check_doublet=True, min_iter=20,", "if there is a better way to deal with GT imcompleteness if GT_prior.shape[1]", "n_gt, _add_n)), axis=1)) GT_prob = GT_prior.copy() if learn_GT is False: print(\"As GT_prior is", "[99.9, 0.1]]) theta_shapes = theta_prior.copy() if ASE_mode and len(theta_prior.shape) == 2: theta_prior =", "= (S1_gt * _digmma1 + S2_gt * _digmma2 - SS_gt * _digmmas) #", "1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) logLik_GT[:, :,", "_digmma2 - SS_gt * _digmmas) # += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior),", ":, n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1] * GT_prob[:, s_idx2, :][:, :,", "Psi_both = np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior)) else:", "_digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) logLik_GT[:, :, ig] = (S1_gt * _digmma1 +", "= theta_prior.copy() for ig in range(theta_shapes.shape[0]): _axis = 1 if len(theta_shapes.shape) == 3", "<NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import itertools import", "itertools import numpy as np from scipy.stats import entropy from scipy.special import digamma", "= get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob,", "donors. 
\"\"\" if random_seed is not None: np.random.seed(random_seed) if n_donor is None: if", "break ## one-off check doublet if check_doublet: ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD,", "update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it", "np.random.seed(random_seed) if n_donor is None: if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2:", "normalize(np.exp(GT_prob), axis=2) return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None):", "-beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p - KL_ID - KL_GT", "ID_prob.shape[1] else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi)) LB_p = np.sum(logLik_ID *", "GT_prior / theta_shapes.shape[0] S1_gt = AD * ID_prob SS_gt = DP * ID_prob", "return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\"", "n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1] * GT_prob[:, s_idx2, :][:, :, g_idx2]", "2.5 TODO: New GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2", "True elif GT_prior.shape[1] > n_donor: print(\"Warning: n_donor is smaller than samples in GT_prior,", "== 2: theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var,", "= ID_prob2[:, n_donor:] else: LB_doublet = LB[it] n_donor_doublt = int(n_donor * (n_donor -", "doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): \"\"\" Update the parameters of each component of the", "= SS_gt - S1_gt theta_shapes = theta_prior.copy() for ig in range(theta_shapes.shape[0]): _axis =", "0.3]]) add_doublet_theta(theta_shapes) \"\"\" # TODO: support reduced GT for relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]),", "with GT imcompleteness if GT_prior.shape[1] < n_donor: _add_n = n_donor - GT_prior.shape[1] GT_prior", "_theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) * np.sum(_theta_p2, axis=1, keepdims=True)) theta_shapes_db = _theta_mean *", "= DP * ID_prob S2_gt = SS_gt - S1_gt logLik_GT = np.zeros(GT_prior.shape) for", "genotype by summarizing their probability: New GT has five categories: 0, 1, 2,", "1_2 \"\"\" combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x in combn_iter])", "SS_gt * _digmmas) # += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2) GT_prob", "if verbose: print(\"Warning: VB did not converge!\\n\") elif LB[it] - LB[it - 1]", "# GT combination g_idx1 = gt_idx[:, 0] g_idx2 = gt_idx[:, 1] combn_iter =", "theta_shapes, theta_prior, GT_prior, Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val def get_theta_shapes(AD, DP, ID_prob,", "= theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1, axis=1) + normalize(_theta_p2,", "g_idx1 = gt_idx[:, 0] g_idx2 = gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx", "30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import itertools import numpy as np", "check_doublet: ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi,", "to True.\") learn_GT = True elif GT_prior.shape[1] > n_donor: print(\"Warning: n_donor is smaller", "1) 
_digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 = AD.transpose() * (GT_prob[:, :, ig]", "thire beta paramters Example ------- theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]])", "0, 1, 2, 0_1, 0_2, 1_2 \"\"\" combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx =", "is not None: np.random.seed(random_seed) if n_donor is None: if len(GT_prior.shape) < 3 or", "_digmmas) # += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2) GT_prob = normalize(np.exp(GT_prob),", "GT_prob.shape[2] GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0])) GT_prob2[:, :, :n_gt] = (GT_prob[:,", "GT_prior = normalize(np.ones(GT_prob.shape), axis=2) if Psi is None: ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1]", "GT combination g_idx1 = gt_idx[:, 0] g_idx2 = gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]),", "- KL_GT - KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate theta for doublet genotype: GT=0&1,", "2, 1.5, 2.5 TODO: New GT has six categories: 0, 1, 2, 0_1,", "= np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) * np.sum(_theta_p2, axis=1, keepdims=True)) theta_shapes_db = _theta_mean * _theta_sum", "GT_prior.shape[1] < 2: print(\"Error: no n_donor and GT_prior has < 2 donors.\") sys.exit(1)", "np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt = theta_shapes.shape[0]", ":, ig], axis=_axis) theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :, ig], axis=_axis) return", "* _digmma1) S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2) SS =", "if learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior) ### check how to", "LB[it] n_donor_doublt = int(n_donor * (n_donor - 1) / 2) doublet_prob = np.zeros((ID_prob.shape[0],", "np from scipy.stats import entropy from scipy.special import digamma from .vireo_base import normalize,", "ig] = (S1_gt * _digmma1 + S2_gt * _digmma2 - SS_gt * _digmmas)", "in range(max_iter): ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior,", "and len(theta_prior.shape) == 2: theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes,", "KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2", "# print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p - KL_ID - KL_GT - KL_theta", "epsilon_conv=1e-2, random_seed=None, verbose=False): \"\"\" Vireo core function to cluster the cells into donors.", "genotypes \"\"\" if check_doublet: GT_both = add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes) n_doublet_pair = GT_both.shape[1]", "(1 - doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior)) else: Psi_both = Psi.copy() GT_both", "None: Psi = np.ones(n_donor) / n_donor else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if", ":GT_prob.shape[1]] if learn_GT: GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_theta:", "ID_prob, GT_prob, theta_prior) ### check how to calculate lower bound for when detecting", "five categories: 0, 1, 2, 1.5, 2.5 TODO: New GT has six categories:", ":, g_idx2] * GT_prob[:, s_idx2, :][:, :, g_idx1]) GT_prob2 = normalize(GT_prob2, axis=2) GT_prob1", "if GT_prior is None: GT_prior = normalize(np.ones((n_var, n_donor, n_gt))) GT_prob, logLik_GT = get_GT_prob(AD,", "it == max_iter - 
1: if verbose: print(\"Warning: VB did not converge!\\n\") elif", "is None: GT_prior = normalize(np.ones(GT_prob.shape), axis=2) if Psi is None: ID_prior = np.ones(ID_prob.shape)", ":, g_idx1] * GT_prob[:, s_idx2, :][:, :, g_idx2] + GT_prob[:, s_idx1, :][:, :,", "= itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x in combn_iter]) # GT combination", "if learn_GT is False: print(\"As GT_prior is not given, we change learn_GT to", "1) logLik_GT[:, :, ig] = (S1_gt * _digmma1 + S2_gt * _digmma2 -", "np.array([x for x in combn_iter]) # GT combination g_idx1 = gt_idx[:, 0] g_idx2", "/ n_donor else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init is None: ID_prob", "doublet genotypes \"\"\" if check_doublet: GT_both = add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes) n_doublet_pair =", "0_1, 0_2, 1_2 \"\"\" combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x", "theta_shapes, GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior) ### check", "GT_prior.copy() GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior > 1 - min_GP] = 1", "[29.7, 0.3]]) add_doublet_theta(theta_shapes) \"\"\" # TODO: support reduced GT for relatives combn_iter =", "1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 =", "n_donor, n_gt))) GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT is", "logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD, DP,", "better way to deal with GT imcompleteness if GT_prior.shape[1] < n_donor: _add_n =", "0] g_idx2 = gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x for", "n_var = AD.shape[0] # n_variants ## initialize thete if theta_prior is None: #theta_prior", "combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x in combn_iter]) # GT", "\"\"\" Update the parameters of each component of the variantional distribution. 
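
# --- Editor's usage sketch (an addition, not in the original source) ---
# A minimal, hypothetical run of vireo_core on synthetic counts. Shapes and
# values are illustrative assumptions: AD (ALT-allele counts) and DP (total
# depths) are taken as scipy sparse matrices of shape (n_variants, n_cells),
# since the update functions below rely on `*` acting as sparse matrix
# multiplication against dense arrays.
def _demo_vireo_core(n_var=100, n_cell=40, n_donor=3):
    from scipy.sparse import csc_matrix
    np.random.seed(1)
    DP = np.random.randint(0, 10, size=(n_var, n_cell))  # total read depth
    AD = np.random.binomial(DP, 0.3)                     # ALT reads <= DP
    res = vireo_core(csc_matrix(AD), csc_matrix(DP), n_donor=n_donor,
                     check_doublet=False, random_seed=1)
    print(res['ID_prob'].shape)  # (n_cell, n_donor) assignment probabilities
    print(res['LB_list'][-1])    # final value of the lower bound
    return res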

def update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi,
              doublet_prior=None, learn_GT=True, learn_theta=True,
              check_doublet=False):
    """
    Update the parameters of each component of the variational distribution.
    The doublet probability can be created by doublet genotypes.
    """
    if check_doublet:
        GT_both = add_doublet_GT(GT_prob)
        theta_both = add_doublet_theta(theta_shapes)
        n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1]
        if doublet_prior is None:
            doublet_prior = min(0.5, AD.shape[1] / 100000)
        Psi_both = np.append(Psi * (1 - doublet_prior),
                             (np.ones(n_doublet_pair) / n_doublet_pair *
                              doublet_prior))
    else:
        Psi_both = Psi.copy()
        GT_both = GT_prob.copy()
        theta_both = theta_shapes.copy()

    ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both, Psi_both)
    ID_prob = ID_prob2[:, :GT_prob.shape[1]]

    if learn_GT:
        GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes,
                                         GT_prior)
    if learn_theta:
        theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior)

    ### TODO: check how to calculate the lower bound when detecting doublets
    LB_val = VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes,
                            theta_prior, GT_prior, Psi_both)

    return ID_prob2, GT_prob, theta_shapes, LB_val


def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior):
    """
    Update the Beta shape parameters of the allelic rate in each genotype
    category, by adding the expected ALT and REF read counts.
    """
    S1_gt = AD * ID_prob   # expected ALT reads per variant and donor
    SS_gt = DP * ID_prob   # expected total reads per variant and donor
    S2_gt = SS_gt - S1_gt  # expected REF reads per variant and donor

    theta_shapes = theta_prior.copy()
    for ig in range(theta_shapes.shape[0]):
        _axis = 1 if len(theta_shapes.shape) == 3 else None
        theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis)
        theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :, ig], axis=_axis)
    return theta_shapes
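
# --- Editor's note (an addition): the update above is conjugate Beta
# counting. For genotype category g with Beta(alpha_g, beta_g) shapes, the
# variational update adds the responsibility-weighted read counts:
#   alpha_g += sum_{v,d} (AD @ ID_prob)[v,d]        * GT_prob[v,d,g]
#   beta_g  += sum_{v,d} ((DP - AD) @ ID_prob)[v,d] * GT_prob[v,d,g]
# A tiny numeric check with hypothetical sizes; it assumes normalize()
# defaults to the last axis, as its use elsewhere in this file suggests.
def _demo_theta_update():
    from scipy.sparse import csc_matrix
    AD = csc_matrix(np.array([[2, 0, 5], [1, 3, 0]]))  # 2 variants, 3 cells
    DP = csc_matrix(np.array([[4, 2, 6], [2, 4, 1]]))
    ID_prob = normalize(np.ones((3, 2)))               # 2 donors, uniform
    GT_prob = normalize(np.ones((2, 2, 3)), axis=2)    # 3 genotypes, uniform
    theta_prior = np.array([[0.1, 99.9], [50., 50.], [99.9, 0.1]])
    shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior)
    # With uniform weights each category receives one third of all reads, so
    # shapes[:, 0] - theta_prior[:, 0] == AD.sum() / 3 in every row.
    return shapes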

def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None):
    """
    Compute the variational assignment probability of each cell to each
    donor (including doublet pairs when the inputs have been extended).
    """
    if Psi is None:
        Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1]

    BD = DP - AD
    logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1]))
    for ig in range(GT_prob.shape[2]):
        _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1)
        _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1)
        _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1)
        S1 = AD.transpose() * (GT_prob[:, :, ig] * _digmma1)
        S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2)
        SS = DP.transpose() * (GT_prob[:, :, ig] * _digmmas)
        logLik_ID += (S1 + S2 - SS)

    Psi_norm = np.log(Psi / np.sum(Psi))
    ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1))
    ID_prob = normalize(ID_prob, axis=1)

    return ID_prob, logLik_ID


def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None):
    """
    Compute the variational genotype probability for each variant in each
    donor.
    """
    if GT_prior is None:
        GT_prior = np.ones((AD.shape[0], ID_prob.shape[1],
                            theta_shapes.shape[0]))
        GT_prior = GT_prior / theta_shapes.shape[0]

    S1_gt = AD * ID_prob
    SS_gt = DP * ID_prob
    S2_gt = SS_gt - S1_gt

    logLik_GT = np.zeros(GT_prior.shape)
    for ig in range(logLik_GT.shape[2]):
        _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1)
        _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1)
        _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1)
        logLik_GT[:, :, ig] = (S1_gt * _digmma1 + S2_gt * _digmma2 -
                               SS_gt * _digmmas)

    # += np.log(GT_prior)
    GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2)
    GT_prob = normalize(np.exp(GT_prob), axis=2)

    return GT_prob, logLik_GT


def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior,
                   GT_prior=None, Psi=None):
    """
    Variational lower bound: the expected log-likelihood combined with the
    KL terms of the three variational factors.
    """
    if GT_prior is None:
        GT_prior = normalize(np.ones(GT_prob.shape), axis=2)
    if Psi is None:
        ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1]
    else:
        ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi))

    LB_p = np.sum(logLik_ID * ID_prob)
    KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1))
    KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2))
    KL_theta = -beta_entropy(theta_shapes, theta_prior)

    # print(LB_p, KL_ID, KL_GT, KL_theta)
    return LB_p - KL_ID - KL_GT - KL_theta
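
# --- Editor's sketch of the .vireo_base helpers (assumptions) ---
# normalize, loglik_amplify and beta_entropy are imported at the top but not
# defined in this file. The hypothetical versions below (underscore names,
# so they cannot shadow the real imports) are consistent with how the first
# two are used above; _beta_entropy_sketch guesses a summed Beta-vs-Beta KL
# divergence and may well differ from the real implementation.
def _normalize_sketch(X, axis=-1):
    # scale so that slices along `axis` sum to one
    return X / np.sum(X, axis=axis, keepdims=True)

def _loglik_amplify_sketch(X, axis=-1):
    # subtract the per-slice maximum so that np.exp() cannot overflow
    return X - np.max(X, axis=axis, keepdims=True)

def _beta_entropy_sketch(X, X_prior):
    # KL( Beta(X[:, 0], X[:, 1]) || Beta(X_prior[:, 0], X_prior[:, 1]) ),
    # summed over genotype categories (and over variants in ASE mode)
    from scipy.special import betaln
    a, b = X[:, 0], X[:, 1]
    a0, b0 = X_prior[:, 0], X_prior[:, 1]
    return np.sum(betaln(a0, b0) - betaln(a, b)
                  + (a - a0) * digamma(a) + (b - b0) * digamma(b)
                  + (a0 + b0 - a - b) * digamma(a + b))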

def add_doublet_theta(theta_shapes):
    """
    Calculate theta for the doublet genotypes GT=0&1, GT=0&2, and GT=1&2,
    by averaging their beta parameters.

    Example
    -------
    theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]])
    add_doublet_theta(theta_shapes)
    """
    # TODO: support reduced GT for relatives
    combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2)
    db_idx = np.array([x for x in combn_iter])

    _theta_p1 = theta_shapes[db_idx[:, 0]]
    _theta_p2 = theta_shapes[db_idx[:, 1]]
    _theta_mean = (normalize(_theta_p1, axis=1) +
                   normalize(_theta_p2, axis=1)) / 2.0
    _theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) *
                         np.sum(_theta_p2, axis=1, keepdims=True))
    theta_shapes_db = _theta_mean * _theta_sum

    return np.append(theta_shapes, theta_shapes_db, axis=0)


def add_doublet_GT(GT_prob):
    """
    Add doublet genotypes by summarizing their probability:
    the new GT has five categories: 0, 1, 2, 1.5, 2.5.
    TODO: new GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2
    """
    combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2)
    gt_idx = np.array([x for x in combn_iter])  # GT combination
    g_idx1 = gt_idx[:, 0]
    g_idx2 = gt_idx[:, 1]

    combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2)
    sp_idx = np.array([x for x in combn_iter])  # sample combination
    s_idx1 = sp_idx[:, 0]
    s_idx2 = sp_idx[:, 1]

    ## GT_prob has three genotypes: 0, 1, 2
    n_gt = GT_prob.shape[2]
    GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0],
                         n_gt + gt_idx.shape[0]))

    # same genotype in both donors of the pair
    GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1, :] *
                             GT_prob[:, s_idx2, :])
    # mixed genotypes, summing over the two donor orderings
    GT_prob2[:, :, n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1] *
                             GT_prob[:, s_idx2, :][:, :, g_idx2] +
                             GT_prob[:, s_idx1, :][:, :, g_idx2] *
                             GT_prob[:, s_idx2, :][:, :, g_idx1])

    GT_prob2 = normalize(GT_prob2, axis=2)
    GT_prob1 = np.append(GT_prob,
                         np.zeros((GT_prob.shape[0], GT_prob.shape[1],
                                   gt_idx.shape[0])), axis=2)
    return np.append(GT_prob1, GT_prob2, axis=1)
add_doublet_theta(theta_shapes) \"\"\" # TODO:", "RV['ID_prob'] = ID_prob RV['GT_prob'] = GT_prob RV['doublet_prob'] = doublet_prob RV['theta_shapes'] = theta_shapes RV['LB_list']", "GT_prior, Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior):", "* doublet_prior)) else: Psi_both = Psi.copy() GT_both = GT_prob.copy() theta_both = theta_shapes.copy() ID_prob2,", "ID_prob, GT_prob, theta_prior): \"\"\" \"\"\" S1_gt = AD * ID_prob SS_gt = DP", "ID_prob = normalize(ID_prob_init.copy()) ## initialize GT if GT_prior is None: GT_prior = normalize(np.ones((n_var,", "KL_theta) return LB_p - KL_ID - KL_GT - KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate", "their probability: New GT has five categories: 0, 1, 2, 1.5, 2.5 TODO:", "GT has five categories: 0, 1, 2, 1.5, 2.5 TODO: New GT has", "n_donor_doublt)) RV = {} RV['ID_prob'] = ID_prob RV['GT_prob'] = GT_prob RV['doublet_prob'] = doublet_prob", "ID_prob, theta_shapes, GT_prior) if learn_GT is False: print(\"As GT_prior is not given, we", "Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob", "to GT_prior if GT_prior.shape[2] != n_gt: print(\"Error: number of GT categories not matched:", "get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior): \"\"\" \"\"\" S1_gt = AD * ID_prob SS_gt", "axis=1)) ID_prob = normalize(ID_prob, axis=1) return ID_prob, logLik_ID def get_GT_prob(AD, DP, ID_prob, theta_shapes,", "from scipy.stats import entropy from scipy.special import digamma from .vireo_base import normalize, loglik_amplify,", "GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2 \"\"\" combn_iter =", "or GT_prior.shape[1] < 2: print(\"Error: no n_donor and GT_prior has < 2 donors.\")", "1]] _theta_mean = (normalize(_theta_p1, axis=1) + normalize(_theta_p2, axis=1)) / 2.0 _theta_sum = np.sqrt(np.sum(_theta_p1,", "2.0 _theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) * np.sum(_theta_p2, axis=1, keepdims=True)) theta_shapes_db = _theta_mean", "* (GT_prob[:, :, ig] * _digmma1) S2 = BD.transpose() * (GT_prob[:, :, ig]", "averaging thire beta paramters Example ------- theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7,", "elif LB[it] - LB[it - 1] < epsilon_conv: break ## one-off check doublet", "LB_doublet = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta, check_doublet=True)", "np.ones(n_donor) / n_donor else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init is None:", "doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior)) else: Psi_both = Psi.copy() GT_both = GT_prob.copy()", "_digmma1 + S2_gt * _digmma2 - SS_gt * _digmmas) # += np.log(GT_prior) GT_prob", "ID_prior, axis=1)) KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p,", "/ 100000) Psi_both = np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair *", "* ID_prob S2_gt = SS_gt - S1_gt logLik_GT = np.zeros(GT_prior.shape) for ig in", "0.3]]) theta_prior = np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]]) theta_shapes = theta_prior.copy() if", "itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x in combn_iter]) # GT combination g_idx1", "to deal with GT imcompleteness if GT_prior.shape[1] < n_donor: _add_n = n_donor -", "0, 1, 2; n_gt = GT_prob.shape[2] GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0], n_gt + 
gt_idx.shape[0]))", "n_gt = GT_prob.shape[2] GT_prob2 = np.zeros((GT_prob.shape[0], sp_idx.shape[0], n_gt + gt_idx.shape[0])) GT_prob2[:, :, :n_gt]", "learn_GT = True else: GT_prob = GT_prior.copy() GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior", "theta_prior): \"\"\" \"\"\" S1_gt = AD * ID_prob SS_gt = DP * ID_prob", "n_var, axis=2) n_gt = theta_shapes.shape[0] # number of genotype categories ## initialize Psi", "\"\"\" # TODO: support reduced GT for relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx", "+ GT_prob[:, s_idx1, :][:, :, g_idx2] * GT_prob[:, s_idx2, :][:, :, g_idx1]) GT_prob2", "GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2) GT_prob = normalize(np.exp(GT_prob), axis=2) return GT_prob, logLik_GT", "ig in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1)", "\" \"ignore n_donor.\") n_donor = GT_prior.shape[1] # check if n_gt is matched to", "VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes, theta_prior, GT_prior, Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val def", "bound decreases!\\n\") elif it == max_iter - 1: if verbose: print(\"Warning: VB did", "np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt = theta_shapes.shape[0] # number of genotype categories ##", "theta_prior, GT_prior, Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if LB[it]", "GT_prior = np.ones((AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0])) GT_prior = GT_prior / theta_shapes.shape[0] S1_gt = AD", "GT_prior is None: GT_prior = normalize(np.ones(GT_prob.shape), axis=2) if Psi is None: ID_prior =", "theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1, axis=1) + normalize(_theta_p2, axis=1)) / 2.0 _theta_sum =", "1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x for x in combn_iter]) #", "VB interations LB = np.zeros(max_iter) for it in range(max_iter): ID_prob, GT_prob, theta_shapes, LB[it]", "if learn_GT: GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_theta: theta_shapes", "return ID_prob2, GT_prob, theta_shapes, LB_val def get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior): \"\"\" \"\"\"", "# Author: <NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import", "New GT has five categories: 0, 1, 2, 1.5, 2.5 TODO: New GT", "and GT_prior\") sys.exit(1) ## VB interations LB = np.zeros(max_iter) for it in range(max_iter):", "1 if len(theta_shapes.shape) == 3 else None theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:,", "calculate lower bound for when detecting doublets LB_val = VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes,", "= update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta, check_doublet=True) ID_prob", "\"\"\" if Psi is None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD = DP", "* _digmma2 - SS_gt * _digmmas) # += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT +", "by doublet genotypes \"\"\" if check_doublet: GT_both = add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes) n_doublet_pair", "(GT_prob[:, s_idx1, :] * GT_prob[:, s_idx2, :]) GT_prob2[:, :, n_gt:] = (GT_prob[:, s_idx1,", "True else: GT_prob = GT_prior.copy() GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior > 1", "* _theta_sum 
return np.append(theta_shapes, theta_shapes_db, axis=0) def add_doublet_GT(GT_prob): \"\"\" Add doublet genotype by", "theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt = theta_shapes.shape[0] # number of genotype", "* ID_prob S2_gt = SS_gt - S1_gt theta_shapes = theta_prior.copy() for ig in", "np.sum(logLik_ID * ID_prob) KL_ID = -np.sum(entropy(ID_prob, ID_prior, axis=1)) KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2))", "combn_iter]) # GT combination g_idx1 = gt_idx[:, 0] g_idx2 = gt_idx[:, 1] combn_iter", "donors.\") sys.exit(1) else: n_donor = GT_prior.shape[1] n_var = AD.shape[0] # n_variants ## initialize", "BD = DP - AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]):", "Author: <NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import itertools", "if theta_prior is None: #theta_prior = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) theta_prior", "len(theta_prior.shape) == 2: theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2),", "def add_doublet_GT(GT_prob): \"\"\" Add doublet genotype by summarizing their probability: New GT has", "= normalize(ID_prob, axis=1) return ID_prob, logLik_ID def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None): \"\"\"", "n_donor and GT_prior has < 2 donors.\") sys.exit(1) else: n_donor = GT_prior.shape[1] n_var", "/ np.sum(Psi[:n_donor]) if ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob =", "theta_prior is None: #theta_prior = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) theta_prior =", "GT if GT_prior is None: GT_prior = normalize(np.ones((n_var, n_donor, n_gt))) GT_prob, logLik_GT =", "= min_GP GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP GT_prior =", "1) S1 = AD.transpose() * (GT_prob[:, :, ig] * _digmma1) S2 = BD.transpose()", "axis=1, keepdims=True) * np.sum(_theta_p2, axis=1, keepdims=True)) theta_shapes_db = _theta_mean * _theta_sum return np.append(theta_shapes,", "combn_iter]) # sample combination s_idx1 = sp_idx[:, 0] s_idx2 = sp_idx[:, 1] ##", "GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT is False: print(\"As", "## GT_prob has three genotypes: 0, 1, 2; n_gt = GT_prob.shape[2] GT_prob2 =", "DP, ID_prob, theta_shapes, GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior)", "0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1", "< 2 donors.\") sys.exit(1) else: n_donor = GT_prior.shape[1] n_var = AD.shape[0] # n_variants", "= np.ones(n_donor) / n_donor else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init is", ":]) GT_prob2[:, :, n_gt:] = (GT_prob[:, s_idx1, :][:, :, g_idx1] * GT_prob[:, s_idx2,", "- KL_ID - KL_GT - KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate theta for doublet", "else: Psi_both = Psi.copy() GT_both = GT_prob.copy() theta_both = theta_shapes.copy() ID_prob2, logLik_ID =", "np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1)) ID_prob = normalize(ID_prob, axis=1) return ID_prob,", "np.log(GT_prior), axis=2) GT_prob = normalize(np.exp(GT_prob), axis=2) return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob,", "axis=2) return GT_prob, logLik_GT def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, 
theta_prior, GT_prior=None, Psi=None): \"\"\"", "relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x in combn_iter]) _theta_p1", "initialize GT if GT_prior is None: GT_prior = normalize(np.ones((n_var, n_donor, n_gt))) GT_prob, logLik_GT", "VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\" if GT_prior is None:", "learn_theta=learn_theta, check_doublet=True) ID_prob = ID_prob2[:, :n_donor] doublet_prob = ID_prob2[:, n_donor:] else: LB_doublet =", "= GT_both.shape[1] - GT_prob.shape[1] if doublet_prior is None: doublet_prior = min(0.5, AD.shape[1] /", "DP, GT_both, theta_both, Psi_both) ID_prob = ID_prob2[:, :GT_prob.shape[1]] if learn_GT: GT_prob, logLik_GT =", "axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p -", "random_seed=None, verbose=False): \"\"\" Vireo core function to cluster the cells into donors. \"\"\"", "GT_prior is None: GT_prior = normalize(np.ones((n_var, n_donor, n_gt))) GT_prob, logLik_GT = get_GT_prob(AD, DP,", "GT=0&2, and GT=1&2 by averaging thire beta paramters Example ------- theta_shapes = np.array([[0.3,", "# number of genotype categories ## initialize Psi if Psi is None: Psi", "Example ------- theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) add_doublet_theta(theta_shapes) \"\"\" #", "len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2: print(\"Error: no n_donor and GT_prior has", "GT_prior = normalize(np.ones((n_var, n_donor, n_gt))) GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior)", "(S1 + S2 - SS) Psi_norm = np.log(Psi / np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID", "doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if LB[it] < LB[it -", "ID_prob, GT_prob, theta_shapes, LB[it] = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior,", "has five categories: 0, 1, 2, 1.5, 2.5 TODO: New GT has six", "= Psi.copy() GT_both = GT_prob.copy() theta_both = theta_shapes.copy() ID_prob2, logLik_ID = get_ID_prob(AD, DP,", "= True elif GT_prior.shape[1] > n_donor: print(\"Warning: n_donor is smaller than samples in", "GT_prob, theta_shapes, Psi=None): \"\"\" \"\"\" if Psi is None: Psi = np.ones(GT_prob.shape[1]) /", "\"\"\" if check_doublet: GT_both = add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes) n_doublet_pair = GT_both.shape[1] -", "get_ID_prob(AD, DP, GT_both, theta_both, Psi_both) ID_prob = ID_prob2[:, :GT_prob.shape[1]] if learn_GT: GT_prob, logLik_GT", "beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None, ID_prob_init=None, doublet_prior=None,", "_theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1, axis=1) + normalize(_theta_p2, axis=1)) / 2.0", ":, g_idx2] + GT_prob[:, s_idx1, :][:, :, g_idx2] * GT_prob[:, s_idx2, :][:, :,", "- GT_prob.shape[1] if doublet_prior is None: doublet_prior = min(0.5, AD.shape[1] / 100000) Psi_both", ":n_gt] = (GT_prob[:, s_idx1, :] * GT_prob[:, s_idx2, :]) GT_prob2[:, :, n_gt:] =", "- 1: if verbose: print(\"Warning: VB did not converge!\\n\") elif LB[it] - LB[it", "= add_doublet_GT(GT_prob) theta_both = add_doublet_theta(theta_shapes) n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1] if doublet_prior is", "DP, GT_prob, theta_shapes, Psi=None): \"\"\" \"\"\" if Psi is None: Psi = 
np.ones(GT_prob.shape[1])", "GT_prior if GT_prior.shape[2] != n_gt: print(\"Error: number of GT categories not matched: theta", "n_gt: print(\"Error: number of GT categories not matched: theta and GT_prior\") sys.exit(1) ##", "theta_shapes RV['LB_list'] = LB[: it+1] RV['LB_doublet'] = LB_doublet return RV def update_VB(AD, DP,", "- SS_gt * _digmmas) # += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2)", "3], [29.7, 0.3]]) theta_prior = np.array([[0.1, 99.9], [50, 50], [99.9, 0.1]]) theta_shapes =", "np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior)) else: Psi_both =", "in combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1,", "check if n_gt is matched to GT_prior if GT_prior.shape[2] != n_gt: print(\"Error: number", "no n_donor and GT_prior has < 2 donors.\") sys.exit(1) else: n_donor = GT_prior.shape[1]", "= get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior) ### check how to calculate lower bound", "theta_shapes = np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) add_doublet_theta(theta_shapes) \"\"\" # TODO: support", "_digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 = AD.transpose() * (GT_prob[:, :, ig] *", "SS = DP.transpose() * (GT_prob[:, :, ig] * _digmmas) logLik_ID += (S1 +", "!= n_gt: print(\"Error: number of GT categories not matched: theta and GT_prior\") sys.exit(1)", "\"\"\" if GT_prior is None: GT_prior = normalize(np.ones(GT_prob.shape), axis=2) if Psi is None:", "## VB interations LB = np.zeros(max_iter) for it in range(max_iter): ID_prob, GT_prob, theta_shapes,", "GT_prob RV['doublet_prob'] = doublet_prob RV['theta_shapes'] = theta_shapes RV['LB_list'] = LB[: it+1] RV['LB_doublet'] =", "DP * ID_prob S2_gt = SS_gt - S1_gt theta_shapes = theta_prior.copy() for ig", "AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1,", "None: ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi /", "GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD,", "ID_prob2, theta_shapes, theta_prior, GT_prior, Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val def get_theta_shapes(AD, DP,", "New GT has six categories: 0, 1, 2, 0_1, 0_2, 1_2 \"\"\" combn_iter", "ig] * _digmma1) S2 = BD.transpose() * (GT_prob[:, :, ig] * _digmma2) SS", "0.1]]) theta_shapes = theta_prior.copy() if ASE_mode and len(theta_prior.shape) == 2: theta_prior = np.repeat(np.expand_dims(theta_prior,", "_theta_mean = (normalize(_theta_p1, axis=1) + normalize(_theta_p2, axis=1)) / 2.0 _theta_sum = np.sqrt(np.sum(_theta_p1, axis=1,", "\"\"\" Add doublet genotype by summarizing their probability: New GT has five categories:", "GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter:", "core function to cluster the cells into donors. 
\"\"\" if random_seed is not", "theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta, check_doublet=True) ID_prob = ID_prob2[:, :n_donor] doublet_prob =", "SS_gt = DP * ID_prob S2_gt = SS_gt - S1_gt logLik_GT = np.zeros(GT_prior.shape)", "is matched to GT_prior if GT_prior.shape[2] != n_gt: print(\"Error: number of GT categories", "for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by averaging thire beta paramters Example", "theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta, check_doublet=True) ID_prob = ID_prob2[:, :n_donor] doublet_prob", "component of the variantional distribution. The doublet probability can be created by doublet", "Core functions for Vireo model # Author: <NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca", "if n_gt is matched to GT_prior if GT_prior.shape[2] != n_gt: print(\"Error: number of", "ID_prob2[:, :GT_prob.shape[1]] if learn_GT: GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if", "np.sum(_theta_p2, axis=1, keepdims=True)) theta_shapes_db = _theta_mean * _theta_sum return np.append(theta_shapes, theta_shapes_db, axis=0) def", "_add_n)), axis=1)) GT_prob = GT_prior.copy() if learn_GT is False: print(\"As GT_prior is not", "DP.transpose() * (GT_prob[:, :, ig] * _digmmas) logLik_ID += (S1 + S2 -", "99.9], [50, 50], [99.9, 0.1]]) theta_shapes = theta_prior.copy() if ASE_mode and len(theta_prior.shape) ==", "theta_prior) ### check how to calculate lower bound for when detecting doublets LB_val", "RV['LB_list'] = LB[: it+1] RV['LB_doublet'] = LB_doublet return RV def update_VB(AD, DP, GT_prob,", "digamma from .vireo_base import normalize, loglik_amplify, beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True,", "n_gt = theta_shapes.shape[0] # number of genotype categories ## initialize Psi if Psi", "if verbose: print(\"Warning: Lower bound decreases!\\n\") elif it == max_iter - 1: if", "100000) Psi_both = np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior))", "0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis) theta_shapes[ig, 1] += np.sum(S2_gt *", "+= np.sum(S2_gt * GT_prob[:, :, ig], axis=_axis) return theta_shapes def get_ID_prob(AD, DP, GT_prob,", "GT_prior.shape[1] n_var = AD.shape[0] # n_variants ## initialize thete if theta_prior is None:", "categories not matched: theta and GT_prior\") sys.exit(1) ## VB interations LB = np.zeros(max_iter)", "did not converge!\\n\") elif LB[it] - LB[it - 1] < epsilon_conv: break ##", "/ np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1)) ID_prob = normalize(ID_prob, axis=1) return", ":, g_idx1]) GT_prob2 = normalize(GT_prob2, axis=2) GT_prob1 = np.append(GT_prob, np.zeros((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])), axis=2)", "0]] _theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1, axis=1) + normalize(_theta_p2, axis=1)) /", "1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) S1 = AD.transpose() * (GT_prob[:, :,", "the cells into donors. 
\"\"\" if random_seed is not None: np.random.seed(random_seed) if n_donor", "S1_gt = AD * ID_prob SS_gt = DP * ID_prob S2_gt = SS_gt", "Psi if Psi is None: Psi = np.ones(n_donor) / n_donor else: Psi =", "{} RV['ID_prob'] = ID_prob RV['GT_prob'] = GT_prob RV['doublet_prob'] = doublet_prob RV['theta_shapes'] = theta_shapes", "def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None): \"\"\" \"\"\" if Psi is None: Psi", "def add_doublet_theta(theta_shapes): \"\"\" calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by", "Update the parameters of each component of the variantional distribution. The doublet probability", "gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x for x in combn_iter])", "normalize(np.ones((n_var, n_donor, n_gt))) GT_prob, logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT", "* GT_prob[:, :, ig], axis=_axis) theta_shapes[ig, 1] += np.sum(S2_gt * GT_prob[:, :, ig],", "GT_prior[GT_prior > 1 - min_GP] = 1 - min_GP GT_prior = normalize(GT_prior) #TODO:", "< 3 or GT_prior.shape[1] < 2: print(\"Error: no n_donor and GT_prior has <", "KL_GT - KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate theta for doublet genotype: GT=0&1, GT=0&2,", "= np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD = DP - AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1]))", "def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False, Psi=None, ID_prob_init=None, doublet_prior=None, check_doublet=True,", "< 2: print(\"Error: no n_donor and GT_prior has < 2 donors.\") sys.exit(1) else:", "= -np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta)", "- KL_theta def add_doublet_theta(theta_shapes): \"\"\" calculate theta for doublet genotype: GT=0&1, GT=0&2, and", "\"\"\" calculate theta for doublet genotype: GT=0&1, GT=0&2, and GT=1&2 by averaging thire", "min(0.5, AD.shape[1] / 100000) Psi_both = np.append(Psi * (1 - doublet_prior), (np.ones(n_doublet_pair) /", "ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior,", "functions for Vireo model # Author: <NAME> # Date: 30/08/2019 # http://edwardlib.org/tutorials/probabilistic-pca #", "np.array([[0.3, 29.7], [3, 3], [29.7, 0.3]]) add_doublet_theta(theta_shapes) \"\"\" # TODO: support reduced GT", "np.array([x for x in combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:, 1]]", "else: Psi = Psi[:n_donor] / np.sum(Psi[:n_donor]) if ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1],", "number of GT categories not matched: theta and GT_prior\") sys.exit(1) ## VB interations", "ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob = normalize(ID_prob_init.copy()) ## initialize", "VB did not converge!\\n\") elif LB[it] - LB[it - 1] < epsilon_conv: break", "change learn_GT to True.\") learn_GT = True elif GT_prior.shape[1] > n_donor: print(\"Warning: n_donor", "GT_prior[GT_prior < min_GP] = min_GP GT_prior[GT_prior > 1 - min_GP] = 1 -", "digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1)", "theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=learn_GT, learn_theta=learn_theta, check_doublet=check_doublet) if it > min_iter: if", 
"theta_shapes, GT_prior=None): \"\"\" \"\"\" if GT_prior is None: GT_prior = np.ones((AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0]))", "logLik_GT = get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT is False: print(\"As GT_prior", "= theta_shapes RV['LB_list'] = LB[: it+1] RV['LB_doublet'] = LB_doublet return RV def update_VB(AD,", "logLik_GT = np.zeros(GT_prior.shape) for ig in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2", "theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\" if GT_prior is None: GT_prior = normalize(np.ones(GT_prob.shape),", "ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\" if GT_prior is None: GT_prior =", "KL_GT = -np.sum(entropy(GT_prob, GT_prior, axis=2)) KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT,", "\"\"\" \"\"\" if Psi is None: Psi = np.ones(GT_prob.shape[1]) / GT_prob.shape[1] BD =", "normalize(GT_prior) #TODO: check if there is a better way to deal with GT", "np.zeros((ID_prob.shape[0], n_donor_doublt)) RV = {} RV['ID_prob'] = ID_prob RV['GT_prob'] = GT_prob RV['doublet_prob'] =", "ig], axis=_axis) return theta_shapes def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None): \"\"\" \"\"\" if", "GT_prob = GT_prior.copy() if learn_GT is False: print(\"As GT_prior is not complete, we", "one-off check doublet if check_doublet: ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob,", "n_gt + gt_idx.shape[0])) GT_prob2[:, :, :n_gt] = (GT_prob[:, s_idx1, :] * GT_prob[:, s_idx2,", "* _digmma1 + S2_gt * _digmma2 - SS_gt * _digmmas) # += np.log(GT_prior)", "= np.log(Psi / np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm, axis=1)) ID_prob = normalize(ID_prob,", "theta_shapes.shape[0])) GT_prior = GT_prior / theta_shapes.shape[0] S1_gt = AD * ID_prob SS_gt =", "1, 2, 1.5, 2.5 TODO: New GT has six categories: 0, 1, 2,", "theta and GT_prior\") sys.exit(1) ## VB interations LB = np.zeros(max_iter) for it in", "- S1_gt logLik_GT = np.zeros(GT_prior.shape) for ig in range(logLik_GT.shape[2]): _digmma1 = digamma(theta_shapes[ig, 0]).reshape(-1,", "KL_theta = -beta_entropy(theta_shapes, theta_prior) # print(LB_p, KL_ID, KL_GT, KL_theta) return LB_p - KL_ID", "* _digmmas) # += np.log(GT_prior) GT_prob = loglik_amplify(logLik_GT + np.log(GT_prior), axis=2) GT_prob =", "return ID_prob, logLik_ID def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None): \"\"\" \"\"\" if GT_prior", "a better way to deal with GT imcompleteness if GT_prior.shape[1] < n_donor: _add_n", "GT_both = GT_prob.copy() theta_both = theta_shapes.copy() ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both, theta_both,", "GT_prob[:, s_idx1, :][:, :, g_idx2] * GT_prob[:, s_idx2, :][:, :, g_idx1]) GT_prob2 =", "lower bound for when detecting doublets LB_val = VB_lower_bound(logLik_ID, GT_prob, ID_prob2, theta_shapes, theta_prior,", "it > min_iter: if LB[it] < LB[it - 1]: if verbose: print(\"Warning: Lower", "if GT_prior.shape[1] < n_donor: _add_n = n_donor - GT_prior.shape[1] GT_prior = np.append(GT_prior, normalize(np.ones((n_var,", "theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior, learn_GT=True, learn_theta=learn_theta,", "Psi.copy() GT_both = GT_prob.copy() theta_both = theta_shapes.copy() ID_prob2, logLik_ID = get_ID_prob(AD, DP, GT_both,", "and GT_prior has < 2 donors.\") sys.exit(1) else: n_donor = GT_prior.shape[1] n_var =", "+ S2 - SS) Psi_norm = np.log(Psi / 
np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID +", "np.ones((AD.shape[0], ID_prob.shape[1], theta_shapes.shape[0])) GT_prior = GT_prior / theta_shapes.shape[0] S1_gt = AD * ID_prob", "import numpy as np from scipy.stats import entropy from scipy.special import digamma from", "* GT_prob[:, s_idx2, :][:, :, g_idx1]) GT_prob2 = normalize(GT_prob2, axis=2) GT_prob1 = np.append(GT_prob,", "= normalize(np.ones(GT_prob.shape), axis=2) if Psi is None: ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1] else:", "- min_GP] = 1 - min_GP GT_prior = normalize(GT_prior) #TODO: check if there", "min_GP GT_prior = normalize(GT_prior) #TODO: check if there is a better way to", "normalize(ID_prob, axis=1) return ID_prob, logLik_ID def get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior=None): \"\"\" \"\"\"", "ID_prior = np.ones(ID_prob.shape) / ID_prob.shape[1] else: ID_prior = np.ones(ID_prob.shape) * np.log(Psi / np.sum(Psi))", "- doublet_prior), (np.ones(n_doublet_pair) / n_doublet_pair * doublet_prior)) else: Psi_both = Psi.copy() GT_both =", "LB_doublet = LB[it] n_donor_doublt = int(n_donor * (n_donor - 1) / 2) doublet_prob", "None: if len(GT_prior.shape) < 3 or GT_prior.shape[1] < 2: print(\"Error: no n_donor and", "GT_prob.shape[1] BD = DP - AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in", "S2 - SS) Psi_norm = np.log(Psi / np.sum(Psi)) ID_prob = np.exp(loglik_amplify(logLik_ID + Psi_norm,", "GT_prob, ID_prob2, theta_shapes, theta_prior, GT_prior, Psi_both) return ID_prob2, GT_prob, theta_shapes, LB_val def get_theta_shapes(AD,", "is not given, we change learn_GT to True.\") learn_GT = True else: GT_prob", "is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob = normalize(ID_prob_init.copy()) ## initialize GT", "print(\"Warning: VB did not converge!\\n\") elif LB[it] - LB[it - 1] < epsilon_conv:", "+ normalize(_theta_p2, axis=1)) / 2.0 _theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) * np.sum(_theta_p2, axis=1,", "def VB_lower_bound(logLik_ID, GT_prob, ID_prob, theta_shapes, theta_prior, GT_prior=None, Psi=None): \"\"\" \"\"\" if GT_prior is", "x in combn_iter]) # GT combination g_idx1 = gt_idx[:, 0] g_idx2 = gt_idx[:,", "keepdims=True)) theta_shapes_db = _theta_mean * _theta_sum return np.append(theta_shapes, theta_shapes_db, axis=0) def add_doublet_GT(GT_prob): \"\"\"", "learn_GT to True.\") learn_GT = True elif GT_prior.shape[1] > n_donor: print(\"Warning: n_donor is", "= add_doublet_theta(theta_shapes) n_doublet_pair = GT_both.shape[1] - GT_prob.shape[1] if doublet_prior is None: doublet_prior =", "g_idx1]) GT_prob2 = normalize(GT_prob2, axis=2) GT_prob1 = np.append(GT_prob, np.zeros((GT_prob.shape[0], GT_prob.shape[1], gt_idx.shape[0])), axis=2) return", "g_idx1] * GT_prob[:, s_idx2, :][:, :, g_idx2] + GT_prob[:, s_idx1, :][:, :, g_idx2]", "min_GP=0.00001, epsilon_conv=1e-2, random_seed=None, verbose=False): \"\"\" Vireo core function to cluster the cells into", "n_donor.\") n_donor = GT_prior.shape[1] # check if n_gt is matched to GT_prior if", "doublet_prob = ID_prob2[:, n_donor:] else: LB_doublet = LB[it] n_donor_doublt = int(n_donor * (n_donor", "cluster the cells into donors. 
\"\"\" if random_seed is not None: np.random.seed(random_seed) if", "reduced GT for relatives combn_iter = itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x", "(normalize(_theta_p1, axis=1) + normalize(_theta_p2, axis=1)) / 2.0 _theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) *", "= get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_GT is False: print(\"As GT_prior is", "learn_GT=True, learn_theta=learn_theta, check_doublet=True) ID_prob = ID_prob2[:, :n_donor] doublet_prob = ID_prob2[:, n_donor:] else: LB_doublet", "normalize(_theta_p2, axis=1)) / 2.0 _theta_sum = np.sqrt(np.sum(_theta_p1, axis=1, keepdims=True) * np.sum(_theta_p2, axis=1, keepdims=True))", "1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) logLik_GT[:, :, ig] = (S1_gt * _digmma1", "0_2, 1_2 \"\"\" combn_iter = itertools.combinations(range(GT_prob.shape[2]), 2) gt_idx = np.array([x for x in", "g_idx2 = gt_idx[:, 1] combn_iter = itertools.combinations(range(GT_prob.shape[1]), 2) sp_idx = np.array([x for x", "/ 2) doublet_prob = np.zeros((ID_prob.shape[0], n_donor_doublt)) RV = {} RV['ID_prob'] = ID_prob RV['GT_prob']", "if ID_prob_init is None: ID_prob = normalize(np.random.rand(AD.shape[1], n_donor)) else: ID_prob = normalize(ID_prob_init.copy()) ##", "each component of the variantional distribution. The doublet probability can be created by", "get_GT_prob(AD, DP, ID_prob, theta_shapes, GT_prior) if learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob,", "n_donor: print(\"Warning: n_donor is smaller than samples in GT_prior, hence we \" \"ignore", "= digamma(theta_shapes[ig, 0]).reshape(-1, 1) _digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1,", "/ n_doublet_pair * doublet_prior)) else: Psi_both = Psi.copy() GT_both = GT_prob.copy() theta_both =", "doublet genotype by summarizing their probability: New GT has five categories: 0, 1,", "check_doublet=True) ID_prob = ID_prob2[:, :n_donor] doublet_prob = ID_prob2[:, n_donor:] else: LB_doublet = LB[it]", "* ID_prob SS_gt = DP * ID_prob S2_gt = SS_gt - S1_gt theta_shapes", "x in combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean =", "itertools.combinations(range(theta_shapes.shape[0]), 2) db_idx = np.array([x for x in combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]]", "import normalize, loglik_amplify, beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True, ASE_mode=False,", "theta_prior = np.repeat(np.expand_dims(theta_prior, 2), n_var, axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt", "= AD * ID_prob SS_gt = DP * ID_prob S2_gt = SS_gt -", "combn_iter]) _theta_p1 = theta_shapes[db_idx[:, 0]] _theta_p2 = theta_shapes[db_idx[:, 1]] _theta_mean = (normalize(_theta_p1, axis=1)", "The doublet probability can be created by doublet genotypes \"\"\" if check_doublet: GT_both", "= DP - AD logLik_ID = np.zeros((AD.shape[1], GT_prob.shape[1])) for ig in range(GT_prob.shape[2]): _digmma1", "# http://edwardlib.org/tutorials/probabilistic-pca # https://github.com/allentran/pca-magic import sys import itertools import numpy as np from", "axis=_axis) return theta_shapes def get_ID_prob(AD, DP, GT_prob, theta_shapes, Psi=None): \"\"\" \"\"\" if Psi", "in combn_iter]) # GT combination g_idx1 = gt_idx[:, 0] g_idx2 = gt_idx[:, 1]", "np.append(GT_prior, normalize(np.ones((n_var, n_gt, 
_add_n)), axis=1)) GT_prob = GT_prior.copy() if learn_GT is False: print(\"As", "_digmma2 = digamma(theta_shapes[ig, 1]).reshape(-1, 1) _digmmas = digamma(theta_shapes[ig, :].sum(axis=0)).reshape(-1, 1) logLik_GT[:, :, ig]", "learn_theta: theta_shapes = get_theta_shapes(AD, DP, ID_prob, GT_prob, theta_prior) ### check how to calculate", ".vireo_base import normalize, loglik_amplify, beta_entropy def vireo_core(AD, DP, n_donor=None, GT_prior=None, learn_GT=True, theta_prior=None, learn_theta=True,", "GT_prior has < 2 donors.\") sys.exit(1) else: n_donor = GT_prior.shape[1] n_var = AD.shape[0]", "SS_gt - S1_gt theta_shapes = theta_prior.copy() for ig in range(theta_shapes.shape[0]): _axis = 1", "## initialize Psi if Psi is None: Psi = np.ones(n_donor) / n_donor else:", "GT_prob, theta_shapes, theta_prior, GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): \"\"\" Update the parameters", "check doublet if check_doublet: ID_prob2, GT_prob, theta_shapes, LB_doublet = update_VB(AD, DP, GT_prob, theta_shapes,", "axis=2) theta_shapes = np.repeat(np.expand_dims(theta_shapes, 2), n_var, axis=2) n_gt = theta_shapes.shape[0] # number of", "theta_prior, GT_prior, Psi, doublet_prior=None, learn_GT=True, learn_theta=True, check_doublet=False): \"\"\" Update the parameters of each", "scipy.stats import entropy from scipy.special import digamma from .vireo_base import normalize, loglik_amplify, beta_entropy", "== 3 else None theta_shapes[ig, 0] += np.sum(S1_gt * GT_prob[:, :, ig], axis=_axis)" ]
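To make the interface above concrete, here is a minimal usage sketch on simulated data. Everything in it is an assumption for illustration: the importable module name vireo_model, the simulation sizes, and a compatible vireo_base providing normalize, loglik_amplify and beta_entropy. Note that AD and DP are combined with ID_prob through "*" as a matrix product, which is the scipy sparse-matrix behaviour, so the sketch passes sparse matrices rather than dense arrays.

import numpy as np
from scipy.sparse import csr_matrix
from vireo_model import vireo_core  # hypothetical module name for the code above

np.random.seed(0)
n_var, n_cell, n_donor = 200, 60, 3

# simulate donor genotypes (allele dosage 0/1/2) and a true donor per cell
GT = np.random.choice(3, size=(n_var, n_donor))
labels = np.random.choice(n_donor, size=n_cell)

# total depth DP and ALT-allele counts AD per variant per cell
DP = np.random.binomial(20, 0.1, size=(n_var, n_cell))
theta = np.array([0.01, 0.5, 0.99])   # ALT rate for GT = 0, 1, 2
AD = np.random.binomial(DP, theta[GT[:, labels]])

res = vireo_core(csr_matrix(AD), csr_matrix(DP), n_donor=n_donor,
                 random_seed=1)
print(res['ID_prob'].shape)            # (n_cell, n_donor)
print(res['ID_prob'].argmax(axis=1))   # inferred donor per cell, up to label permutation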
[ "= int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif command == 'Print':", "command == 'Add': name = tokens[1] index = int(tokens[2]) if name not in", "command == 'Remove': index = int(tokens[1]) if 0 <= index < len(contacts): contacts.pop(index)", "index = int(tokens[1]) if 0 <= index < len(contacts): contacts.pop(index) elif command ==", "True: line = input() tokens = line.split() command = tokens[0] if command ==", "tokens[0] if command == 'Add': name = tokens[1] index = int(tokens[2]) if name", "print(' '.join(contacts_to_print)) elif command == 'Print': if tokens[1] == 'Reversed': contacts = contacts[::-1]", "command = tokens[0] if command == 'Add': name = tokens[1] index = int(tokens[2])", "if name not in contacts: contacts.append(name) else: if 0 <= index < len(contacts):", "'Print': if tokens[1] == 'Reversed': contacts = contacts[::-1] final_contacts = \" \".join(contacts) print(f'Contacts:", "input() tokens = line.split() command = tokens[0] if command == 'Add': name =", "contacts = input().split() while True: line = input() tokens = line.split() command =", "int(tokens[2]) if name not in contacts: contacts.append(name) else: if 0 <= index <", "== 'Add': name = tokens[1] index = int(tokens[2]) if name not in contacts:", "= int(tokens[2]) if name not in contacts: contacts.append(name) else: if 0 <= index", "'Export': start_i = int(tokens[1]) count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count] print('", "int(tokens[1]) count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif command", "== 'Print': if tokens[1] == 'Reversed': contacts = contacts[::-1] final_contacts = \" \".join(contacts)", "command == 'Export': start_i = int(tokens[1]) count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i +", "0 <= index < len(contacts): contacts.insert(index, name) elif command == 'Remove': index =", "<= index < len(contacts): contacts.pop(index) elif command == 'Export': start_i = int(tokens[1]) count", "len(contacts): contacts.insert(index, name) elif command == 'Remove': index = int(tokens[1]) if 0 <=", "contacts.append(name) else: if 0 <= index < len(contacts): contacts.insert(index, name) elif command ==", "= int(tokens[1]) count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif", "count] print(' '.join(contacts_to_print)) elif command == 'Print': if tokens[1] == 'Reversed': contacts =", "+ count] print(' '.join(contacts_to_print)) elif command == 'Print': if tokens[1] == 'Reversed': contacts", "'.join(contacts_to_print)) elif command == 'Print': if tokens[1] == 'Reversed': contacts = contacts[::-1] final_contacts", "= line.split() command = tokens[0] if command == 'Add': name = tokens[1] index", "= input().split() while True: line = input() tokens = line.split() command = tokens[0]", "index = int(tokens[2]) if name not in contacts: contacts.append(name) else: if 0 <=", "tokens = line.split() command = tokens[0] if command == 'Add': name = tokens[1]", "= input() tokens = line.split() command = tokens[0] if command == 'Add': name", "start_i = int(tokens[1]) count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count] print(' '.join(contacts_to_print))", "== 'Remove': index = int(tokens[1]) if 0 <= index < len(contacts): contacts.pop(index) elif", "tokens[1] == 'Reversed': contacts = contacts[::-1] final_contacts = \" \".join(contacts) print(f'Contacts: 
{final_contacts}') break", "'Add': name = tokens[1] index = int(tokens[2]) if name not in contacts: contacts.append(name)", "int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif command == 'Print': if", "elif command == 'Remove': index = int(tokens[1]) if 0 <= index < len(contacts):", "len(contacts): contacts.pop(index) elif command == 'Export': start_i = int(tokens[1]) count = int(tokens[2]) contacts_to_print", "contacts_to_print = contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif command == 'Print': if tokens[1]", "< len(contacts): contacts.pop(index) elif command == 'Export': start_i = int(tokens[1]) count = int(tokens[2])", "line.split() command = tokens[0] if command == 'Add': name = tokens[1] index =", "not in contacts: contacts.append(name) else: if 0 <= index < len(contacts): contacts.insert(index, name)", "== 'Export': start_i = int(tokens[1]) count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count]", "count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif command ==", "line = input() tokens = line.split() command = tokens[0] if command == 'Add':", "else: if 0 <= index < len(contacts): contacts.insert(index, name) elif command == 'Remove':", "in contacts: contacts.append(name) else: if 0 <= index < len(contacts): contacts.insert(index, name) elif", "if 0 <= index < len(contacts): contacts.insert(index, name) elif command == 'Remove': index", "command == 'Print': if tokens[1] == 'Reversed': contacts = contacts[::-1] final_contacts = \"", "int(tokens[1]) if 0 <= index < len(contacts): contacts.pop(index) elif command == 'Export': start_i", "if 0 <= index < len(contacts): contacts.pop(index) elif command == 'Export': start_i =", "= tokens[1] index = int(tokens[2]) if name not in contacts: contacts.append(name) else: if", "= int(tokens[1]) if 0 <= index < len(contacts): contacts.pop(index) elif command == 'Export':", "= tokens[0] if command == 'Add': name = tokens[1] index = int(tokens[2]) if", "contacts.insert(index, name) elif command == 'Remove': index = int(tokens[1]) if 0 <= index", "index < len(contacts): contacts.pop(index) elif command == 'Export': start_i = int(tokens[1]) count =", "= contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif command == 'Print': if tokens[1] ==", "elif command == 'Export': start_i = int(tokens[1]) count = int(tokens[2]) contacts_to_print = contacts[start_i:start_i", "elif command == 'Print': if tokens[1] == 'Reversed': contacts = contacts[::-1] final_contacts =", "name not in contacts: contacts.append(name) else: if 0 <= index < len(contacts): contacts.insert(index,", "< len(contacts): contacts.insert(index, name) elif command == 'Remove': index = int(tokens[1]) if 0", "input().split() while True: line = input() tokens = line.split() command = tokens[0] if", "if command == 'Add': name = tokens[1] index = int(tokens[2]) if name not", "0 <= index < len(contacts): contacts.pop(index) elif command == 'Export': start_i = int(tokens[1])", "while True: line = input() tokens = line.split() command = tokens[0] if command", "tokens[1] index = int(tokens[2]) if name not in contacts: contacts.append(name) else: if 0", "name = tokens[1] index = int(tokens[2]) if name not in contacts: contacts.append(name) else:", "name) elif command == 'Remove': index = int(tokens[1]) if 0 <= index <", "contacts[start_i:start_i + count] print(' '.join(contacts_to_print)) elif command == 'Print': if 
tokens[1] == 'Reversed':", "'Remove': index = int(tokens[1]) if 0 <= index < len(contacts): contacts.pop(index) elif command", "if tokens[1] == 'Reversed': contacts = contacts[::-1] final_contacts = \" \".join(contacts) print(f'Contacts: {final_contacts}')", "index < len(contacts): contacts.insert(index, name) elif command == 'Remove': index = int(tokens[1]) if", "contacts.pop(index) elif command == 'Export': start_i = int(tokens[1]) count = int(tokens[2]) contacts_to_print =", "contacts: contacts.append(name) else: if 0 <= index < len(contacts): contacts.insert(index, name) elif command", "<= index < len(contacts): contacts.insert(index, name) elif command == 'Remove': index = int(tokens[1])" ]
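A quick worked session of the contacts manager above, with made-up sample input, to pin down the command semantics:

# Hypothetical session:
#   initial stdin line:  John Peter Sarah
#   Add George 2   -> George is not in the list yet, so it is appended
#   Remove 1       -> index 1 (Peter) is popped
#   Export 0 2     -> prints: John Sarah
#   Print Normal   -> prints: Contacts: John Sarah George  (and the loop breaks)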
[ "f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' # WATTPAD wattpad = f'https://www.wattpad.com/user/{USERNAME}' # CANVA", "= f'https://{USERNAME}.contently.com' # HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA", "f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS", "f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}' # GRAVATAR", "DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}' # GRAVATAR gravatar =", "f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD", "# HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia =", "bitbucket = f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}'", "wattpad = f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}'", "creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}'", "livejournal, angellist, last_fm, dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground, wattpad, canva,", "= f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' #", "USERNAME USERNAME = input(f'[sys] Enter Username:' ) # INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' #", "# HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently = f'https://{USERNAME}.contently.com' # HOUZZ houzz", "# ''' WEBSITE LIST - USE FOR SEARCHING OF USERNAME ''' WEBSITES =", "ello = f'https://ello.co/{USERNAME}' # TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login'", "f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED", "blogger = f'https://{USERNAME}.blogspot.com' # GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}'", "wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm, wikipedia, hackernews,", "= f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' #", "SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify =", "# 500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor", "# ANGELLIST angellist = f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble", "# 
CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration", "REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest =", "DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' # VK vk = f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme =", "bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}'", "bitbucket, dailymotion, etsy, cashme, behance, goodreads, instructables, keybase, kongregate, livejournal, angellist, last_fm, dribbble,", "= f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS", "OF USERNAME ''' WEBSITES = [ instagram, facebook, twitter, youtube, blogger, google_plus, reddit,", "gravatar = f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}'", "github = f'https://www.github.com/{USERNAME}' # TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}'", "# TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed", "f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}' # GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}' # PASTEBIN", "HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently = f'https://{USERNAME}.contently.com' # HOUZZ houzz =", "ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com' # OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}'", "# NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' # WATTPAD wattpad = f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva", "instructables, keybase, kongregate, livejournal, angellist, last_fm, dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad,", "socialmedia import USERNAME USERNAME = input(f'[sys] Enter Username:' ) # INSTAGRAM instagram =", "= f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}' #", "500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor =", "LIST - USE FOR SEARCHING OF USERNAME ''' WEBSITES = [ instagram, facebook,", "gravatar, pastebin, foursquare, roblox, gumroad, newsground, wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor,", "# VK vk = f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR imgur", "f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello = f'https://ello.co/{USERNAME}' # TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' # BASECAMP", "instructables = f'https://www.instructables.com/member/{USERNAME}' # KEYBASE keybase = f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate = f'https://kongregate.com/accounts/{USERNAME}'", "# BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt", "CONTENTLY contently = 
f'https://{USERNAME}.contently.com' # HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}'", "= f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' # GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' #", "TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently =", "WEBSITES = [ instagram, facebook, twitter, youtube, blogger, google_plus, reddit, wordpress, pinterest, github,", "gumroad, newsground, wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm,", "= f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' #", "flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM steam = f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo = f'https://vimeo.com/{USERNAME}'", "# FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' # GUMROAD gumroad", "f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com' # OKCUPID", "= f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' #", "TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed =", "f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal = f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist = f'https://angel.co/{USERNAME}' # LAST.FM", "CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt =", "= f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR imgur = f'https://imgur.com/user/{USERNAME}' #", "KONGREGATE kongregate = f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal = f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist =", "f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD", "f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY", "= f'https://disqus.com/by/{USERNAME}' # MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' #", "flickr, steam, vimeo, soundcloud, disqus, medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog,", "keybase, kongregate, livejournal, angellist, last_fm, dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground,", "= f'https://about.me/{USERNAME}' # IMGUR imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD flipboard = f'https://flipboard.com/@{USERNAME}' #", "= f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}' # GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}' #", "last_fm, dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground, wattpad, canva, creative_market, trakt,", 
"f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK", "disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com'", "# MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' # VK vk", "f'https://www.facebook.com/{USERNAME}' #TWITTER twitter = f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger", "# YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger = f'https://{USERNAME}.blogspot.com' # GOOGLE+ google_plus", "f'https://www.pinterest.com/{USERNAME}' # GITHUB github = f'https://www.github.com/{USERNAME}' # TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR", "= f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads = f'https://www.goodreads.com/{USERNAME}' #", "Enter Username:' ) # INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}'", "f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD", "behance, goodreads, instructables, keybase, kongregate, livejournal, angellist, last_fm, dribbble, codecademy, gravatar, pastebin, foursquare,", "# DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme", "# TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello = f'https://ello.co/{USERNAME}' # TRACKY tracky", "disqus, medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify, mixcloud, scribd, badoo,", "f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' # GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' # WATTPAD", "= f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}' #", "= f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' #", "# BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket", "f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM steam = f'https://steamcommunity.com/id/{USERNAME}' # VIMEO", "OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello =", "slack = f'https://{USERNAME}.slack.com' # OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}'", "DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme =", "foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' # GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}'", "PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX roblox 
=", "= f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' #", "= f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' #", "# FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud", "= f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' #", "DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers =", "f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET", "deviantart = f'https://{USERNAME}.deviantart.com' # VK vk = f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}'", "ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD flipboard =", "# WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}' # GITHUB github", "f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist = f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE", "hackernews, reverb_nation, designspiration, bandcamp, colourlovers, ifttt, ebay, slack, okcupid, trip, ello, tracky, basecamp]", "# DISQUS disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart", "wikipedia, hackernews, reverb_nation, designspiration, bandcamp, colourlovers, ifttt, ebay, slack, okcupid, trip, ello, tracky,", "WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor =", "scribd, badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance, goodreads, instructables, keybase, kongregate, livejournal,", "google_plus, reddit, wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, disqus, medium, deviantart,", "= f'https://www.flickr.com/people/{USERNAME}' # STEAM steam = f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo = f'https://vimeo.com/{USERNAME}' #", "canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}'", "ROBLOX roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' # GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground =", "USERNAME ''' WEBSITES = [ instagram, facebook, twitter, youtube, blogger, google_plus, reddit, wordpress,", "- USE FOR SEARCHING OF USERNAME ''' WEBSITES = [ instagram, facebook, twitter,", "f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX", "f'https://{USERNAME}.slack.com' # OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO", "GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit = 
f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress =", "tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM steam = f'https://steamcommunity.com/id/{USERNAME}'", "mixcloud, scribd, badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance, goodreads, instructables, keybase, kongregate,", "hubpages, contently, houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration, bandcamp, colourlovers, ifttt, ebay, slack,", "# COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay", "SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd =", "etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}'", "# GITHUB github = f'https://www.github.com/{USERNAME}' # TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr", "f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON", "reddit = f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}'", "# PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}' # GITHUB github = f'https://www.github.com/{USERNAME}' # TUMBLR tumblr", "# REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp", "f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS", "youtube, blogger, google_plus, reddit, wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, disqus,", "trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration, bandcamp,", "# IMGUR imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD flipboard = f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare", "# GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress", "foursquare, roblox, gumroad, newsground, wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently,", "pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, disqus, medium, deviantart, vk, aboutme, imgur,", "= f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads = f'https://www.goodreads.com/{USERNAME}' # INSTRUCTABLES instructables = f'https://www.instructables.com/member/{USERNAME}' #", "colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}'", "f'https://{USERNAME}.deviantart.com' # VK vk = f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR", "codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground, wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed,", "#BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews =", "tracky = 
f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE LIST -", "codementor = f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}'", "contently = f'https://{USERNAME}.contently.com' # HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' #", "facebook, twitter, youtube, blogger, google_plus, reddit, wordpress, pinterest, github, tumblr, flickr, steam, vimeo,", "ifttt = f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com'", "= f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' #", "= f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger = f'https://{USERNAME}.blogspot.com' # GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' #", "= f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM steam = f'https://steamcommunity.com/id/{USERNAME}' #", "= f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello = f'https://ello.co/{USERNAME}' # TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' #", "patreon, bitbucket, dailymotion, etsy, cashme, behance, goodreads, instructables, keybase, kongregate, livejournal, angellist, last_fm,", "= f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' #", "f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY", "cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads = f'https://www.goodreads.com/{USERNAME}'", "= [ instagram, facebook, twitter, youtube, blogger, google_plus, reddit, wordpress, pinterest, github, tumblr,", "f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR", "f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX", "# SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd", "FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER twitter = f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}'", "# ELLO ello = f'https://ello.co/{USERNAME}' # TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp", "= f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger = f'https://{USERNAME}.blogspot.com' #", "NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' # WATTPAD wattpad = f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva =", "basecamp = f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE LIST - USE FOR SEARCHING OF USERNAME", "goodreads, instructables, keybase, kongregate, livejournal, angellist, last_fm, dribbble, codecademy, gravatar, pastebin, foursquare, roblox,", "# WIKIPEDIA 
wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor", "newsground, wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm, wikipedia,", "= f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' # WATTPAD wattpad = f'https://www.wattpad.com/user/{USERNAME}' #", "= f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}' #", "roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' # GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com'", "wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}'", "= f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal = f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist = f'https://angel.co/{USERNAME}' #", "= f'https://{USERNAME}.deviantart.com' # VK vk = f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}' #", "CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}' # GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin =", "fotolog, spotify, mixcloud, scribd, badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance, goodreads, instructables,", "= f'https://{USERNAME}.hubpages.com' # CONTENTLY contently = f'https://{USERNAME}.contently.com' # HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM", "= f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate = f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal = f'https://{USERNAME}.livejournal.com' #", "tripadvisor, hubpages, contently, houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration, bandcamp, colourlovers, ifttt, ebay,", "# ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance", "f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' # SPOTIFY", "= f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}' #", "hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}'", "= f'https://www.facebook.com/{USERNAME}' #TWITTER twitter = f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER", "# TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE", "WATTPAD wattpad = f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market =", "= f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' #", "input(f'[sys] Enter Username:' ) # INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook =", "trakt = 
f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}'", "wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}' # GITHUB github = f'https://www.github.com/{USERNAME}'", "f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' # BADOO", "= f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}' #", "f'https://disqus.com/by/{USERNAME}' # MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' # VK", "BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt =", "= f'https://www.instructables.com/member/{USERNAME}' # KEYBASE keybase = f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate = f'https://kongregate.com/accounts/{USERNAME}' #", ") # INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER twitter", "f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST", "= f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE LIST - USE", "BLOGGER blogger = f'https://{USERNAME}.blogspot.com' # GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit =", "f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION", "socialmedia #from socialmedia import USERNAME USERNAME = input(f'[sys] Enter Username:' ) # INSTAGRAM", "# SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify", "= f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}' #", "dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme = f'https://cash.me/{USERNAME}'", "flipboard = f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}'", "INSTRUCTABLES instructables = f'https://www.instructables.com/member/{USERNAME}' # KEYBASE keybase = f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate =", "CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration =", "# STEAM steam = f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo = f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud", "f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}' # DAILYMOTION", "dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}' # GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}'", 
"f'https://{USERNAME}.newgrounds.com' # WATTPAD wattpad = f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET", "houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration, bandcamp, colourlovers, ifttt, ebay, slack, okcupid, trip,", "LIVEJOURNAL livejournal = f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist = f'https://angel.co/{USERNAME}' # LAST.FM last_fm =", "reverb_nation = f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}'", "f'https://soundcloud.com/{USERNAME}' # DISQUS disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART", "ELLO ello = f'https://ello.co/{USERNAME}' # TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp =", "''' WEBSITES = [ instagram, facebook, twitter, youtube, blogger, google_plus, reddit, wordpress, pinterest,", "# INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER twitter =", "= f'https://www.github.com/{USERNAME}' # TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' #", "blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}'", "# CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px", "vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify, mixcloud, scribd, badoo, patreon, bitbucket, dailymotion,", "COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay =", "scribd = f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}'", "f'https://foursquare.com/{USERNAME}' # ROBLOX roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}' # GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND", "SEARCHING OF USERNAME ''' WEBSITES = [ instagram, facebook, twitter, youtube, blogger, google_plus,", "= f'https://soundcloud.com/{USERNAME}' # DISQUS disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM medium = f'https://medium.com/@{USERNAME}' #", "roblox, gumroad, newsground, wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently, houzz,", "medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify, mixcloud, scribd, badoo, patreon,", "= f'https://ello.co/{USERNAME}' # TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login' #", "# MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' # BADOO badoo", "f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE LIST - USE FOR SEARCHING OF USERNAME ''' WEBSITES", "DISQUS disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart =", "houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' #", "VK vk = 
f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR imgur =", "# BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE LIST - USE FOR SEARCHING", "USE FOR SEARCHING OF USERNAME ''' WEBSITES = [ instagram, facebook, twitter, youtube,", "f'https://{USERNAME}.blogspot.com' # GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS", "BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket =", "= f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' #", "# SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}' # DISQUS disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM medium", "= f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme = f'https://cash.me/{USERNAME}' #", "<reponame>elithaxxor/pi_repo #import socialmedia #from socialmedia import USERNAME USERNAME = input(f'[sys] Enter Username:' )", "f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello = f'https://ello.co/{USERNAME}' # TRACKY", "f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT", "kongregate, livejournal, angellist, last_fm, dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground, wattpad,", "# OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello", "# EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com' # OKCUPID okcupid", "github, tumblr, flickr, steam, vimeo, soundcloud, disqus, medium, deviantart, vk, aboutme, imgur, flipboard,", "= f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}' #", "= f'https://{USERNAME}.slack.com' # OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' #", "= f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}' # GITHUB github = f'https://www.github.com/{USERNAME}' #", "BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages =", "SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}' # DISQUS disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM medium =", "MEDIUM medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' # VK vk =", "= f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER twitter = f'https://www.twitter.com/{USERNAME}' # YOUTUBE", "f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate = f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal = f'https://{USERNAME}.livejournal.com' # ANGELLIST", "TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM steam =", "FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM steam = 
f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo =", "f'https://about.me/{USERNAME}' # IMGUR imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD flipboard = f'https://flipboard.com/@{USERNAME}' # SLIDESHARE", "# GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' # WATTPAD wattpad", "dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground, wattpad, canva, creative_market, trakt, five_hundred_px,", "vk = f'https://vk.com/{USERNAME}' # ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR imgur = f'https://imgur.com/user/{USERNAME}'", "HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}'", "= f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com' # OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' #", "twitter = f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger = f'https://{USERNAME}.blogspot.com'", "# FLIPBOARD flipboard = f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog", "= f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist = f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' #", "IFTTT ifttt = f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack =", "# HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation", "#from socialmedia import USERNAME USERNAME = input(f'[sys] Enter Username:' ) # INSTAGRAM instagram", "cashme, behance, goodreads, instructables, keybase, kongregate, livejournal, angellist, last_fm, dribbble, codecademy, gravatar, pastebin,", "WEBSITE LIST - USE FOR SEARCHING OF USERNAME ''' WEBSITES = [ instagram,", "badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance, goodreads, instructables, keybase, kongregate, livejournal, angellist,", "= f'https://www.pinterest.com/{USERNAME}' # GITHUB github = f'https://www.github.com/{USERNAME}' # TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' #", "ANGELLIST angellist = f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble =", "patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}'", "# BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads = f'https://www.goodreads.com/{USERNAME}' # INSTRUCTABLES instructables", "# KEYBASE keybase = f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate = f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal", "vimeo = f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}' # DISQUS disqus = f'https://disqus.com/by/{USERNAME}'", "designspiration = f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}'", "= f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}' #", 
"reddit, wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, disqus, medium, deviantart, vk,", "badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}'", "f'https://www.github.com/{USERNAME}' # TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM", "#TWITTER twitter = f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger =", "deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify, mixcloud, scribd, badoo, patreon, bitbucket,", "# WATTPAD wattpad = f'https://www.wattpad.com/user/{USERNAME}' # CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market", "f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently = f'https://{USERNAME}.contently.com' # HOUZZ", "= f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' #", "f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME", "vimeo, soundcloud, disqus, medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify, mixcloud,", "f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads = f'https://www.goodreads.com/{USERNAME}' # INSTRUCTABLES", "IMGUR imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD flipboard = f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare =", "= f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px = f'https://500px.com/{USERNAME}' #", "f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' # VK vk = f'https://vk.com/{USERNAME}' # ABOUT.ME", "goodreads = f'https://www.goodreads.com/{USERNAME}' # INSTRUCTABLES instructables = f'https://www.instructables.com/member/{USERNAME}' # KEYBASE keybase = f'https://keybase.io/{USERNAME}'", "reverb_nation, designspiration, bandcamp, colourlovers, ifttt, ebay, slack, okcupid, trip, ello, tracky, basecamp] #", "# TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently", "''' WEBSITE LIST - USE FOR SEARCHING OF USERNAME ''' WEBSITES = [", "wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, disqus, medium, deviantart, vk, aboutme,", "f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE LIST - USE FOR", "BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads = f'https://www.goodreads.com/{USERNAME}' # INSTRUCTABLES instructables =", "canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm, wikipedia, hackernews, reverb_nation,", "slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}'", "soundcloud, disqus, medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify, mixcloud, scribd,", "f'https://{USERNAME}.hubpages.com' # CONTENTLY contently 
= f'https://{USERNAME}.contently.com' # HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm", "spotify = f'https://open.spotify.com/user/{USERNAME}' # MIXCLOUD mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd = f'https://www.scribd.com/{USERNAME}'", "= f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' # VK vk = f'https://vk.com/{USERNAME}' #", "TRACKY tracky = f'https://tracky.com/user/~{USERNAME}' # BASECAMP basecamp = f'https://{USERNAME}.basecamphq.com/login' # ''' WEBSITE LIST", "# DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers", "# PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX roblox", "steam = f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo = f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}'", "# REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest", "trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello = f'https://ello.co/{USERNAME}' # TRACKY tracky = f'https://tracky.com/user/~{USERNAME}'", "= f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' #", "= f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}' # DISQUS disqus = f'https://disqus.com/by/{USERNAME}' #", "SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon =", "KEYBASE keybase = f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate = f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal =", "okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # ELLO ello = f'https://ello.co/{USERNAME}'", "= f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' #", "import USERNAME USERNAME = input(f'[sys] Enter Username:' ) # INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}'", "f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo = f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}' # DISQUS", "= f'https://www.ifttt.com/p/{USERNAME}' # EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com' #", "GITHUB github = f'https://www.github.com/{USERNAME}' # TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr =", "INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER twitter = f'https://www.twitter.com/{USERNAME}'", "# SLACK slack = f'https://{USERNAME}.slack.com' # OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip", "angellist = f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}'", "mixcloud = f'https://www.mixcloud.com/{USERNAME}' # SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}'", "= f'https://{USERNAME}.blogspot.com' # 
GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}' #", "tumblr, flickr, steam, vimeo, soundcloud, disqus, medium, deviantart, vk, aboutme, imgur, flipboard, slideshare,", "= f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' # SPOTIFY spotify = f'https://open.spotify.com/user/{USERNAME}' #", "= f'https://www.codecademy.com/{USERNAME}' # GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' #", "# ABOUT.ME aboutme = f'https://about.me/{USERNAME}' # IMGUR imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD flipboard", "# BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion = f'https://www.dailymotion.com/{USERNAME}' # ETSY etsy", "CASHME cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads =", "ETSY etsy = f'https://www.etsy.com/shop/{USERNAME}' # CASHME cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance =", "# SCRIBD scribd = f'https://www.scribd.com/{USERNAME}' # BADOO badoo = f'https://www.badoo.com/en/{USERNAME}' # PATREON patreon", "# VIMEO vimeo = f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}' # DISQUS disqus", "pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}'", "# INSTRUCTABLES instructables = f'https://www.instructables.com/member/{USERNAME}' # KEYBASE keybase = f'https://keybase.io/{USERNAME}' # KONGREGATE kongregate", "# BLOGGER blogger = f'https://{USERNAME}.blogspot.com' # GOOGLE+ google_plus = f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit", "f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud = f'https://soundcloud.com/{USERNAME}' # DISQUS disqus = f'https://disqus.com/by/{USERNAME}' # MEDIUM", "# CASHME cashme = f'https://cash.me/{USERNAME}' # BEHANCE behance = f'https://www.behance.net/{USERNAME}' # GOODREADS goodreads", "kongregate = f'https://kongregate.com/accounts/{USERNAME}' # LIVEJOURNAL livejournal = f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist = f'https://angel.co/{USERNAME}'", "# CANVA canva = f'https://www.canva.com/{USERNAME}' # CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt", "slideshare, fotolog, spotify, mixcloud, scribd, badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance, goodreads,", "imgur = f'https://imgur.com/user/{USERNAME}' # FLIPBOARD flipboard = f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}'", "buzzfeed = f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com'", "CREATIVEMARKET creative_market = f'https://creativemarket.com/{USERNAME}' # TRAKT trakt = f'https://www.trakt.tv/users/{USERNAME}' # 500PX five_hundred_px =", "# LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy", "f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}' # HACKERNEWS hackernews = f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR", "blogger, google_plus, reddit, wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, 
disqus, medium,", "# PATREON patreon = f'https://www.patreon.com/{USERNAME}' # BITBUCKET bitbucket = f'https://bitbucket.org/{USERNAME}' # DAILYMOTION dailymotion", "f'https://{USERNAME}.contently.com' # HOUZZ houzz = f'https://houzz.com/user/{USERNAME}' #BLIP.FM blipfm = f'https://blip.fm/{USERNAME}' # WIKIPEDIA wikipedia", "livejournal = f'https://{USERNAME}.livejournal.com' # ANGELLIST angellist = f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}'", "STEAM steam = f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo = f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD soundcloud =", "f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' # IFTTT", "creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration,", "= f'https://www.goodreads.com/{USERNAME}' # INSTRUCTABLES instructables = f'https://www.instructables.com/member/{USERNAME}' # KEYBASE keybase = f'https://keybase.io/{USERNAME}' #", "f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' # PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}' # GITHUB", "steam, vimeo, soundcloud, disqus, medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify,", "YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger = f'https://{USERNAME}.blogspot.com' # GOOGLE+ google_plus =", "f'https://www.flickr.com/people/{USERNAME}' # STEAM steam = f'https://steamcommunity.com/id/{USERNAME}' # VIMEO vimeo = f'https://vimeo.com/{USERNAME}' # SOUNDCLOUD", "FLIPBOARD flipboard = f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog =", "codecademy = f'https://www.codecademy.com/{USERNAME}' # GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}'", "= f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently = f'https://{USERNAME}.contently.com' #", "f'https://500px.com/{USERNAME}' # BUZZFEED buzzfeed = f'https://buzzfeed.com/{USERNAME}' # TRIPADVISOR tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES", "USERNAME = input(f'[sys] Enter Username:' ) # INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK", "EBAY ebay = f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com' # OKCUPID okcupid =", "GOODREADS goodreads = f'https://www.goodreads.com/{USERNAME}' # INSTRUCTABLES instructables = f'https://www.instructables.com/member/{USERNAME}' # KEYBASE keybase =", "f'https://angel.co/{USERNAME}' # LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY", "Username:' ) # INSTAGRAM instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER", "PINTEREST pinterest = f'https://www.pinterest.com/{USERNAME}' # GITHUB github = f'https://www.github.com/{USERNAME}' # TUMBLR tumblr =", "GUMROAD gumroad = f'https://www.gumroad.com/{USERNAME}' # NEWSGROUND newsground = f'https://{USERNAME}.newgrounds.com' # WATTPAD wattpad =", "= f'https://www.okcupid.com/profile/{USERNAME}' # TRIP trip = f'https://www.trip.skyscanner.com/user/{USERNAME}' # 
ELLO ello = f'https://ello.co/{USERNAME}' #", "buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration, bandcamp, colourlovers, ifttt,", "= f'https://plus.google.com/s/{USERNAME}/top' # REDDIT reddit = f'https://www.reddit.com/user/{USERNAME}' # WORDPRESS wordpress = f'https://{USERNAME}.wordpress.com' #", "last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy = f'https://www.codecademy.com/{USERNAME}'", "flipboard, slideshare, fotolog, spotify, mixcloud, scribd, badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance,", "f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' # BLOGGER blogger = f'https://{USERNAME}.blogspot.com' # GOOGLE+", "[ instagram, facebook, twitter, youtube, blogger, google_plus, reddit, wordpress, pinterest, github, tumblr, flickr,", "# TUMBLR tumblr = f'https://{USERNAME}.tumblr.com' # FLICKR flickr = f'https://www.flickr.com/people/{USERNAME}' # STEAM steam", "= f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}' # DESIGNSPIRATION designspiration = f'https://www.designspiration.net/{USERNAME}' #", "= f'https://flipboard.com/@{USERNAME}' # SLIDESHARE slideshare = f'https://slideshare.net/{USERNAME}' # FOTOLOG fotolog = f'https://fotolog.com/{USERNAME}' #", "hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently = f'https://{USERNAME}.contently.com' # HOUZZ houzz = f'https://houzz.com/user/{USERNAME}'", "instagram = f'https://www.instagram.com/{USERNAME}' # FACEBOOK facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER twitter = f'https://www.twitter.com/{USERNAME}' #", "facebook = f'https://www.facebook.com/{USERNAME}' #TWITTER twitter = f'https://www.twitter.com/{USERNAME}' # YOUTUBE youtube = f'https://www.youtube.com/{USERNAME}' #", "GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare =", "= f'https://www.designspiration.net/{USERNAME}' # BANDCAMP bandcamp = f'https://www.bandcamp.com/{USERNAME}' # COLOURLOVERS colourlovers = f'https://www.colourlovers.com/love/{USERNAME}' #", "tripadvisor = f'https://tripadvisor.com/members/{USERNAME}' # HUBPAGES hubpages = f'https://{USERNAME}.hubpages.com' # CONTENTLY contently = f'https://{USERNAME}.contently.com'", "aboutme, imgur, flipboard, slideshare, fotolog, spotify, mixcloud, scribd, badoo, patreon, bitbucket, dailymotion, etsy,", "# GRAVATAR gravatar = f'https://en.gravatar.com/{USERNAME}' # PASTEBIN pastebin = f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare", "f'https://www.ebay.com/usr/{USERNAME}' # SLACK slack = f'https://{USERNAME}.slack.com' # OKCUPID okcupid = f'https://www.okcupid.com/profile/{USERNAME}' # TRIP", "LAST.FM last_fm = f'https://last.fm/user/{USERNAME}' # DRIBBBLE dribbble = f'https://dribbble.com/{USERNAME}' # CODECADEMY codecademy =", "medium = f'https://medium.com/@{USERNAME}' # DEVIANTART deviantart = f'https://{USERNAME}.deviantart.com' # VK vk = f'https://vk.com/{USERNAME}'", "= f'https://news.ycombinator.com/user?id={USERNAME}' # CODEMENTOR codementor = f'https://www.codementor.io/{USERNAME}' # REVERBNATION reverb_nation = f'https://www.reverbnation.com/{USERNAME}' #", "f'https://pastebin.com/u/{USERNAME}' # FOURSQUARE foursquare = f'https://foursquare.com/{USERNAME}' # ROBLOX roblox = 
#import socialmedia
#from socialmedia import USERNAME

USERNAME = input('[sys] Enter Username: ')

# Profile URL per site. A few URL patterns (marked "assumed") are not
# recoverable from this shredded listing, so the common public pattern
# for that site is used instead.
instagram = f'https://www.instagram.com/{USERNAME}'  # INSTAGRAM
facebook = f'https://www.facebook.com/{USERNAME}'  # FACEBOOK
twitter = f'https://www.twitter.com/{USERNAME}'  # TWITTER
youtube = f'https://www.youtube.com/{USERNAME}'  # YOUTUBE
blogger = f'https://{USERNAME}.blogspot.com'  # BLOGGER
google_plus = f'https://plus.google.com/s/{USERNAME}/top'  # GOOGLE+
reddit = f'https://www.reddit.com/user/{USERNAME}'  # REDDIT
wordpress = f'https://{USERNAME}.wordpress.com'  # WORDPRESS
pinterest = f'https://www.pinterest.com/{USERNAME}'  # PINTEREST
github = f'https://www.github.com/{USERNAME}'  # GITHUB
tumblr = f'https://{USERNAME}.tumblr.com'  # TUMBLR
flickr = f'https://www.flickr.com/people/{USERNAME}'  # FLICKR
steam = f'https://steamcommunity.com/id/{USERNAME}'  # STEAM
vimeo = f'https://vimeo.com/{USERNAME}'  # VIMEO
soundcloud = f'https://soundcloud.com/{USERNAME}'  # SOUNDCLOUD
disqus = f'https://disqus.com/by/{USERNAME}'  # DISQUS
medium = f'https://medium.com/@{USERNAME}'  # MEDIUM
deviantart = f'https://{USERNAME}.deviantart.com'  # DEVIANTART
vk = f'https://vk.com/{USERNAME}'  # VK
aboutme = f'https://about.me/{USERNAME}'  # ABOUT.ME
imgur = f'https://imgur.com/user/{USERNAME}'  # IMGUR
flipboard = f'https://flipboard.com/@{USERNAME}'  # FLIPBOARD
slideshare = f'https://slideshare.net/{USERNAME}'  # SLIDESHARE
fotolog = f'https://fotolog.com/{USERNAME}'  # FOTOLOG
spotify = f'https://open.spotify.com/user/{USERNAME}'  # SPOTIFY
mixcloud = f'https://www.mixcloud.com/{USERNAME}'  # MIXCLOUD
scribd = f'https://www.scribd.com/{USERNAME}'  # SCRIBD
badoo = f'https://badoo.com/profile/{USERNAME}'  # BADOO (URL assumed)
patreon = f'https://www.patreon.com/{USERNAME}'  # PATREON
bitbucket = f'https://bitbucket.org/{USERNAME}'  # BITBUCKET
dailymotion = f'https://www.dailymotion.com/{USERNAME}'  # DAILYMOTION
etsy = f'https://www.etsy.com/shop/{USERNAME}'  # ETSY
cashme = f'https://cash.me/{USERNAME}'  # CASHME
behance = f'https://www.behance.net/{USERNAME}'  # BEHANCE
goodreads = f'https://www.goodreads.com/{USERNAME}'  # GOODREADS
instructables = f'https://www.instructables.com/member/{USERNAME}'  # INSTRUCTABLES
keybase = f'https://keybase.io/{USERNAME}'  # KEYBASE
kongregate = f'https://kongregate.com/accounts/{USERNAME}'  # KONGREGATE
livejournal = f'https://{USERNAME}.livejournal.com'  # LIVEJOURNAL
angellist = f'https://angel.co/{USERNAME}'  # ANGELLIST
last_fm = f'https://last.fm/user/{USERNAME}'  # LAST.FM (URL assumed)
dribbble = f'https://dribbble.com/{USERNAME}'  # DRIBBBLE
codecademy = f'https://www.codecademy.com/{USERNAME}'  # CODECADEMY
gravatar = f'https://en.gravatar.com/{USERNAME}'  # GRAVATAR
pastebin = f'https://pastebin.com/u/{USERNAME}'  # PASTEBIN
foursquare = f'https://foursquare.com/{USERNAME}'  # FOURSQUARE
roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}'  # ROBLOX
gumroad = f'https://www.gumroad.com/{USERNAME}'  # GUMROAD
newsground = f'https://{USERNAME}.newgrounds.com'  # NEWSGROUND
wattpad = f'https://www.wattpad.com/user/{USERNAME}'  # WATTPAD
canva = f'https://www.canva.com/{USERNAME}'  # CANVA
creative_market = f'https://creativemarket.com/{USERNAME}'  # CREATIVE MARKET (URL assumed)
trakt = f'https://www.trakt.tv/users/{USERNAME}'  # TRAKT (URL assumed)
five_hundred_px = f'https://500px.com/{USERNAME}'  # 500PX
buzzfeed = f'https://buzzfeed.com/{USERNAME}'  # BUZZFEED
tripadvisor = f'https://tripadvisor.com/members/{USERNAME}'  # TRIPADVISOR
hubpages = f'https://{USERNAME}.hubpages.com'  # HUBPAGES (URL assumed)
contently = f'https://{USERNAME}.contently.com'  # CONTENTLY
houzz = f'https://houzz.com/user/{USERNAME}'  # HOUZZ
blipfm = f'https://blip.fm/{USERNAME}'  # BLIP.FM
wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}'  # WIKIPEDIA
hackernews = f'https://news.ycombinator.com/user?id={USERNAME}'  # HACKERNEWS
codementor = f'https://www.codementor.io/{USERNAME}'  # CODEMENTOR
reverb_nation = f'https://www.reverbnation.com/{USERNAME}'  # REVERBNATION
designspiration = f'https://www.designspiration.net/{USERNAME}'  # DESIGNSPIRATION
bandcamp = f'https://www.bandcamp.com/{USERNAME}'  # BANDCAMP
colourlovers = f'https://www.colourlovers.com/love/{USERNAME}'  # COLOURLOVERS
ifttt = f'https://www.ifttt.com/p/{USERNAME}'  # IFTTT
ebay = f'https://www.ebay.com/usr/{USERNAME}'  # EBAY
slack = f'https://{USERNAME}.slack.com'  # SLACK
okcupid = f'https://www.okcupid.com/profile/{USERNAME}'  # OKCUPID
trip = f'https://www.trip.skyscanner.com/user/{USERNAME}'  # TRIP
ello = f'https://ello.co/{USERNAME}'  # ELLO
tracky = f'https://tracky.com/user/~{USERNAME}'  # TRACKY
basecamp = f'https://{USERNAME}.basecamphq.com/login'  # BASECAMP

''' WEBSITE LIST - USE FOR SEARCHING OF USERNAME '''
WEBSITES = [
    instagram, facebook, twitter, youtube, blogger, google_plus, reddit,
    wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud,
    disqus, medium, deviantart, vk, aboutme,  # these five restored from definition order
    imgur, flipboard, slideshare, fotolog, spotify, mixcloud, scribd, badoo,
    patreon, bitbucket, dailymotion, etsy, cashme, behance, goodreads,
    instructables, keybase, kongregate, livejournal, angellist, last_fm,
    dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad,
    newsground, wattpad, canva, creative_market, trakt, five_hundred_px,
    buzzfeed, tripadvisor, hubpages, contently, houzz, blipfm, wikipedia,
    hackernews, reverb_nation, designspiration, bandcamp, colourlovers,
    ifttt, ebay, slack, okcupid, trip, ello,
    tracky, basecamp,  # tail restored from definition order
]
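The script builds the WEBSITES list but stops short of actually querying anything. Below is a minimal sketch of how the list could be used to probe each profile URL; it assumes the third-party requests library, and the check_username helper and its 200-means-taken heuristic are illustrative additions, not part of the original script.

import requests

def check_username(urls, timeout=5):
    """Report which profile URLs respond with HTTP 200 (hypothetical helper)."""
    for url in urls:
        try:
            rsp = requests.get(url, timeout=timeout)
        except requests.RequestException as exc:
            print(f'[err] {url}: {exc}')
            continue
        status = 'FOUND' if rsp.status_code == 200 else 'not found'
        print(f'[{rsp.status_code}] {status}: {url}')

check_username(WEBSITES)

Many sites return soft 200s or redirects for missing profiles, so a real checker would need per-site error pages; this sketch only shows the control flow.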
[ "in config: self.id = config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def __eq__(self, other): return", "= config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def __eq__(self, other): return (self.port == other.port", "other.port and self.password == <PASSWORD>.password and self.method == other.method and self.id == other.id)", "def __init__(self, config): self.port = int(config[\"port\"]) self.password = str(config[\"password\"]) self.method = str(config[\"method\"]) if", "= int(config[\"port\"]) self.password = str(config[\"password\"]) self.method = str(config[\"method\"]) if \"_id\" in config: self.id", "int(config[\"port\"]) self.password = str(config[\"password\"]) self.method = str(config[\"method\"]) if \"_id\" in config: self.id =", "self.port = int(config[\"port\"]) self.password = str(config[\"password\"]) self.method = str(config[\"method\"]) if \"_id\" in config:", "else: self.id = str(self.port) def __eq__(self, other): return (self.port == other.port and self.password", "config: self.id = config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def __eq__(self, other): return (self.port", "__eq__(self, other): return (self.port == other.port and self.password == <PASSWORD>.password and self.method ==", "config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def __eq__(self, other): return (self.port == other.port and", "= str(config[\"method\"]) if \"_id\" in config: self.id = config[\"_id\"][\"$oid\"] else: self.id = str(self.port)", "str(config[\"password\"]) self.method = str(config[\"method\"]) if \"_id\" in config: self.id = config[\"_id\"][\"$oid\"] else: self.id", "__init__(self, config): self.port = int(config[\"port\"]) self.password = str(config[\"password\"]) self.method = str(config[\"method\"]) if \"_id\"", "= str(self.port) def __eq__(self, other): return (self.port == other.port and self.password == <PASSWORD>.password", "(self.port == other.port and self.password == <PASSWORD>.password and self.method == other.method and self.id", "return (self.port == other.port and self.password == <PASSWORD>.password and self.method == other.method and", "self.id = config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def __eq__(self, other): return (self.port ==", "\"_id\" in config: self.id = config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def __eq__(self, other):", "= str(config[\"password\"]) self.method = str(config[\"method\"]) if \"_id\" in config: self.id = config[\"_id\"][\"$oid\"] else:", "self.id = str(self.port) def __eq__(self, other): return (self.port == other.port and self.password ==", "User(object): def __init__(self, config): self.port = int(config[\"port\"]) self.password = str(config[\"password\"]) self.method = str(config[\"method\"])", "str(config[\"method\"]) if \"_id\" in config: self.id = config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def", "def __eq__(self, other): return (self.port == other.port and self.password == <PASSWORD>.password and self.method", "== other.port and self.password == <PASSWORD>.password and self.method == other.method and self.id ==", "self.method = str(config[\"method\"]) if \"_id\" in config: self.id = config[\"_id\"][\"$oid\"] else: self.id =", "config): self.port = int(config[\"port\"]) self.password = str(config[\"password\"]) self.method = str(config[\"method\"]) if \"_id\" in", "other): return (self.port == other.port and self.password == <PASSWORD>.password and self.method == other.method", "self.password = str(config[\"password\"]) self.method = str(config[\"method\"]) if 
\"_id\" in config: self.id = config[\"_id\"][\"$oid\"]", "str(self.port) def __eq__(self, other): return (self.port == other.port and self.password == <PASSWORD>.password and", "class User(object): def __init__(self, config): self.port = int(config[\"port\"]) self.password = str(config[\"password\"]) self.method =", "if \"_id\" in config: self.id = config[\"_id\"][\"$oid\"] else: self.id = str(self.port) def __eq__(self," ]
[ "in tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line)", "test function \"\"\" pass def test_remove_html_character_references(self): # test that the function performs as", "the function performs as expected line = \"the quick brown \\t fox jumps", "tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\"", "\"Desk\") # test that the function has no side effects word = \"Investment&quot;\"", "tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\"", "\"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the function has no side", "def test_remove_html_character_references(self): # test that the function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"),", "function will be called before any test is run. It's a useful place", "\\r\\n over the lazy \\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\"", "setUp(self): # for some reason, running spark code within a unittest throws a", "in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def setUp(self): #", "within a unittest throws a bunch of ResourceWarnings # check out this issue:", "Unit tests for functions in the src.utilities package \"\"\" import unittest import warnings", "on an actual cluster. 
A local master is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf',", "unittest throws a bunch of ResourceWarnings # check out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\",", "in tokens) def test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\",", "the function has no side effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def", "issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for this test, we don't want", "combined = list(combined.collect()) for idx, tuple in enumerate(pairs): self.assertEqual(tuple, combined[idx]) if __name__ ==", "in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self): labels = [\"GCAT\",", "self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the function has no side effects", "category=ResourceWarning) def test_custom_zip(self): # for this test, we don't want to run on", "https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for this test, we don't want to", "self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\"", "tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens)", "that the function has no side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\")", "function performs as expected line = \"the quick brown \\t fox jumps \\r\\n", "tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self): labels", "# the custom zip function should work on RDDs with different numbers of", "in tokens) self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\",", "# for this test, we don't want to run on an actual cluster.", "for functions in the src.utilities package \"\"\" import unittest import warnings from pyspark", "any test is run. It's a useful place to do initialization that otherwise", "preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\"", "setUp(self): \"\"\" This setup function will be called before any test is run.", "src.utilities package \"\"\" import unittest import warnings from pyspark import SparkContext, SparkConf import", "self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in", "self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def setUp(self): # for some", "called before any test is run. 
It's a useful place to do initialization", "\"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the function has", "pass def test_remove_html_character_references(self): # test that the function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\")", "would have to be repeated for every test function \"\"\" pass def test_remove_html_character_references(self):", "word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self): # test that the function", "SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) squares =", "in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in", "self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\"", "side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test that", "in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in", "fox jumps \\r\\n over the lazy \\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in", "self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the function", "filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def", "tokens) self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\",", "that the function has no side effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\")", "has no side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): #", "self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test that the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"),", "of ResourceWarnings # check out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): #", "import warnings from pyspark import SparkContext, SparkConf import src.utilities.preprocess as preprocess import src.utilities.utils", "# check out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for this", "ResourceWarnings # check out this issue: 
https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for", "def test_custom_zip(self): # for this test, we don't want to run on an", "as utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup function will be called", "some reason, running spark code within a unittest throws a bunch of ResourceWarnings", "A local master is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf)", "Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) squares = [num**2", "\\t fox jumps \\r\\n over the lazy \\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\"", "tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens", "= \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test that the function performs", "don't want to run on an actual cluster. A local master is sufficient", "otherwise would have to be repeated for every test function \"\"\" pass def", "self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that", "over the lazy \\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in", "in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self):", "throws a bunch of ResourceWarnings # check out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning)", "preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens)", "\"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the", "\"Investment.\") def test_tokenize(self): # test that the function performs as expected line =", "labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered =", "side effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self): # test that", "= [num**2 for num in nums] pairs = [(num, num**2) for num in", "actual cluster. 
A local master is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc", "class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup function will be called before any", "test_custom_zip(self): # for this test, we don't want to run on an actual", "for num in nums] pairs = [(num, num**2) for num in nums] #", "\"\"\" import unittest import warnings from pyspark import SparkContext, SparkConf import src.utilities.preprocess as", "= [(num, num**2) for num in nums] # the custom zip function should", "self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\"", "quick brown \\t fox jumps \\r\\n over the lazy \\n dog\" tokens =", "effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test that the", "def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in", "= utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined = list(combined.collect()) for idx, tuple in", "from pyspark import SparkContext, SparkConf import src.utilities.preprocess as preprocess import src.utilities.utils as utils", "sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0,", "unittest import warnings from pyspark import SparkContext, SparkConf import src.utilities.preprocess as preprocess import", "as expected line = \"the quick brown \\t fox jumps \\r\\n over the", "the src.utilities package \"\"\" import unittest import warnings from pyspark import SparkContext, SparkConf", "numbers of slices rdd1 = sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3) combined =", "combined = utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined = list(combined.collect()) for idx, tuple", "to be repeated for every test function \"\"\" pass def test_remove_html_character_references(self): # test", "def setUp(self): \"\"\" This setup function will be called before any test is", "pyspark import SparkContext, SparkConf import src.utilities.preprocess as preprocess import src.utilities.utils as utils class", "to do initialization that otherwise would have to be repeated for every test", "5) rdd2 = sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined", "rdd1 = sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2) combined", "nums] # the custom zip function should work on RDDs with different numbers", "preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens)", "in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in", "def setUp(self): # for some reason, running spark code within a unittest throws", "of slices rdd1 = sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1,", "\"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4)", "be called 
before any test is run. It's a useful place to do", "local master is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\")", "want to run on an actual cluster. A local master is sufficient conf", "preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self): # test that the function performs as expected", "combined.sortByKey() combined = list(combined.collect()) for idx, tuple in enumerate(pairs): self.assertEqual(tuple, combined[idx]) if __name__", "self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def setUp(self):", "tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens)", "# test that the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"),", "setup function will be called before any test is run. It's a useful", "test that the function has no side effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word,", "expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\")", "for every test function \"\"\" pass def test_remove_html_character_references(self): # test that the function", "[num**2 for num in nums] pairs = [(num, num**2) for num in nums]", "\"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test", "in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self):", "3) combined = utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined = list(combined.collect()) for idx,", "import src.utilities.utils as utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup function will", "package \"\"\" import unittest import warnings from pyspark import SparkContext, SparkConf import src.utilities.preprocess", "\"\"\" Unit tests for functions in the src.utilities package \"\"\" import unittest import", "[\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered),", "test_tokenize(self): # test that the function performs as expected line = \"the quick", "= SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) 
squares", "nums = list(range(0, 10)) squares = [num**2 for num in nums] pairs =", "\"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the function has no side effects word", "repeated for every test function \"\"\" pass def test_remove_html_character_references(self): # test that the", "sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined = list(combined.collect()) for", "master is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums", "is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums =", "functions in the src.utilities package \"\"\" import unittest import warnings from pyspark import", "will be called before any test is run. It's a useful place to", "in the src.utilities package \"\"\" import unittest import warnings from pyspark import SparkContext,", "have to be repeated for every test function \"\"\" pass def test_remove_html_character_references(self): #", "\"\"\" This setup function will be called before any test is run. It's", "a unittest throws a bunch of ResourceWarnings # check out this issue: https://github.com/requests/requests/issues/3912", "self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the function has no side effects word =", "self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") #", "TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup function will be called before any test", "= \"the quick brown \\t fox jumps \\r\\n over the lazy \\n dog\"", "test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens)", "src.utilities.utils as utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup function will be", "tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\",", "tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens)", "should work on RDDs with different numbers of slices rdd1 = sc.parallelize(nums, 5)", "in filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def setUp(self): # for some reason,", "num**2) for num in nums] # the custom zip function should work on", "running spark code within a unittest throws a bunch of ResourceWarnings # check", "cluster. 
A local master is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc =", "with different numbers of slices rdd1 = sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3)", "src.utilities.preprocess as preprocess import src.utilities.utils as utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This", "self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def setUp(self): # for some reason, running spark", "rdd2) combined = combined.sortByKey() combined = list(combined.collect()) for idx, tuple in enumerate(pairs): self.assertEqual(tuple,", "self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens =", "= SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) squares = [num**2 for num in", "useful place to do initialization that otherwise would have to be repeated for", "\"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\" in", "effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self): # test that the", "word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test that the function", "\"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the function has no side", "tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\"", "= preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered)", "[(num, num**2) for num in nums] # the custom zip function should work", "\"the quick brown \\t fox jumps \\r\\n over the lazy \\n dog\" tokens", "def test_tokenize(self): # test that the function performs as expected line = \"the", "nums] pairs = [(num, num**2) for num in nums] # the custom zip", "= list(range(0, 10)) squares = [num**2 for num in nums] pairs = [(num,", "\"snow\") # test that the function has no side effects word = \"Investment.\"", "function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"),", "\"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test that the function performs as", "self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") 
self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the function has no", "tokens) def test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\",", "tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens)", "test_remove_html_character_references(self): # test that the function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\")", "TestUtils(unittest.TestCase): def setUp(self): # for some reason, running spark code within a unittest", "\"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens)", "sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) squares = [num**2 for num in nums] pairs", "line = \"the quick brown \\t fox jumps \\r\\n over the lazy \\n", "self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def", "line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\"", "warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for this test, we don't want to run", "be repeated for every test function \"\"\" pass def test_remove_html_character_references(self): # test that", "custom zip function should work on RDDs with different numbers of slices rdd1", "\"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test", "test that the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\")", "test that the function has no side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word,", "preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test that the function performs as expected", "expected line = \"the quick brown \\t fox jumps \\r\\n over the lazy", "# test that the function performs as expected line = \"the quick brown", "= combined.sortByKey() combined = list(combined.collect()) for idx, tuple in enumerate(pairs): self.assertEqual(tuple, combined[idx]) if", "test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered", "4) self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered)", "test, we don't want to run on an actual cluster. 
A local master", "RDDs with different numbers of slices rdd1 = sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares,", "squares = [num**2 for num in nums] pairs = [(num, num**2) for num", "the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\")", "tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line", "as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"),", "for num in nums] # the custom zip function should work on RDDs", "= [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels)", "an actual cluster. A local master is sufficient conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true')", "test is run. It's a useful place to do initialization that otherwise would", "function has no side effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self):", "the lazy \\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens)", "self.assertEqual(word, \"Investment.\") def test_tokenize(self): # test that the function performs as expected line", "in tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens)", "\"E54\" \"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\"", "self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\"", "filtered) class TestUtils(unittest.TestCase): def setUp(self): # for some reason, running spark code within", "function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"),", "code within a unittest throws a bunch of ResourceWarnings # check out this", "is run. 
It's a useful place to do initialization that otherwise would have", "that the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"),", "bunch of ResourceWarnings # check out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self):", "this test, we don't want to run on an actual cluster. A local", "in nums] pairs = [(num, num**2) for num in nums] # the custom", "in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in", "warnings from pyspark import SparkContext, SparkConf import src.utilities.preprocess as preprocess import src.utilities.utils as", "test that the function performs as expected line = \"the quick brown \\t", "import SparkContext, SparkConf import src.utilities.preprocess as preprocess import src.utilities.utils as utils class TestPreprocess(unittest.TestCase):", "in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in", "self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the function has no side effects word =", "initialization that otherwise would have to be repeated for every test function \"\"\"", "as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"),", "run. It's a useful place to do initialization that otherwise would have to", "before any test is run. 
It's a useful place to do initialization that", "self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_remove_irrelevant_labels(self): labels =", "as preprocess import src.utilities.utils as utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup", "'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) squares = [num**2 for", "num in nums] # the custom zip function should work on RDDs with", "# test that the function has no side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word)", "dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens)", "self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in", "no side effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self): # test", "self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the function has no side effects", "has no side effects word = \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self): #", "out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for this test, we", "= preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in", "\"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in", "self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\"", "= preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in", "\"Investment&quot;\") def test_strip_punctuation(self): # test that the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\")", "test_strip_punctuation(self): # test that the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\")", "\"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in filtered)", "self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered) class", "sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey()", "= sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined = list(combined.collect())", "\"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") 
self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the function has", "sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) squares = [num**2 for num", "a useful place to do initialization that otherwise would have to be repeated", "on RDDs with different numbers of slices rdd1 = sc.parallelize(nums, 5) rdd2 =", "that the function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"),", "every test function \"\"\" pass def test_remove_html_character_references(self): # test that the function performs", "def test_remove_irrelevant_labels(self): labels = [\"GCAT\", \"CCAT\", \"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"]", "self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the function", "class TestUtils(unittest.TestCase): def setUp(self): # for some reason, running spark code within a", "combined = combined.sortByKey() combined = list(combined.collect()) for idx, tuple in enumerate(pairs): self.assertEqual(tuple, combined[idx])", "a bunch of ResourceWarnings # check out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def", "utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup function will be called before", "def test_strip_punctuation(self): # test that the function performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"),", "check out this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for this test,", "that otherwise would have to be repeated for every test function \"\"\" pass", "self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that", "tests for functions in the src.utilities package \"\"\" import unittest import warnings from", "do initialization that otherwise would have to be repeated for every test function", "\"\"\" pass def test_remove_html_character_references(self): # test that the function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"),", "list(combined.collect()) for idx, tuple in enumerate(pairs): self.assertEqual(tuple, combined[idx]) if __name__ == '__main__': unittest.main()", "performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") 
self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\")", "lazy \\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\"", "brown \\t fox jumps \\r\\n over the lazy \\n dog\" tokens = preprocess.tokenize(line)", "# for some reason, running spark code within a unittest throws a bunch", "filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def setUp(self): # for", "self.assertTrue(\"jumps\" in tokens) self.assertTrue(\"over\" in tokens) self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\"", "= \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in", "= \"Investment.\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment.\") def test_tokenize(self): # test that the function performs", "conf = SparkConf().setAppName(\"Unit Tests\").setMaster(\"local\").set('spark.logConf', 'true') sc = SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10))", "self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") # test that the function has no", "= sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2) combined =", "in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\"", "list(range(0, 10)) squares = [num**2 for num in nums] pairs = [(num, num**2)", "this issue: https://github.com/requests/requests/issues/3912 warnings.filterwarnings(action=\"ignore\", category=ResourceWarning) def test_custom_zip(self): # for this test, we don't", "function \"\"\" pass def test_remove_html_character_references(self): # test that the function performs as expected", "\\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in", "tokens) self.assertTrue(\"quick\" in tokens) self.assertTrue(\"brown\" in tokens) self.assertTrue(\"fox\" in tokens) self.assertTrue(\"jumps\" in tokens)", "in filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase):", "the custom zip function should work on RDDs with different numbers of slices", "that the function performs as expected line = \"the quick brown \\t fox", "self.assertTrue(\"the\" in tokens) self.assertTrue(\"lazy\" in tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line =", "no side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self): # test", "import unittest import warnings from pyspark import SparkContext, SparkConf import src.utilities.preprocess as preprocess", "It's a useful place to do initialization that otherwise would have to be", 
"SparkContext(conf=conf) sc.setLogLevel(\"FATAL\") nums = list(range(0, 10)) squares = [num**2 for num in nums]", "in nums] # the custom zip function should work on RDDs with different", "zip function should work on RDDs with different numbers of slices rdd1 =", "import src.utilities.preprocess as preprocess import src.utilities.utils as utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\"", "the function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\")", "jumps \\r\\n over the lazy \\n dog\" tokens = preprocess.tokenize(line) self.assertTrue(\"the\" in tokens)", "This setup function will be called before any test is run. It's a", "function has no side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def test_strip_punctuation(self):", "SparkContext, SparkConf import src.utilities.preprocess as preprocess import src.utilities.utils as utils class TestPreprocess(unittest.TestCase): def", "work on RDDs with different numbers of slices rdd1 = sc.parallelize(nums, 5) rdd2", "expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\")", "the function has no side effects word = \"Investment&quot;\" preprocess.remove_html_character_references(word) self.assertEqual(word, \"Investment&quot;\") def", "\"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"air&amp;plane\"), \"airplane\") self.assertEqual(preprocess.remove_html_character_references(\"&quot;government&quot;\"), \"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the", "performs as expected self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\")", "<filename>tests/test_utilities.py \"\"\" Unit tests for functions in the src.utilities package \"\"\" import unittest", "run on an actual cluster. 
A local master is sufficient conf = SparkConf().setAppName(\"Unit", "filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\" in filtered) self.assertTrue(\"CCAT\" in filtered) self.assertTrue(\"ECAT\" in", "filtered) self.assertTrue(\"MCAT\" in filtered) class TestUtils(unittest.TestCase): def setUp(self): # for some reason, running", "# test that the function performs as expected self.assertEqual(preprocess.remove_html_character_references(\"&quot;snow\"), \"snow\") self.assertEqual(preprocess.remove_html_character_references(\"desk&quot;\"), \"desk\") self.assertEqual(preprocess.remove_html_character_references(\"airplane&amp;\"),", "# test that the function has no side effects word = \"Investment.\" preprocess.remove_html_character_references(word)", "utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined = list(combined.collect()) for idx, tuple in enumerate(pairs):", "self.assertEqual(preprocess.strip_punctuation(\"'snow\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow.\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow!\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"?snow?\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"snow\\\"\"), \"snow\") self.assertEqual(preprocess.strip_punctuation(\"sn!ow\"), \"snow\") #", "preprocess import src.utilities.utils as utils class TestPreprocess(unittest.TestCase): def setUp(self): \"\"\" This setup function", "different numbers of slices rdd1 = sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3) combined", "slices rdd1 = sc.parallelize(nums, 5) rdd2 = sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2)", "for this test, we don't want to run on an actual cluster. A", "\"government\") self.assertEqual(preprocess.remove_html_character_references(\"Desk&quot;\"), \"Desk\") # test that the function has no side effects word", "rdd2 = sc.parallelize(squares, 3) combined = utils.custom_zip(rdd1, rdd2) combined = combined.sortByKey() combined =", "in filtered) class TestUtils(unittest.TestCase): def setUp(self): # for some reason, running spark code", "tokens) self.assertTrue(\"dog\" in tokens) def test_split_by_comma(self): line = \"the,quick,brown,fox,jumps,over,the,lazy,dog\" tokens = preprocess.split_by_comma(line) self.assertTrue(\"the\"", "for some reason, running spark code within a unittest throws a bunch of", "pairs = [(num, num**2) for num in nums] # the custom zip function", "\"ECAT\", \"MCAT\", \"E12\", \"E54\" \"G154\", \"M13\", \"GWEA\"] filtered = preprocess.remove_irrelevant_labels(labels) self.assertEqual(len(filtered), 4) self.assertTrue(\"GCAT\"", "reason, running spark code within a unittest throws a bunch of ResourceWarnings #", "spark code within a unittest throws a bunch of ResourceWarnings # check out", "we don't want to run on an actual cluster. 
class TestUtils(unittest.TestCase):
    def setUp(self):
        # for some reason, running spark code within a unittest throws a bunch
        # of ResourceWarnings; check out ...
        warnings.simplefilter("ignore", ResourceWarning)

    def test_custom_zip(self):
        # for this test, we don't want to run on an actual cluster.
        # A local master is sufficient
        conf = SparkConf().setAppName("Unit Tests").setMaster("local")
        sc = SparkContext(conf=conf)
        sc.setLogLevel("FATAL")
        nums = list(range(0, 10))
        squares = [num**2 for num in nums]
        pairs = [(num, num**2) for num in nums]
        # the custom zip function should work on RDDs with different numbers of slices
        rdd1 = sc.parallelize(nums, 5)
        rdd2 = sc.parallelize(squares, 3)
        combined = utils.custom_zip(rdd1, rdd2)
        combined = combined.sortByKey()
        combined = list(combined.collect())
        for idx, pair in enumerate(pairs):
            self.assertEqual(pair, combined[idx])


if __name__ == '__main__':
    unittest.main()
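PySpark's built-in RDD.zip assumes both RDDs have the same number of partitions and the same number of elements per partition, which is exactly why the test above parallelizes with 5 and 3 slices. A slice-independent zip can be built by keying both RDDs on element position and joining; the sketch below is an illustrative assumption, not the project's actual src.utilities.utils.custom_zip.

def custom_zip_sketch(rdd1, rdd2):
    # key every element by its global position in the RDD
    left = rdd1.zipWithIndex().map(lambda x: (x[1], x[0]))
    right = rdd2.zipWithIndex().map(lambda x: (x[1], x[0]))
    # join on position, then drop the index to get (left_elem, right_elem)
    return left.join(right).map(lambda kv: kv[1])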
[ "application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\" # Retrieve data from", "\"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data", "# Remove unneeded product data data = [] for i in range(len(unfiltered_data)): data.append(", "Morphware headers = { # Already added when you pass json= # 'Content-Type':", "headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove unneeded product data", "for i in range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"],", "} ) ) print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) # Get size of encoded", "import Morphware def main(): \"\"\" Print the size of the data returned by", "returned by a Morphware query using Morphare-provided data source endpoint: curl --request POST", "len(unfiltered_data)) # Remove unneeded product data data = [] for i in range(len(unfiltered_data)):", "of the data returned by a Morphware query (data reported).\"\"\" import json import", "requests from telliot_core.queries.morphware import Morphware def main(): \"\"\" Print the size of the", "\"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\":", "Remove unneeded product data data = [] for i in range(len(unfiltered_data)): data.append( json.dumps(", "# 'Content-Type': 'application/json', } json_data = { \"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\",", "by a Morphware query using Morphare-provided data source endpoint: curl --request POST http://167.172.239.133:5000/products-2", "of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], } ) ) print(\"Expample", "in range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number", "{ # Already added when you pass json= # 'Content-Type': 'application/json', } json_data", "when you pass json= # 'Content-Type': 'application/json', } json_data = { \"provider\": \"amazon\",", "json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\":", "--request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py", "i in range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\":", "sys import requests from telliot_core.queries.morphware import Morphware def main(): \"\"\" Print the size", "Morphware query using Morphare-provided data source endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type:", "curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: 
application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python", "rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove", "encoded string[] q = Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of data being reported:", "a Morphware query using Morphare-provided data source endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H", "unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand", "q = Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of data being reported: {sys.getsizeof(submit_value)} bytes\")", "import json import sys import requests from telliot_core.queries.morphware import Morphware def main(): \"\"\"", "pass json= # 'Content-Type': 'application/json', } json_data = { \"provider\": \"amazon\", \"service\": \"compute\",", "range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of", "# Already added when you pass json= # 'Content-Type': 'application/json', } json_data =", "http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\" #", "the size of the data returned by a Morphware query (data reported).\"\"\" import", "json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove unneeded product data data = [] for", "print(f\"Size of data being reported: {sys.getsizeof(submit_value)} bytes\") # print(submit_value.hex()) if __name__ == \"__main__\":", "<filename>scripts/morphware_data_sizes.py \"\"\"Print the size of the data returned by a Morphware query (data", "} rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) #", "[] for i in range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA", "Already added when you pass json= # 'Content-Type': 'application/json', } json_data = {", "\"\"\"Print the size of the data returned by a Morphware query (data reported).\"\"\"", "of the data returned by a Morphware query using Morphare-provided data source endpoint:", "print(\"Num products:\", len(unfiltered_data)) # Remove unneeded product data data = [] for i", "\"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per", "'application/json', } json_data = { \"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", } rsp", ") print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) # Get size of encoded string[] q", "unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"],", "= { 
\"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers,", "= json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove unneeded product data data = []", "\"\"\" Print the size of the data returned by a Morphware query using", "{ \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"],", "data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"],", "Morphware def main(): \"\"\" Print the size of the data returned by a", "source endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example:", "headers = { # Already added when you pass json= # 'Content-Type': 'application/json',", "endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $", "\"Content-Type: application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\" # Retrieve data", "= Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of data being reported: {sys.getsizeof(submit_value)} bytes\") #", "# Retrieve data from source provided by Morphware headers = { # Already", "filtered data:\") print(json.dumps(data[:2], indent=4)) # Get size of encoded string[] q = Morphware(version=1)", "data:\") print(json.dumps(data[:2], indent=4)) # Get size of encoded string[] q = Morphware(version=1) submit_value", "added when you pass json= # 'Content-Type': 'application/json', } json_data = { \"provider\":", "size of the data returned by a Morphware query (data reported).\"\"\" import json", "the size of the data returned by a Morphware query using Morphare-provided data", "reported).\"\"\" import json import sys import requests from telliot_core.queries.morphware import Morphware def main():", "\"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data =", "json_data = { \"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\",", "by a Morphware query (data reported).\"\"\" import json import sys import requests from", "data data = [] for i in range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance", "size of encoded string[] q = Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of data", "POST http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\"", "of encoded string[] q = Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of data being", "query (data reported).\"\"\" import json import sys import requests from telliot_core.queries.morphware import Morphware", "Example: $ python scripts/morphware_data_sizes.py 
\"\"\" # Retrieve data from source provided by Morphware", "= q.value_type.encode(data) print(f\"Size of data being reported: {sys.getsizeof(submit_value)} bytes\") # print(submit_value.hex()) if __name__", "Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], }", "'Content-Type': 'application/json', } json_data = { \"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", }", "\\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\" # Retrieve data from source", "# Get size of encoded string[] q = Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size", "telliot_core.queries.morphware import Morphware def main(): \"\"\" Print the size of the data returned", "python scripts/morphware_data_sizes.py \"\"\" # Retrieve data from source provided by Morphware headers =", "(data reported).\"\"\" import json import sys import requests from telliot_core.queries.morphware import Morphware def", "Morphare-provided data source endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\" \\ -d", "\"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], } )", "{ \"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data)", "-d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\" # Retrieve data from source provided", "unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], } ) )", "Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of data being reported: {sys.getsizeof(submit_value)} bytes\") # print(submit_value.hex())", "} json_data = { \"provider\": \"amazon\", \"service\": \"compute\", \"region\": \"us-east-1\", } rsp =", "= { # Already added when you pass json= # 'Content-Type': 'application/json', }", "\"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], } ) ) print(\"Expample filtered data:\")", "Retrieve data from source provided by Morphware headers = { # Already added", "Print the size of the data returned by a Morphware query using Morphare-provided", "products:\", len(unfiltered_data)) # Remove unneeded product data data = [] for i in", "\"compute\", \"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num", "provided by Morphware headers = { # Already added when you pass json=", "data from source provided by Morphware headers = { # Already added when", "you pass json= # 'Content-Type': 'application/json', } json_data = { \"provider\": \"amazon\", \"service\":", "Morphware query (data reported).\"\"\" import json import sys import requests from telliot_core.queries.morphware import", "indent=4)) # Get size of encoded string[] q = Morphware(version=1) submit_value = q.value_type.encode(data)", "data source endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: 
application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}'", "per Hour\"], } ) ) print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) # Get size", "q.value_type.encode(data) print(f\"Size of data being reported: {sys.getsizeof(submit_value)} bytes\") # print(submit_value.hex()) if __name__ ==", "from source provided by Morphware headers = { # Already added when you", "the data returned by a Morphware query (data reported).\"\"\" import json import sys", "product data data = [] for i in range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\":", "the data returned by a Morphware query using Morphare-provided data source endpoint: curl", "data = [] for i in range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"],", "CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], } ) ) print(\"Expample filtered", "Hour\"], } ) ) print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) # Get size of", ") ) print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) # Get size of encoded string[]", "print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) # Get size of encoded string[] q =", "import requests from telliot_core.queries.morphware import Morphware def main(): \"\"\" Print the size of", "= requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove unneeded", "scripts/morphware_data_sizes.py \"\"\" # Retrieve data from source provided by Morphware headers = {", "\"\"\" # Retrieve data from source provided by Morphware headers = { #", "Type\"], \"cudaCores\": unfiltered_data[i][\"CUDA Cores\"], \"numCPUs\": unfiltered_data[i][\"Number of CPUs\"], \"RAM\": unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price", "\"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\",", "json= # 'Content-Type': 'application/json', } json_data = { \"provider\": \"amazon\", \"service\": \"compute\", \"region\":", "json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove unneeded product data data", "'{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\" # Retrieve data from source provided by", "query using Morphare-provided data source endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\"", "\"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data))", "json import sys import requests from telliot_core.queries.morphware import Morphware def main(): \"\"\" Print", "requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove unneeded product", "print(json.dumps(data[:2], indent=4)) # Get size of encoded string[] q = Morphware(version=1) submit_value =", "main(): \"\"\" Print the size of the data returned by a Morphware query", "= [] for i in 
range(len(unfiltered_data)): data.append( json.dumps( { \"instanceType\": unfiltered_data[i][\"Instance Type\"], \"cudaCores\":", "unfiltered_data[i][\"RAM\"], \"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], } ) ) print(\"Expample filtered data:\") print(json.dumps(data[:2],", "a Morphware query (data reported).\"\"\" import json import sys import requests from telliot_core.queries.morphware", "unfiltered_data[i][\"On-demand Price per Hour\"], } ) ) print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) #", "unneeded product data data = [] for i in range(len(unfiltered_data)): data.append( json.dumps( {", "Price per Hour\"], } ) ) print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4)) # Get", "from telliot_core.queries.morphware import Morphware def main(): \"\"\" Print the size of the data", "Get size of encoded string[] q = Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of", "of data being reported: {sys.getsizeof(submit_value)} bytes\") # print(submit_value.hex()) if __name__ == \"__main__\": main()", "unfiltered_data = json.loads(rsp.text) print(\"Num products:\", len(unfiltered_data)) # Remove unneeded product data data =", "returned by a Morphware query (data reported).\"\"\" import json import sys import requests", "$ python scripts/morphware_data_sizes.py \"\"\" # Retrieve data from source provided by Morphware headers", "source provided by Morphware headers = { # Already added when you pass", "data returned by a Morphware query using Morphare-provided data source endpoint: curl --request", "by Morphware headers = { # Already added when you pass json= #", "submit_value = q.value_type.encode(data) print(f\"Size of data being reported: {sys.getsizeof(submit_value)} bytes\") # print(submit_value.hex()) if", "import sys import requests from telliot_core.queries.morphware import Morphware def main(): \"\"\" Print the", "\"onDemandPricePerHour\": unfiltered_data[i][\"On-demand Price per Hour\"], } ) ) print(\"Expample filtered data:\") print(json.dumps(data[:2], indent=4))", "data returned by a Morphware query (data reported).\"\"\" import json import sys import", "-H \"Content-Type: application/json\" \\ -d '{\"provider\":\"amazon\",\"service\":\"compute\",\"region\":\"us-east-1\"}' Example: $ python scripts/morphware_data_sizes.py \"\"\" # Retrieve", "\"service\": \"compute\", \"region\": \"us-east-1\", } rsp = requests.post(\"http://172.16.31.10:5000/products-2\", headers=headers, json=json_data) unfiltered_data = json.loads(rsp.text)", "using Morphare-provided data source endpoint: curl --request POST http://167.172.239.133:5000/products-2 -H \"Content-Type: application/json\" \\", "def main(): \"\"\" Print the size of the data returned by a Morphware", "string[] q = Morphware(version=1) submit_value = q.value_type.encode(data) print(f\"Size of data being reported: {sys.getsizeof(submit_value)}", "size of the data returned by a Morphware query using Morphare-provided data source" ]
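One caveat, assuming submit_value is a bytes object (the commented-out .hex() call suggests it is): sys.getsizeof includes CPython's per-object header, so len() is the more direct measure of the encoded payload itself.

import sys

payload = b"\x00" * 100          # stand-in for an encoded submit_value
print(sys.getsizeof(payload))    # ~133 on 64-bit CPython: payload plus object header
print(len(payload))              # 100: the encoded bytes themselves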
[ "min_delta : float, optional minimum difference between new loss and old loss for", "to predict on device : str 'cuda' or 'cpu' model : torch.nn.Module model", "str 'regression' or 'classification', for the latter an arg max is applied dim", "loss loss function argmax : bool, optional if True, an arg max is", "is 0.) \"\"\" self.patience = patience self.min_delta = min_delta self.counter = 0 self.best_loss", "data_batch[:, :, dim:2*dim]), 2), data_batch[:, :, -1]) if argmax: pred = pred.argmax(1) pred.append(out);", "device='cuda'): \"\"\"Predicts and computes the loss over the provided data Parameters ---------- model", "with torch.no_grad(): out = model(test_batch[:, :, :dim*2], test_batch[:, :, -1]) if model_type=='classification': out", "between new loss and old loss for new loss to be considered as", "series loss : torch loss loss function argmax : bool, optional if True,", ": torch loss loss function argmax : bool, optional if True, an arg", "or float \"\"\" if self.best_loss == None: self.best_loss = val_loss elif self.best_loss -", "an improvement counter : int number of epochs without improvement best_loss : float", "self.min_delta: self.counter += 1 if self.counter >= self.patience: print('Early stopping') self.early_stop = True", "- val_loss < self.min_delta: self.counter += 1 if self.counter >= self.patience: print('Early stopping')", "counter was equal to 0 early_stop : bool if True, the training will", ": int number of time series loss : torch loss loss function argmax", "model_type=='classification': out = out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def count_parameters(model): \"\"\"Returns the number", "max is applied dim : int number of time series Returns ------- torch.tensor", "evaluate_model(model, data_loader, dim, loss, argmax=False, device='cuda'): \"\"\"Predicts and computes the loss over the", "argmax: pred = pred.argmax(1) pred.append(out); true.append(label) pred = torch.cat(pred, 0).squeeze() true = torch.cat(true,", "import torch import torch.nn as nn import torch.nn.functional as F def rmspe(pred, true):", "loss and old loss for new loss to be considered as an improvement", "true): \"\"\"Computes Accuracy\"\"\" return torch.mean(pred == true) def evaluate_model(model, data_loader, dim, loss, argmax=False,", "pred = pred.argmax(1) pred.append(out); true.append(label) pred = torch.cat(pred, 0).squeeze() true = torch.cat(true, 0).squeeze()", "can be trained in the provided model\"\"\" return sum(p.numel() for p in model.parameters()", "trained in the provided model\"\"\" return sum(p.numel() for p in model.parameters() if p.requires_grad)", "self.best_loss == None: self.best_loss = val_loss elif self.best_loss - val_loss > self.min_delta: self.best_loss", "Accuracy\"\"\" return torch.mean(pred == true) def evaluate_model(model, data_loader, dim, loss, argmax=False, device='cuda'): \"\"\"Predicts", "how many epochs to wait before stopping when loss is not improving min_delta", "if validation loss improves self.counter = 0 elif self.best_loss - val_loss < self.min_delta:", "bool if True, the training will break \"\"\" def __init__(self, patience=5, min_delta=0.): \"\"\"", "break \"\"\" def __init__(self, patience=5, min_delta=0.): \"\"\" Parameters --------- patience : int, optional", "def rmspe(pred, true): \"\"\"Computes RMSPE\"\"\" return torch.mean(((true - pred) / true)**2)**0.5 def rmse(pred,", "pred.append(out); true.append(label) pred = torch.cat(pred, 0).squeeze() true = torch.cat(true, 0).squeeze() return loss(pred, true)", 
"epoch for which the counter was equal to 0 early_stop : bool if", "pred = []; true = [] for data_batch, label in data_loader: data_batch, label", "be considered as an improvement (default is 0.) \"\"\" self.patience = patience self.min_delta", "torch.tensor \"\"\" pred = [] for test_batch in test_loader: test_batch = test_batch.to(device) with", "not improve after certain epochs. Attributes ---------- patience : int how many epochs", "int number of epochs without improvement best_loss : float or NoneType validation loss", "loss to be considered as an improvement (default is 0.) \"\"\" self.patience =", "- pred) / true)) def acc(pred, true): \"\"\"Computes Accuracy\"\"\" return torch.mean(pred == true)", "is 'cuda') Returns ------- float \"\"\" pred = []; true = [] for", "out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def count_parameters(model): \"\"\"Returns the number of weights that", "model(test_batch[:, :, :dim*2], test_batch[:, :, -1]) if model_type=='classification': out = out.argmax(1) pred.append(out) return", "None self.early_stop = False def __call__(self, val_loss): \"\"\" Parameters ---------- val_loss : torch", "True, the training will break \"\"\" def __init__(self, patience=5, min_delta=0.): \"\"\" Parameters ---------", "self.counter = 0 self.best_loss = None self.early_stop = False def __call__(self, val_loss): \"\"\"", "epochs to wait before stopping when loss is not improving min_delta : float", "RMSPE\"\"\" return torch.mean(((true - pred) / true)**2)**0.5 def rmse(pred, true): \"\"\"Computes RMSE\"\"\" return", "'cuda') Returns ------- float \"\"\" pred = []; true = [] for data_batch,", "new loss to be considered as an improvement counter : int number of", "\"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true - pred) / true)) def acc(pred, true): \"\"\"Computes Accuracy\"\"\"", "F def rmspe(pred, true): \"\"\"Computes RMSPE\"\"\" return torch.mean(((true - pred) / true)**2)**0.5 def", "device : str, optional 'cuda' or 'cpu', (default is 'cuda') Returns ------- float", "= torch.cat(pred, 0).squeeze() true = torch.cat(true, 0).squeeze() return loss(pred, true) def predict(test_loader, device,", ": torch.utils.data.DataLoader data to predict on device : str 'cuda' or 'cpu' model", "to be considered as an improvement counter : int number of epochs without", "predict(test_loader, device, model, model_type, dim): \"\"\" Parameters ---------- test_loader : torch.utils.data.DataLoader data to", "be considered as an improvement counter : int number of epochs without improvement", "\"\"\" Parameters ---------- val_loss : torch loss or float \"\"\" if self.best_loss ==", "\"\"\" Parameters --------- patience : int, optional how many epochs to wait before", "the provided data Parameters ---------- model : torch.nn.Module model containing the weights to", "\"\"\" Parameters ---------- test_loader : torch.utils.data.DataLoader data to predict on device : str", "data to predict on device : str 'cuda' or 'cpu' model : torch.nn.Module", "to stop the training when the loss does not improve after certain epochs.", "model containing the weights to compute the predictions model_type : str 'regression' or", "\"\"\" if self.best_loss == None: self.best_loss = val_loss elif self.best_loss - val_loss >", "= val_loss elif self.best_loss - val_loss > self.min_delta: self.best_loss = val_loss # reset", "\"\"\"Computes RMSPE\"\"\" return torch.mean(((true - pred) / true)**2)**0.5 def rmse(pred, true): \"\"\"Computes RMSE\"\"\"", "def acc(pred, true): \"\"\"Computes 
Accuracy\"\"\" return torch.mean(pred == true) def evaluate_model(model, data_loader, dim,", "float minimum difference between new loss and old loss for new loss to", "int number of time series loss : torch loss loss function argmax :", "considered as an improvement counter : int number of epochs without improvement best_loss", "true)) def acc(pred, true): \"\"\"Computes Accuracy\"\"\" return torch.mean(pred == true) def evaluate_model(model, data_loader,", "== None: self.best_loss = val_loss elif self.best_loss - val_loss > self.min_delta: self.best_loss =", "False def __call__(self, val_loss): \"\"\" Parameters ---------- val_loss : torch loss or float", "for p in model.parameters() if p.requires_grad) class EarlyStopping(): \"\"\" Early stopping to stop", "(default is 5) min_delta : float, optional minimum difference between new loss and", "patience=5, min_delta=0.): \"\"\" Parameters --------- patience : int, optional how many epochs to", "true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true - pred) / true)) def acc(pred, true): \"\"\"Computes", "int how many epochs to wait before stopping when loss is not improving", "certain epochs. Attributes ---------- patience : int how many epochs to wait before", ": float or NoneType validation loss of the last epoch for which the", "time series Returns ------- torch.tensor \"\"\" pred = [] for test_batch in test_loader:", "to 0 early_stop : bool if True, the training will break \"\"\" def", "when the loss does not improve after certain epochs. Attributes ---------- patience :", "which the counter was equal to 0 early_stop : bool if True, the", "str, optional 'cuda' or 'cpu', (default is 'cuda') Returns ------- float \"\"\" pred", "torch.utils.data.DataLoader data to evaluate dim : int number of time series loss :", "weights to compute the predictions data_loader : torch.utils.data.DataLoader data to evaluate dim :", "\"\"\" pred = []; true = [] for data_batch, label in data_loader: data_batch,", "True, an arg max is applied to predicted values (default is False) device", "0).squeeze() return loss(pred, true) def predict(test_loader, device, model, model_type, dim): \"\"\" Parameters ----------", "self.min_delta = min_delta self.counter = 0 self.best_loss = None self.early_stop = False def", ": int number of epochs without improvement best_loss : float or NoneType validation", "the latter an arg max is applied dim : int number of time", "dim): \"\"\" Parameters ---------- test_loader : torch.utils.data.DataLoader data to predict on device :", "-1]) if argmax: pred = pred.argmax(1) pred.append(out); true.append(label) pred = torch.cat(pred, 0).squeeze() true", "over the provided data Parameters ---------- model : torch.nn.Module model containing the weights", "= None self.early_stop = False def __call__(self, val_loss): \"\"\" Parameters ---------- val_loss :", "is applied to predicted values (default is False) device : str, optional 'cuda'", "def __init__(self, patience=5, min_delta=0.): \"\"\" Parameters --------- patience : int, optional how many", "loss is not improving (default is 5) min_delta : float, optional minimum difference", "Early stopping to stop the training when the loss does not improve after", "the last epoch for which the counter was equal to 0 early_stop :", "in the provided model\"\"\" return sum(p.numel() for p in model.parameters() if p.requires_grad) class", "data_loader: data_batch, label = data_batch.to(device), label.float().to(device) with torch.no_grad(): out = model(torch.cat((data_batch[:, :, :dim],", 
"\"\"\" self.patience = patience self.min_delta = min_delta self.counter = 0 self.best_loss = None", "the loss does not improve after certain epochs. Attributes ---------- patience : int", "out = out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def count_parameters(model): \"\"\"Returns the number of", "containing the weights to compute the predictions data_loader : torch.utils.data.DataLoader data to evaluate", "improving min_delta : float minimum difference between new loss and old loss for", "improving (default is 5) min_delta : float, optional minimum difference between new loss", "p in model.parameters() if p.requires_grad) class EarlyStopping(): \"\"\" Early stopping to stop the", ":dim*2], test_batch[:, :, -1]) if model_type=='classification': out = out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze()", "the training will break \"\"\" def __init__(self, patience=5, min_delta=0.): \"\"\" Parameters --------- patience", "------- torch.tensor \"\"\" pred = [] for test_batch in test_loader: test_batch = test_batch.to(device)", "0).squeeze() def count_parameters(model): \"\"\"Returns the number of weights that can be trained in", "= out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def count_parameters(model): \"\"\"Returns the number of weights", "return torch.mean(torch.abs((true - pred) / true)) def acc(pred, true): \"\"\"Computes Accuracy\"\"\" return torch.mean(pred", "(default is False) device : str, optional 'cuda' or 'cpu', (default is 'cuda')", "of weights that can be trained in the provided model\"\"\" return sum(p.numel() for", "torch.cat(true, 0).squeeze() return loss(pred, true) def predict(test_loader, device, model, model_type, dim): \"\"\" Parameters", "model : torch.nn.Module model containing the weights to compute the predictions data_loader :", "torch import torch.nn as nn import torch.nn.functional as F def rmspe(pred, true): \"\"\"Computes", "with torch.no_grad(): out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :, dim:2*dim]), 2), data_batch[:, :,", "self.best_loss = val_loss elif self.best_loss - val_loss > self.min_delta: self.best_loss = val_loss #", "return torch.mean((true - pred)**2)**0.5 def mape(pred, true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true - pred)", "return torch.mean(((true - pred) / true)**2)**0.5 def rmse(pred, true): \"\"\"Computes RMSE\"\"\" return torch.mean((true", "if self.best_loss == None: self.best_loss = val_loss elif self.best_loss - val_loss > self.min_delta:", "mape(pred, true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true - pred) / true)) def acc(pred, true):", "to evaluate dim : int number of time series loss : torch loss", "and old loss for new loss to be considered as an improvement counter", "max is applied to predicted values (default is False) device : str, optional", "Parameters --------- patience : int, optional how many epochs to wait before stopping", "= 0 elif self.best_loss - val_loss < self.min_delta: self.counter += 1 if self.counter", "torch loss or float \"\"\" if self.best_loss == None: self.best_loss = val_loss elif", "out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :, dim:2*dim]), 2), data_batch[:, :, -1]) if", "argmax=False, device='cuda'): \"\"\"Predicts and computes the loss over the provided data Parameters ----------", ": str 'regression' or 'classification', for the latter an arg max is applied", "number of time series Returns ------- torch.tensor \"\"\" pred = [] for test_batch", "loss to be considered 
as an improvement counter : int number of epochs", "argmax : bool, optional if True, an arg max is applied to predicted", ": bool if True, the training will break \"\"\" def __init__(self, patience=5, min_delta=0.):", "true = [] for data_batch, label in data_loader: data_batch, label = data_batch.to(device), label.float().to(device)", "--------- patience : int, optional how many epochs to wait before stopping when", ":dim], data_batch[:, :, dim:2*dim]), 2), data_batch[:, :, -1]) if argmax: pred = pred.argmax(1)", "to compute the predictions model_type : str 'regression' or 'classification', for the latter", "new loss to be considered as an improvement (default is 0.) \"\"\" self.patience", "early_stop : bool if True, the training will break \"\"\" def __init__(self, patience=5,", "0).squeeze() true = torch.cat(true, 0).squeeze() return loss(pred, true) def predict(test_loader, device, model, model_type,", "\"\"\"Computes RMSE\"\"\" return torch.mean((true - pred)**2)**0.5 def mape(pred, true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true", "= [] for data_batch, label in data_loader: data_batch, label = data_batch.to(device), label.float().to(device) with", ": float, optional minimum difference between new loss and old loss for new", "0 self.best_loss = None self.early_stop = False def __call__(self, val_loss): \"\"\" Parameters ----------", "pred = [] for test_batch in test_loader: test_batch = test_batch.to(device) with torch.no_grad(): out", "loss improves self.counter = 0 elif self.best_loss - val_loss < self.min_delta: self.counter +=", "0 elif self.best_loss - val_loss < self.min_delta: self.counter += 1 if self.counter >=", "as an improvement counter : int number of epochs without improvement best_loss :", "class EarlyStopping(): \"\"\" Early stopping to stop the training when the loss does", "stop the training when the loss does not improve after certain epochs. Attributes", "if True, an arg max is applied to predicted values (default is False)", "number of epochs without improvement best_loss : float or NoneType validation loss of", "float or NoneType validation loss of the last epoch for which the counter", "computes the loss over the provided data Parameters ---------- model : torch.nn.Module model", "elif self.best_loss - val_loss < self.min_delta: self.counter += 1 if self.counter >= self.patience:", "as nn import torch.nn.functional as F def rmspe(pred, true): \"\"\"Computes RMSPE\"\"\" return torch.mean(((true", "how many epochs to wait before stopping when loss is not improving (default", "series Returns ------- torch.tensor \"\"\" pred = [] for test_batch in test_loader: test_batch", "= min_delta self.counter = 0 self.best_loss = None self.early_stop = False def __call__(self,", "(default is 'cuda') Returns ------- float \"\"\" pred = []; true = []", "loss(pred, true) def predict(test_loader, device, model, model_type, dim): \"\"\" Parameters ---------- test_loader :", ":, -1]) if argmax: pred = pred.argmax(1) pred.append(out); true.append(label) pred = torch.cat(pred, 0).squeeze()", "after certain epochs. 
Attributes ---------- patience : int how many epochs to wait", "Returns ------- float \"\"\" pred = []; true = [] for data_batch, label", "\"\"\"Predicts and computes the loss over the provided data Parameters ---------- model :", "- pred)**2)**0.5 def mape(pred, true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true - pred) / true))", "arg max is applied to predicted values (default is False) device : str,", "test_batch in test_loader: test_batch = test_batch.to(device) with torch.no_grad(): out = model(test_batch[:, :, :dim*2],", "test_batch.to(device) with torch.no_grad(): out = model(test_batch[:, :, :dim*2], test_batch[:, :, -1]) if model_type=='classification':", "optional 'cuda' or 'cpu', (default is 'cuda') Returns ------- float \"\"\" pred =", "import torch.nn as nn import torch.nn.functional as F def rmspe(pred, true): \"\"\"Computes RMSPE\"\"\"", "applied to predicted values (default is False) device : str, optional 'cuda' or", "new loss and old loss for new loss to be considered as an", "applied dim : int number of time series Returns ------- torch.tensor \"\"\" pred", "counter if validation loss improves self.counter = 0 elif self.best_loss - val_loss <", "is not improving (default is 5) min_delta : float, optional minimum difference between", "improvement (default is 0.) \"\"\" self.patience = patience self.min_delta = min_delta self.counter =", "improvement counter : int number of epochs without improvement best_loss : float or", "minimum difference between new loss and old loss for new loss to be", "the predictions data_loader : torch.utils.data.DataLoader data to evaluate dim : int number of", "'cuda' or 'cpu', (default is 'cuda') Returns ------- float \"\"\" pred = [];", "self.best_loss = None self.early_stop = False def __call__(self, val_loss): \"\"\" Parameters ---------- val_loss", "model_type, dim): \"\"\" Parameters ---------- test_loader : torch.utils.data.DataLoader data to predict on device", "---------- model : torch.nn.Module model containing the weights to compute the predictions data_loader", "return sum(p.numel() for p in model.parameters() if p.requires_grad) class EarlyStopping(): \"\"\" Early stopping", "true): \"\"\"Computes RMSPE\"\"\" return torch.mean(((true - pred) / true)**2)**0.5 def rmse(pred, true): \"\"\"Computes", "self.min_delta: self.best_loss = val_loss # reset counter if validation loss improves self.counter =", "RMSE\"\"\" return torch.mean((true - pred)**2)**0.5 def mape(pred, true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true -", ": torch.utils.data.DataLoader data to evaluate dim : int number of time series loss", "val_loss elif self.best_loss - val_loss > self.min_delta: self.best_loss = val_loss # reset counter", "def evaluate_model(model, data_loader, dim, loss, argmax=False, device='cuda'): \"\"\"Predicts and computes the loss over", "the provided model\"\"\" return sum(p.numel() for p in model.parameters() if p.requires_grad) class EarlyStopping():", "loss of the last epoch for which the counter was equal to 0", "when loss is not improving (default is 5) min_delta : float, optional minimum", "without improvement best_loss : float or NoneType validation loss of the last epoch", "[]; true = [] for data_batch, label in data_loader: data_batch, label = data_batch.to(device),", "for test_batch in test_loader: test_batch = test_batch.to(device) with torch.no_grad(): out = model(test_batch[:, :,", "that can be trained in the provided model\"\"\" return sum(p.numel() for p in", "wait before stopping when loss is 
not improving min_delta : float minimum difference", "model\"\"\" return sum(p.numel() for p in model.parameters() if p.requires_grad) class EarlyStopping(): \"\"\" Early", "loss for new loss to be considered as an improvement (default is 0.)", "test_loader : torch.utils.data.DataLoader data to predict on device : str 'cuda' or 'cpu'", "time series loss : torch loss loss function argmax : bool, optional if", "for new loss to be considered as an improvement counter : int number", "torch.mean((true - pred)**2)**0.5 def mape(pred, true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true - pred) /", "Parameters ---------- model : torch.nn.Module model containing the weights to compute the predictions", "the weights to compute the predictions data_loader : torch.utils.data.DataLoader data to evaluate dim", "patience : int, optional how many epochs to wait before stopping when loss", "= val_loss # reset counter if validation loss improves self.counter = 0 elif", "torch.no_grad(): out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :, dim:2*dim]), 2), data_batch[:, :, -1])", "best_loss : float or NoneType validation loss of the last epoch for which", "the predictions model_type : str 'regression' or 'classification', for the latter an arg", ": str, optional 'cuda' or 'cpu', (default is 'cuda') Returns ------- float \"\"\"", "validation loss improves self.counter = 0 elif self.best_loss - val_loss < self.min_delta: self.counter", "'cpu', (default is 'cuda') Returns ------- float \"\"\" pred = []; true =", "pred.argmax(1) pred.append(out); true.append(label) pred = torch.cat(pred, 0).squeeze() true = torch.cat(true, 0).squeeze() return loss(pred,", "epochs without improvement best_loss : float or NoneType validation loss of the last", "int, optional how many epochs to wait before stopping when loss is not", "elif self.best_loss - val_loss > self.min_delta: self.best_loss = val_loss # reset counter if", "containing the weights to compute the predictions model_type : str 'regression' or 'classification',", "predicted values (default is False) device : str, optional 'cuda' or 'cpu', (default", ": str 'cuda' or 'cpu' model : torch.nn.Module model containing the weights to", "optional minimum difference between new loss and old loss for new loss to", "label = data_batch.to(device), label.float().to(device) with torch.no_grad(): out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :,", "MAPE\"\"\" return torch.mean(torch.abs((true - pred) / true)) def acc(pred, true): \"\"\"Computes Accuracy\"\"\" return", "data_batch[:, :, -1]) if argmax: pred = pred.argmax(1) pred.append(out); true.append(label) pred = torch.cat(pred,", "loss, argmax=False, device='cuda'): \"\"\"Predicts and computes the loss over the provided data Parameters", "= data_batch.to(device), label.float().to(device) with torch.no_grad(): out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :, dim:2*dim]),", "is not improving min_delta : float minimum difference between new loss and old", "loss or float \"\"\" if self.best_loss == None: self.best_loss = val_loss elif self.best_loss", "true): \"\"\"Computes RMSE\"\"\" return torch.mean((true - pred)**2)**0.5 def mape(pred, true): \"\"\"Computes MAPE\"\"\" return", "patience : int how many epochs to wait before stopping when loss is", "evaluate dim : int number of time series loss : torch loss loss", "many epochs to wait before stopping when loss is not improving min_delta :", "import torch.nn.functional as F def rmspe(pred, true): \"\"\"Computes RMSPE\"\"\" 
return torch.mean(((true - pred)", "test_batch = test_batch.to(device) with torch.no_grad(): out = model(test_batch[:, :, :dim*2], test_batch[:, :, -1])", "not improving min_delta : float minimum difference between new loss and old loss", "on device : str 'cuda' or 'cpu' model : torch.nn.Module model containing the", "true)**2)**0.5 def rmse(pred, true): \"\"\"Computes RMSE\"\"\" return torch.mean((true - pred)**2)**0.5 def mape(pred, true):", "dim, loss, argmax=False, device='cuda'): \"\"\"Predicts and computes the loss over the provided data", "p.requires_grad) class EarlyStopping(): \"\"\" Early stopping to stop the training when the loss", ": torch.nn.Module model containing the weights to compute the predictions data_loader : torch.utils.data.DataLoader", "was equal to 0 early_stop : bool if True, the training will break", "torch.mean(torch.abs((true - pred) / true)) def acc(pred, true): \"\"\"Computes Accuracy\"\"\" return torch.mean(pred ==", "Attributes ---------- patience : int how many epochs to wait before stopping when", "is 5) min_delta : float, optional minimum difference between new loss and old", "improves self.counter = 0 elif self.best_loss - val_loss < self.min_delta: self.counter += 1", "or NoneType validation loss of the last epoch for which the counter was", "label.float().to(device) with torch.no_grad(): out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :, dim:2*dim]), 2), data_batch[:,", "-1]) if model_type=='classification': out = out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def count_parameters(model): \"\"\"Returns", ": int how many epochs to wait before stopping when loss is not", "loss function argmax : bool, optional if True, an arg max is applied", "device : str 'cuda' or 'cpu' model : torch.nn.Module model containing the weights", "Parameters ---------- val_loss : torch loss or float \"\"\" if self.best_loss == None:", "counter : int number of epochs without improvement best_loss : float or NoneType", "data_batch.to(device), label.float().to(device) with torch.no_grad(): out = model(torch.cat((data_batch[:, :, :dim], data_batch[:, :, dim:2*dim]), 2),", "arg max is applied dim : int number of time series Returns -------", "= test_batch.to(device) with torch.no_grad(): out = model(test_batch[:, :, :dim*2], test_batch[:, :, -1]) if", "loss over the provided data Parameters ---------- model : torch.nn.Module model containing the", "label in data_loader: data_batch, label = data_batch.to(device), label.float().to(device) with torch.no_grad(): out = model(torch.cat((data_batch[:,", "or 'cpu' model : torch.nn.Module model containing the weights to compute the predictions", "rmspe(pred, true): \"\"\"Computes RMSPE\"\"\" return torch.mean(((true - pred) / true)**2)**0.5 def rmse(pred, true):", "self.best_loss - val_loss < self.min_delta: self.counter += 1 if self.counter >= self.patience: print('Early", "stopping to stop the training when the loss does not improve after certain", "\"\"\" def __init__(self, patience=5, min_delta=0.): \"\"\" Parameters --------- patience : int, optional how", "of time series loss : torch loss loss function argmax : bool, optional", "torch.mean(pred == true) def evaluate_model(model, data_loader, dim, loss, argmax=False, device='cuda'): \"\"\"Predicts and computes", "the counter was equal to 0 early_stop : bool if True, the training", "true.append(label) pred = torch.cat(pred, 0).squeeze() true = torch.cat(true, 0).squeeze() return loss(pred, true) def", "= False def __call__(self, val_loss): \"\"\" 
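# A quick sanity check for count_parameters (an illustrative example, not part
# of the original module): nn.Linear(10, 2) holds a 2x10 weight matrix plus a
# 2-element bias, all trainable by default.
assert count_parameters(nn.Linear(10, 2)) == 2 * 10 + 2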
class EarlyStopping():
    """
    Early stopping to stop the training when the loss does not improve
    after certain epochs.

    Attributes
    ----------
    patience : int
        how many epochs to wait before stopping when loss is not improving
    min_delta : float
        minimum difference between new loss and old loss for new loss to be
        considered as an improvement
    counter : int
        number of epochs without improvement
    best_loss : float or NoneType
        validation loss of the last epoch for which the counter was equal to 0
    early_stop : bool
        if True, the training will break
    """
    def __init__(self, patience=5, min_delta=0.):
        """
        Parameters
        ----------
        patience : int, optional
            how many epochs to wait before stopping when loss is not improving
            (default is 5)
        min_delta : float, optional
            minimum difference between new loss and old loss for new loss to be
            considered as an improvement (default is 0.)
        """
        self.patience = patience
        self.min_delta = min_delta
        self.counter = 0
        self.best_loss = None
        self.early_stop = False

    def __call__(self, val_loss):
        """
        Parameters
        ----------
        val_loss : torch loss or float
        """
        if self.best_loss is None:
            self.best_loss = val_loss
        elif self.best_loss - val_loss > self.min_delta:
            self.best_loss = val_loss
            # reset counter if validation loss improves
            self.counter = 0
        elif self.best_loss - val_loss < self.min_delta:
            self.counter += 1
            if self.counter >= self.patience:
                print('Early stopping')
                self.early_stop = True
Attributes ----------", "min_delta self.counter = 0 self.best_loss = None self.early_stop = False def __call__(self, val_loss):", "the loss over the provided data Parameters ---------- model : torch.nn.Module model containing", "< self.min_delta: self.counter += 1 if self.counter >= self.patience: print('Early stopping') self.early_stop =", "bool, optional if True, an arg max is applied to predicted values (default", "model.parameters() if p.requires_grad) class EarlyStopping(): \"\"\" Early stopping to stop the training when", "weights that can be trained in the provided model\"\"\" return sum(p.numel() for p", "sum(p.numel() for p in model.parameters() if p.requires_grad) class EarlyStopping(): \"\"\" Early stopping to", "None: self.best_loss = val_loss elif self.best_loss - val_loss > self.min_delta: self.best_loss = val_loss", "Parameters ---------- test_loader : torch.utils.data.DataLoader data to predict on device : str 'cuda'", "> self.min_delta: self.best_loss = val_loss # reset counter if validation loss improves self.counter", "improvement best_loss : float or NoneType validation loss of the last epoch for", "out = model(test_batch[:, :, :dim*2], test_batch[:, :, -1]) if model_type=='classification': out = out.argmax(1)", "compute the predictions model_type : str 'regression' or 'classification', for the latter an", "torch.cat(pred, 0).squeeze() def count_parameters(model): \"\"\"Returns the number of weights that can be trained", ":, :dim*2], test_batch[:, :, -1]) if model_type=='classification': out = out.argmax(1) pred.append(out) return torch.cat(pred,", "float \"\"\" if self.best_loss == None: self.best_loss = val_loss elif self.best_loss - val_loss", ": torch.nn.Module model containing the weights to compute the predictions model_type : str", "= patience self.min_delta = min_delta self.counter = 0 self.best_loss = None self.early_stop =", "be trained in the provided model\"\"\" return sum(p.numel() for p in model.parameters() if", "predict on device : str 'cuda' or 'cpu' model : torch.nn.Module model containing", ":, -1]) if model_type=='classification': out = out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def count_parameters(model):", "if True, the training will break \"\"\" def __init__(self, patience=5, min_delta=0.): \"\"\" Parameters", "of epochs without improvement best_loss : float or NoneType validation loss of the", "return torch.mean(pred == true) def evaluate_model(model, data_loader, dim, loss, argmax=False, device='cuda'): \"\"\"Predicts and", "validation loss of the last epoch for which the counter was equal to", "is applied dim : int number of time series Returns ------- torch.tensor \"\"\"", "old loss for new loss to be considered as an improvement (default is", "val_loss > self.min_delta: self.best_loss = val_loss # reset counter if validation loss improves", "5) min_delta : float, optional minimum difference between new loss and old loss", "(default is 0.) \"\"\" self.patience = patience self.min_delta = min_delta self.counter = 0", "optional if True, an arg max is applied to predicted values (default is", "__init__(self, patience=5, min_delta=0.): \"\"\" Parameters --------- patience : int, optional how many epochs", "loss does not improve after certain epochs. 
Attributes ---------- patience : int how", "float \"\"\" pred = []; true = [] for data_batch, label in data_loader:", "torch.cat(pred, 0).squeeze() true = torch.cat(true, 0).squeeze() return loss(pred, true) def predict(test_loader, device, model,", ": float minimum difference between new loss and old loss for new loss", "or 'cpu', (default is 'cuda') Returns ------- float \"\"\" pred = []; true", "as F def rmspe(pred, true): \"\"\"Computes RMSPE\"\"\" return torch.mean(((true - pred) / true)**2)**0.5", "NoneType validation loss of the last epoch for which the counter was equal", "training when the loss does not improve after certain epochs. Attributes ---------- patience", "---------- test_loader : torch.utils.data.DataLoader data to predict on device : str 'cuda' or", "many epochs to wait before stopping when loss is not improving (default is", "0 early_stop : bool if True, the training will break \"\"\" def __init__(self,", ":, :dim], data_batch[:, :, dim:2*dim]), 2), data_batch[:, :, -1]) if argmax: pred =", "last epoch for which the counter was equal to 0 early_stop : bool", "/ true)**2)**0.5 def rmse(pred, true): \"\"\"Computes RMSE\"\"\" return torch.mean((true - pred)**2)**0.5 def mape(pred,", "val_loss # reset counter if validation loss improves self.counter = 0 elif self.best_loss", "dim:2*dim]), 2), data_batch[:, :, -1]) if argmax: pred = pred.argmax(1) pred.append(out); true.append(label) pred", "model containing the weights to compute the predictions data_loader : torch.utils.data.DataLoader data to", "wait before stopping when loss is not improving (default is 5) min_delta :", "pred)**2)**0.5 def mape(pred, true): \"\"\"Computes MAPE\"\"\" return torch.mean(torch.abs((true - pred) / true)) def", "------- float \"\"\" pred = []; true = [] for data_batch, label in", "torch.mean(((true - pred) / true)**2)**0.5 def rmse(pred, true): \"\"\"Computes RMSE\"\"\" return torch.mean((true -", "number of weights that can be trained in the provided model\"\"\" return sum(p.numel()", ":, dim:2*dim]), 2), data_batch[:, :, -1]) if argmax: pred = pred.argmax(1) pred.append(out); true.append(label)", "training will break \"\"\" def __init__(self, patience=5, min_delta=0.): \"\"\" Parameters --------- patience :", "data Parameters ---------- model : torch.nn.Module model containing the weights to compute the", "if model_type=='classification': out = out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def count_parameters(model): \"\"\"Returns the", "torch.nn.Module model containing the weights to compute the predictions data_loader : torch.utils.data.DataLoader data", "pred = torch.cat(pred, 0).squeeze() true = torch.cat(true, 0).squeeze() return loss(pred, true) def predict(test_loader,", "an improvement (default is 0.) \"\"\" self.patience = patience self.min_delta = min_delta self.counter", "stopping when loss is not improving min_delta : float minimum difference between new", "min_delta=0.): \"\"\" Parameters --------- patience : int, optional how many epochs to wait", "test_batch[:, :, -1]) if model_type=='classification': out = out.argmax(1) pred.append(out) return torch.cat(pred, 0).squeeze() def", "is False) device : str, optional 'cuda' or 'cpu', (default is 'cuda') Returns" ]
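To show how EarlyStopping is meant to be driven, here is a minimal sketch; the toy tensors, the nn.Linear model, and the optimizer are stand-ins assumed only for this demo and are not part of mtse. In real use the callable would be fed a held-out validation loss rather than the training loss.

import torch
from torch import nn

# toy regression setup (assumed for this demo only)
X, y = torch.randn(256, 4), torch.randn(256, 1)
model = nn.Linear(4, 1)
opt = torch.optim.Adam(model.parameters(), lr=1e-2)
loss_fn = nn.MSELoss()

early_stopping = EarlyStopping(patience=5, min_delta=0.)
for epoch in range(500):
    opt.zero_grad()
    loss = loss_fn(model(X), y)
    loss.backward()
    opt.step()
    # here the training loss stands in for a validation loss
    early_stopping(loss.item())
    if early_stopping.early_stop:
        print(f'stopped at epoch {epoch}, best loss {early_stopping.best_loss:.4f}')
        break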
<filename>train_seg.py<gh_stars>1-10
import os
from scipy import stats
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from base_train import train
from util import LoadConfig
from dataset import HUTDataset
from model.network import UNetLike
from model.framework import Segmentation
from model.loss import *


class Config(LoadConfig):
    def __init__(self) -> None:
        super(Config, self).__init__()
        self.info = ""
        self.train_name = "UNetLike_enhance_Segmentation"
        self.dataset_path = 'dataset/HutIris-Blur'
        self.cp_path = "checkpoints/1030_194858_UNetLike/80_2.2903e-03.pth"
        self.cp_path = ""
        self.visible = True
        self.log_interval = 5
        self.save_interval = 5
        self.less_data = False
        self.debug = False

        self.use_dct = False
        self.model_channel = 16
        self.mask_down = 8
        self.load_to_ram = False
        self.batchsize = 32
        self.device = [0, 1, 2, 3]
        self.num_workers = 0
        self.seed = np.random.randint(9999)

        self.max_epochs = 500
        self.lr = 8e-4
        self.momentum = 0.9
        self.weight_decay = 1e-4
        self.apply()


def get_dataloaders(config):
    train_data = HUTDataset(path=config['dataset_path'], mode='train',
                            less_data=config['less_data'], lrud_move=True)
    train_data_loader = DataLoader(train_data, config['batchsize'], drop_last=True,
                                   shuffle=True, pin_memory=True,
                                   num_workers=config['num_workers'])
    val_data = HUTDataset(path=config['dataset_path'], mode='val', less_data=False)
    val_data_loader = DataLoader(val_data, config['batchsize'], shuffle=True,
                                 drop_last=True, pin_memory=True,
                                 num_workers=config['num_workers'])
    return train_data_loader, val_data_loader


def evaluation(val_save, val_num):
    pred_loss = val_save['pred_loss'] / val_num
    mask_loss = val_save['mask_loss'] / val_num
    position = np.concatenate(val_save['position'], axis=0)
    offset = np.concatenate(val_save['offset'], axis=0)
    srocc = stats.spearmanr(offset.reshape(-1), position.reshape(-1))[0]
    lcc = stats.pearsonr(offset.reshape(-1), position.reshape(-1))[0]
    return {
        "Val_pred_loss": pred_loss,
        "Val_mask_loss": mask_loss,
        "SROCC": srocc,
        "LCC": lcc
    }


def val_plot(log_writer, epoch, val_save):
    idx = torch.randint(val_save['image'].shape[0], (1, )).item()
    image = nn.functional.interpolate(
        val_save['image'],
        (val_save['mask'][idx].shape[-2], val_save['mask'][idx].shape[-1]),
        mode='bilinear', align_corners=True)
    mask = val_save['mask'][idx] + image[idx]
    heatmap = val_save['heatmap'][idx] + image[idx]
    image = torch.clamp(torch.cat((heatmap, mask, image[idx]), dim=0), 0, 1)
    log_writer.add_image('Val/image', image, epoch)


if __name__ == "__main__":
    # set config
    config = Config()

    # data
    print('Loading Data')
    dataloaders = get_dataloaders(config)

    # model and criterion
    criterion = SegmentationLoss()
    model = UNetLike(dct=config['use_dct'], channel=config['model_channel'],
                     downsample=False,
                     trainable={'encoder': True, 'decoder': True, 'rnn': False})
    model = Segmentation(model, criterion)

    # optimizer and scheduler
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                                 lr=config['lr'])
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, 'min', factor=0.5, patience=config['log_interval'] * 2, verbose=True)
    optimizers = (optimizer, scheduler)

    # train
    train(config, dataloaders, model, optimizers, evaluation, val_plot)
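To get a feel for the metrics dict that `evaluation` produces, the following sketch feeds it synthetic buffers shaped like the ones a training loop would accumulate; all values here are made up for illustration, and the correlated "offset" is constructed so SROCC/LCC land near 1.

import numpy as np

rng = np.random.default_rng(0)
position = [rng.random((8, 1)) for _ in range(4)]
# correlate "offset" with "position" so the rank/linear correlations are high
offset = [pos + 0.05 * rng.standard_normal(pos.shape) for pos in position]
val_save = {'pred_loss': 1.2, 'mask_loss': 0.6,
            'position': position, 'offset': offset}
print(evaluation(val_save, val_num=4))
# e.g. {'Val_pred_loss': 0.3, 'Val_mask_loss': 0.15, 'SROCC': 0.98..., 'LCC': 0.99...}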
[ "__all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter', 'PickleItemExporter', 'PythonItemExporter', 'CsvItemExporter', 'MarshalItemExporter', 'BaseItemExporter', 'SplashScraperItems']", ":copyright: Copyright (C) 2018 by BOM Quote Limited :license: The MIT License, see", "export, write to file. :copyright: Copyright (C) 2018 by BOM Quote Limited :license:", "see LICENSE for more details. ~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter,", "spreadsheet export, write to file. :copyright: Copyright (C) 2018 by BOM Quote Limited", "~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers", "to file. :copyright: Copyright (C) 2018 by BOM Quote Limited :license: The MIT", "import SplashScraperItems from .item import Item, Field from .newt_db.newt_crud import get_job_results, delete_job __all__", "from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems", "get_job_results, delete_job __all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter', 'PickleItemExporter', 'PythonItemExporter', 'CsvItemExporter', 'MarshalItemExporter',", "\"\"\" transistor.persistence ~~~~~~~~~~~~ This module implements classes and methods to aid persistence, including", "BaseItemExporter) from .containers import SplashScraperItems from .item import Item, Field from .newt_db.newt_crud import", "including database, spreadsheet export, write to file. :copyright: Copyright (C) 2018 by BOM", "module implements classes and methods to aid persistence, including database, spreadsheet export, write", "# -*- coding: utf-8 -*- \"\"\" transistor.persistence ~~~~~~~~~~~~ This module implements classes and", "to aid persistence, including database, spreadsheet export, write to file. :copyright: Copyright (C)", "Copyright (C) 2018 by BOM Quote Limited :license: The MIT License, see LICENSE", ".exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems from", "database, spreadsheet export, write to file. :copyright: Copyright (C) 2018 by BOM Quote", "utf-8 -*- \"\"\" transistor.persistence ~~~~~~~~~~~~ This module implements classes and methods to aid", "(PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems from .item import", "The MIT License, see LICENSE for more details. ~~~~~~~~~~~~ \"\"\" from .exporters import", "file. :copyright: Copyright (C) 2018 by BOM Quote Limited :license: The MIT License,", "more details. ~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter)", "write to file. :copyright: Copyright (C) 2018 by BOM Quote Limited :license: The", "BOM Quote Limited :license: The MIT License, see LICENSE for more details. ~~~~~~~~~~~~", "Quote Limited :license: The MIT License, see LICENSE for more details. 
~~~~~~~~~~~~ \"\"\"", "\"\"\" from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import", "persistence, including database, spreadsheet export, write to file. :copyright: Copyright (C) 2018 by", ".item import Item, Field from .newt_db.newt_crud import get_job_results, delete_job __all__ = ['delete_job', 'Field',", "from .containers import SplashScraperItems from .item import Item, Field from .newt_db.newt_crud import get_job_results,", "implements classes and methods to aid persistence, including database, spreadsheet export, write to", "2018 by BOM Quote Limited :license: The MIT License, see LICENSE for more", "License, see LICENSE for more details. ~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter, PickleItemExporter,", "MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems from .item import Item, Field from .newt_db.newt_crud", "coding: utf-8 -*- \"\"\" transistor.persistence ~~~~~~~~~~~~ This module implements classes and methods to", "SplashScraperItems from .item import Item, Field from .newt_db.newt_crud import get_job_results, delete_job __all__ =", "and methods to aid persistence, including database, spreadsheet export, write to file. :copyright:", "for more details. ~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter,", "CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems from .item import Item, Field from", "LICENSE for more details. ~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter,", "Field from .newt_db.newt_crud import get_job_results, delete_job __all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter',", "import get_job_results, delete_job __all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter', 'PickleItemExporter', 'PythonItemExporter', 'CsvItemExporter',", "PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems from .item import Item, Field", "from .newt_db.newt_crud import get_job_results, delete_job __all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter', 'PickleItemExporter',", "Item, Field from .newt_db.newt_crud import get_job_results, delete_job __all__ = ['delete_job', 'Field', 'get_job_results', 'Item',", ".newt_db.newt_crud import get_job_results, delete_job __all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter', 'PickleItemExporter', 'PythonItemExporter',", "-*- coding: utf-8 -*- \"\"\" transistor.persistence ~~~~~~~~~~~~ This module implements classes and methods", "classes and methods to aid persistence, including database, spreadsheet export, write to file.", "methods to aid persistence, including database, spreadsheet export, write to file. :copyright: Copyright", "(C) 2018 by BOM Quote Limited :license: The MIT License, see LICENSE for", "Limited :license: The MIT License, see LICENSE for more details. ~~~~~~~~~~~~ \"\"\" from", "import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems from .item", ":license: The MIT License, see LICENSE for more details. ~~~~~~~~~~~~ \"\"\" from .exporters", "details. 
~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter, PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from", "delete_job __all__ = ['delete_job', 'Field', 'get_job_results', 'Item', 'PprintItemExporter', 'PickleItemExporter', 'PythonItemExporter', 'CsvItemExporter', 'MarshalItemExporter', 'BaseItemExporter',", "MIT License, see LICENSE for more details. ~~~~~~~~~~~~ \"\"\" from .exporters import (PprintItemExporter,", "~~~~~~~~~~~~ This module implements classes and methods to aid persistence, including database, spreadsheet", "This module implements classes and methods to aid persistence, including database, spreadsheet export,", "from .item import Item, Field from .newt_db.newt_crud import get_job_results, delete_job __all__ = ['delete_job',", "PickleItemExporter, PythonItemExporter, CsvItemExporter, MarshalItemExporter, BaseItemExporter) from .containers import SplashScraperItems from .item import Item,", ".containers import SplashScraperItems from .item import Item, Field from .newt_db.newt_crud import get_job_results, delete_job", "import Item, Field from .newt_db.newt_crud import get_job_results, delete_job __all__ = ['delete_job', 'Field', 'get_job_results',", "by BOM Quote Limited :license: The MIT License, see LICENSE for more details.", "transistor.persistence ~~~~~~~~~~~~ This module implements classes and methods to aid persistence, including database,", "-*- \"\"\" transistor.persistence ~~~~~~~~~~~~ This module implements classes and methods to aid persistence,", "aid persistence, including database, spreadsheet export, write to file. :copyright: Copyright (C) 2018" ]
[ "for qrow in range(Q): for prow in range(P): #first term locates z plane,", "1; #this is fundamentally 3D...not sure how to make general for 2D N", "#inputs: A, P, Q, R # A is the discrete representation of epsilon", "Ny = 20; Nz = 1; #this is fundamentally 3D...not sure how to", "## generalize two 2D geometries; A = np.ones(N+1) A[2:18, 2:18, 0] = 12;", "= 20; Nz = 1; #this is fundamentally 3D...not sure how to make", "harmonics (or orders) P = 6; Q = 6; R = 6; Nx", "3D...not sure how to make general for 2D N = np.array([Nx, Ny, Nz]);", "import numpy as np import matplotlib.pyplot as plt ## preliminary tests #inputs: A,", "= int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C", "= 6; R = 6; Nx = 20; Ny = 20; Nz =", "preliminary tests #inputs: A, P, Q, R # A is the discrete representation", "in range(Q): for prow in range(P): #first term locates z plane, 2nd locates", "range(R): for qcol in range(Q): for pcol in range(P): col = (rcol)*Q*P +", "6; Nx = 20; Ny = 20; Nz = 1; #this is fundamentally", "N = np.array([Nx, Ny, Nz]); ## generalize two 2D geometries; A = np.ones(N+1)", "pfft = p[prow] - p[pcol]; qfft = q[qrow] - q[qcol]; rfft = r[rrow]", "the discrete representation of epsilon #number of spatial harmonics (or orders) P =", "list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af =", "= P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r =", "int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A));", "= p[prow] - p[pcol]; qfft = q[qrow] - q[qcol]; rfft = r[rrow] -", "Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 =", "2): R = 1; NH = P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q", "A, P, Q, R # A is the discrete representation of epsilon #number", "NH = P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r", "= q[qrow] - q[qcol]; rfft = r[rrow] - r[rrow] C[row, col] = Af[p0+pfft,", "# deal with different dimensionalities if(len(N) == 1): Q = 1; R =", "to make general for 2D N = np.array([Nx, Ny, Nz]); ## generalize two", "= np.zeros((NH, NH)) C = C.astype(complex); for rrow in range(R): for qrow in", "P, Q, R # A is the discrete representation of epsilon #number of", "C.astype(complex); for rrow in range(R): for qrow in range(Q): for prow in range(P):", "## preliminary tests #inputs: A, P, Q, R # A is the discrete", "range(Q): for pcol in range(P): col = (rcol)*Q*P + (qcol)*P + pcol; pfft", "x row = (rrow)*Q*P+(qrow)*P + prow; for rcol in range(R): for qcol in", "p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1));", "A = np.ones(N+1) A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show() # deal with", "orders) P = 6; Q = 6; R = 6; Nx = 20;", "r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2)); q0", "# A is the discrete representation of epsilon 
#number of spatial harmonics (or", "list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 =", "C = C.astype(complex); for rrow in range(R): for qrow in range(Q): for prow", "in range(R): for qcol in range(Q): for pcol in range(P): col = (rcol)*Q*P", "np.ones(N+1) A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show() # deal with different dimensionalities", "r[rrow] C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:, :, 0])); plt.show() plt.imshow(np.abs(C)); plt.show()", "P = 6; Q = 6; R = 6; Nx = 20; Ny", "+ prow; for rcol in range(R): for qcol in range(Q): for pcol in", "prow; for rcol in range(R): for qcol in range(Q): for pcol in range(P):", "q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C = C.astype(complex);", "q[qrow] - q[qcol]; rfft = r[rrow] - r[rrow] C[row, col] = Af[p0+pfft, q0+qfft,", "2D geometries; A = np.ones(N+1) A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show() #", "20; Ny = 20; Nz = 1; #this is fundamentally 3D...not sure how", "different dimensionalities if(len(N) == 1): Q = 1; R = 1; elif(len(N) ==", "Nz = 1; #this is fundamentally 3D...not sure how to make general for", "list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2));", "for rcol in range(R): for qcol in range(Q): for pcol in range(P): col", "int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C =", "= int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C = C.astype(complex); for", "matplotlib.pyplot as plt ## preliminary tests #inputs: A, P, Q, R # A", "20; Nz = 1; #this is fundamentally 3D...not sure how to make general", "= C.astype(complex); for rrow in range(R): for qrow in range(Q): for prow in", "term locates z plane, 2nd locates y column, prow locates x row =", "as np import matplotlib.pyplot as plt ## preliminary tests #inputs: A, P, Q,", "1; R = 1; elif(len(N) == 2): R = 1; NH = P*Q*R;", "= 12; plt.imshow(A[:,:,0]); plt.show() # deal with different dimensionalities if(len(N) == 1): Q", "import matplotlib.pyplot as plt ## preliminary tests #inputs: A, P, Q, R #", "= 20; Ny = 20; Nz = 1; #this is fundamentally 3D...not sure", "1; NH = P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1));", "- r[rrow] C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:, :, 0])); plt.show() plt.imshow(np.abs(C));", "2nd locates y column, prow locates x row = (rrow)*Q*P+(qrow)*P + prow; for", "for qcol in range(Q): for pcol in range(P): col = (rcol)*Q*P + (qcol)*P", "q[qcol]; rfft = r[rrow] - r[rrow] C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:,", "numpy as np import matplotlib.pyplot as plt ## preliminary tests #inputs: A, P,", "A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show() # deal with different dimensionalities if(len(N)", "deal with different dimensionalities if(len(N) == 1): Q = 1; R = 1;", "Ny, Nz]); ## generalize two 2D geometries; A = np.ones(N+1) A[2:18, 2:18, 0]", "int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C = C.astype(complex); for rrow", "Nz]); ## generalize two 2D geometries; A = np.ones(N+1) A[2:18, 2:18, 0] =", "np import matplotlib.pyplot as plt ## preliminary tests #inputs: A, P, Q, 
R", "generalize two 2D geometries; A = np.ones(N+1) A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]);", "2D N = np.array([Nx, Ny, Nz]); ## generalize two 2D geometries; A =", "indices; p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C = np.zeros((NH,", "plt.show() # deal with different dimensionalities if(len(N) == 1): Q = 1; R", "row = (rrow)*Q*P+(qrow)*P + prow; for rcol in range(R): for qcol in range(Q):", "1): Q = 1; R = 1; elif(len(N) == 2): R = 1;", "0] = 12; plt.imshow(A[:,:,0]); plt.show() # deal with different dimensionalities if(len(N) == 1):", "#number of spatial harmonics (or orders) P = 6; Q = 6; R", "qcol in range(Q): for pcol in range(P): col = (rcol)*Q*P + (qcol)*P +", "plt ## preliminary tests #inputs: A, P, Q, R # A is the", "= np.ones(N+1) A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show() # deal with different", "rrow in range(R): for qrow in range(Q): for prow in range(P): #first term", "for prow in range(P): #first term locates z plane, 2nd locates y column,", "sure how to make general for 2D N = np.array([Nx, Ny, Nz]); ##", "6; Q = 6; R = 6; Nx = 20; Ny = 20;", "with different dimensionalities if(len(N) == 1): Q = 1; R = 1; elif(len(N)", "col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:, :, 0])); plt.show() plt.imshow(np.abs(C)); plt.show() plt.plot(np.diag(abs(C))) plt.show()", "#first term locates z plane, 2nd locates y column, prow locates x row", "+ pcol; pfft = p[prow] - p[pcol]; qfft = q[qrow] - q[qcol]; rfft", "is fundamentally 3D...not sure how to make general for 2D N = np.array([Nx,", "Nx = 20; Ny = 20; Nz = 1; #this is fundamentally 3D...not", "- q[qcol]; rfft = r[rrow] - r[rrow] C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft];", "for rrow in range(R): for qrow in range(Q): for prow in range(P): #first", "for pcol in range(P): col = (rcol)*Q*P + (qcol)*P + pcol; pfft =", "make general for 2D N = np.array([Nx, Ny, Nz]); ## generalize two 2D", "locates y column, prow locates x row = (rrow)*Q*P+(qrow)*P + prow; for rcol", "- p[pcol]; qfft = q[qrow] - q[qcol]; rfft = r[rrow] - r[rrow] C[row,", "== 2): R = 1; NH = P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p)", "np.array([Nx, Ny, Nz]); ## generalize two 2D geometries; A = np.ones(N+1) A[2:18, 2:18,", "int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C = C.astype(complex); for rrow in range(R): for", "for 2D N = np.array([Nx, Ny, Nz]); ## generalize two 2D geometries; A", "pcol in range(P): col = (rcol)*Q*P + (qcol)*P + pcol; pfft = p[prow]", "= (rrow)*Q*P+(qrow)*P + prow; for rcol in range(R): for qcol in range(Q): for", "P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)),", "range(Q): for prow in range(P): #first term locates z plane, 2nd locates y", "int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0", "in range(P): #first term locates z plane, 2nd locates y column, prow locates", "12; plt.imshow(A[:,:,0]); plt.show() # deal with different dimensionalities if(len(N) == 1): Q =", "epsilon #number of spatial harmonics (or orders) P = 6; Q = 6;", "== 1): Q = 1; R = 1; elif(len(N) == 2): R =", "spatial harmonics (or orders) P = 6; Q = 6; R = 6;", "range(P): col = (rcol)*Q*P + (qcol)*P + pcol; pfft = p[prow] - p[pcol];", "qfft = q[qrow] - q[qcol]; rfft = r[rrow] - r[rrow] C[row, col] =", "locates z plane, 2nd locates y column, prow 
locates x row = (rrow)*Q*P+(qrow)*P", "C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:, :, 0])); plt.show() plt.imshow(np.abs(C)); plt.show() plt.plot(np.diag(abs(C)))", "dimensionalities if(len(N) == 1): Q = 1; R = 1; elif(len(N) == 2):", "= 1; elif(len(N) == 2): R = 1; NH = P*Q*R; p =", "in range(P): col = (rcol)*Q*P + (qcol)*P + pcol; pfft = p[prow] -", "R = 6; Nx = 20; Ny = 20; Nz = 1; #this", "two 2D geometries; A = np.ones(N+1) A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show()", "Q = 1; R = 1; elif(len(N) == 2): R = 1; NH", "C = np.zeros((NH, NH)) C = C.astype(complex); for rrow in range(R): for qrow", "= 1; NH = P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)),", "np.zeros((NH, NH)) C = C.astype(complex); for rrow in range(R): for qrow in range(Q):", "plt.imshow(A[:,:,0]); plt.show() # deal with different dimensionalities if(len(N) == 1): Q = 1;", "r[rrow] - r[rrow] C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:, :, 0])); plt.show()", "if(len(N) == 1): Q = 1; R = 1; elif(len(N) == 2): R", "+ (qcol)*P + pcol; pfft = p[prow] - p[pcol]; qfft = q[qrow] -", "col = (rcol)*Q*P + (qcol)*P + pcol; pfft = p[prow] - p[pcol]; qfft", "= np.array([Nx, Ny, Nz]); ## generalize two 2D geometries; A = np.ones(N+1) A[2:18,", "tests #inputs: A, P, Q, R # A is the discrete representation of", "pcol; pfft = p[prow] - p[pcol]; qfft = q[qrow] - q[qcol]; rfft =", "range(P): #first term locates z plane, 2nd locates y column, prow locates x", "fundamentally 3D...not sure how to make general for 2D N = np.array([Nx, Ny,", "= r[rrow] - r[rrow] C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:, :, 0]));", "column, prow locates x row = (rrow)*Q*P+(qrow)*P + prow; for rcol in range(R):", "6; R = 6; Nx = 20; Ny = 20; Nz = 1;", "#this is fundamentally 3D...not sure how to make general for 2D N =", "(rrow)*Q*P+(qrow)*P + prow; for rcol in range(R): for qcol in range(Q): for pcol", "A is the discrete representation of epsilon #number of spatial harmonics (or orders)", "(or orders) P = 6; Q = 6; R = 6; Nx =", "R # A is the discrete representation of epsilon #number of spatial harmonics", "p[pcol]; qfft = q[qrow] - q[qcol]; rfft = r[rrow] - r[rrow] C[row, col]", "= int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C = C.astype(complex); for rrow in range(R):", "R = 1; NH = P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q =", "in range(Q): for pcol in range(P): col = (rcol)*Q*P + (qcol)*P + pcol;", "2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show() # deal with different dimensionalities if(len(N) ==", "locates x row = (rrow)*Q*P+(qrow)*P + prow; for rcol in range(R): for qcol", "plane, 2nd locates y column, prow locates x row = (rrow)*Q*P+(qrow)*P + prow;", "p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C = np.zeros((NH, NH))", "NH)) C = C.astype(complex); for rrow in range(R): for qrow in range(Q): for", "R = 1; elif(len(N) == 2): R = 1; NH = P*Q*R; p", "= (rcol)*Q*P + (qcol)*P + pcol; pfft = p[prow] - p[pcol]; qfft =", "= list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1)); print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af", "Q = 6; R = 6; Nx = 20; Ny = 20; Nz", "print(p) q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central", "= 
list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0", "representation of epsilon #number of spatial harmonics (or orders) P = 6; Q", "of spatial harmonics (or orders) P = 6; Q = 6; R =", "= 1; R = 1; elif(len(N) == 2): R = 1; NH =", "(qcol)*P + pcol; pfft = p[prow] - p[pcol]; qfft = q[qrow] - q[qcol];", "as plt ## preliminary tests #inputs: A, P, Q, R # A is", "(rcol)*Q*P + (qcol)*P + pcol; pfft = p[prow] - p[pcol]; qfft = q[qrow]", "= 6; Nx = 20; Ny = 20; Nz = 1; #this is", "elif(len(N) == 2): R = 1; NH = P*Q*R; p = list(range(-int(np.floor(P/2)), int(np.floor(P/2))+1));", "int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2));", "rfft = r[rrow] - r[rrow] C[row, col] = Af[p0+pfft, q0+qfft, r0+rfft]; plt.imshow(np.abs(Af[:, :,", "discrete representation of epsilon #number of spatial harmonics (or orders) P = 6;", "= 6; Q = 6; R = 6; Nx = 20; Ny =", "1; elif(len(N) == 2): R = 1; NH = P*Q*R; p = list(range(-int(np.floor(P/2)),", "is the discrete representation of epsilon #number of spatial harmonics (or orders) P", "q = list(range(-int(np.floor(Q/2)), int(np.floor(Q/2))+1)); r = list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices;", "r0 = int(np.floor(Nz/2)); C = np.zeros((NH, NH)) C = C.astype(complex); for rrow in", "rcol in range(R): for qcol in range(Q): for pcol in range(P): col =", "how to make general for 2D N = np.array([Nx, Ny, Nz]); ## generalize", "prow in range(P): #first term locates z plane, 2nd locates y column, prow", "Q, R # A is the discrete representation of epsilon #number of spatial", "prow locates x row = (rrow)*Q*P+(qrow)*P + prow; for rcol in range(R): for", "(1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C", "of epsilon #number of spatial harmonics (or orders) P = 6; Q =", "in range(R): for qrow in range(Q): for prow in range(P): #first term locates", "= (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2));", "p[prow] - p[pcol]; qfft = q[qrow] - q[qcol]; rfft = r[rrow] - r[rrow]", "= 1; #this is fundamentally 3D...not sure how to make general for 2D", "geometries; A = np.ones(N+1) A[2:18, 2:18, 0] = 12; plt.imshow(A[:,:,0]); plt.show() # deal", "z plane, 2nd locates y column, prow locates x row = (rrow)*Q*P+(qrow)*P +", "#central indices; p0 = int(np.floor(Nx/2)); q0 = int(np.floor(Ny/2)); r0 = int(np.floor(Nz/2)); C =", "qrow in range(Q): for prow in range(P): #first term locates z plane, 2nd", "general for 2D N = np.array([Nx, Ny, Nz]); ## generalize two 2D geometries;", "y column, prow locates x row = (rrow)*Q*P+(qrow)*P + prow; for rcol in", "range(R): for qrow in range(Q): for prow in range(P): #first term locates z", "= list(range(-int(np.floor(R/2)), int(np.floor(R/2))+1)); Af = (1/np.prod(N))*np.fft.fftshift(np.fft.fftn(A)); #central indices; p0 = int(np.floor(Nx/2)); q0 =" ]
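# ---------------------------------------------------------------------
# Sanity check (a minimal sketch, not part of the original script): for a
# homogeneous medium the convolution matrix must reduce to eps * identity,
# because the shifted FFT of a constant field has a single nonzero component
# at the central (zeroth-order) index. `convmat_check` is a helper name
# introduced here purely for illustration; it builds a small 2D analogue of
# the loop above.
def convmat_check(eps=12.0, P=3, Q=3, Nx=20, Ny=20):
    A = eps * np.ones((Nx, Ny))
    Af = np.fft.fftshift(np.fft.fft2(A)) / (Nx * Ny)
    p = list(range(-(P // 2), P // 2 + 1))[:P]
    q = list(range(-(Q // 2), Q // 2 + 1))[:Q]
    p0, q0 = Nx // 2, Ny // 2
    C = np.zeros((P * Q, P * Q), dtype=complex)
    for qrow in range(Q):
        for prow in range(P):
            row = qrow * P + prow
            for qcol in range(Q):
                for pcol in range(P):
                    col = qcol * P + pcol
                    # difference of harmonic indices selects the FFT component
                    C[row, col] = Af[p0 + p[prow] - p[pcol],
                                     q0 + q[qrow] - q[qcol]]
    return C

assert np.allclose(convmat_check(), 12.0 * np.eye(9))
print("homogeneous-medium check passed: C == eps * I")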
[ "`numpy` for calculation. num_cpu {int} or `all` number of cpus duaring calculating transition", "self.x_smooth[t + 1] - self.x_pred[t + 1]) # self.V_smooth[t] = self.V_filt[t] \\ #", "[n_dim_sys, n_dim_sys] {numpy-array, float} # : fixed interval smoothed gain # \"\"\" #", "be changed.\".format(method)) if estimation_mode in [\"forward\", \"middle\", \"backward\"]: self.estimation_mode = estimation_mode else: raise", "value initial_mean [n_dim_sys] {float} also known as :math:`\\mu_0`. initial state mean initial_covariance [n_dim_sys,", "positive integer. Attributes: y : `observation` F : `transition_matrix` Q : `transition_covariance` H", "x_filt [n_time, n_dim_sys] {numpy-array, float} : mean of hidden state at time t", "StreamHandler, DEBUG logger = getLogger(\"llock\") handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate =", "adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`A`. adjacency matrix, if there", ">= self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2) start_time =", "calculate predicted distribution for time t self.x_pred[t] = self.F @ self.x_filt[t-1] self.V_pred =", "filter for observation times. Attributes: T {int} : length of data y x_pred", "to estimate :math:`P(x_t | y_{0:t})` and :math:`F` in real-time. As all state transitions", "@ self.H self.times[3] += time.time() - start_time self.times[4] += 1 self.F = self.F", "cupy self.use_gpu = True except: self.xp = np self.use_gpu = False else: self.xp", "transition_matrix is None: self.F = self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.F = self.xp.asarray(transition_matrix,", "= int((estimation_length - 1) / 2) self.tau = 2 * self.tau2 + 1", "def _predict_update(self, t): \"\"\"Calculate fileter update Args: t {int} : observation time \"\"\"", "+ self.x_filt.shape[1] + '.') # def smooth(self): # \"\"\"Calculate RTS smooth for times.", "{int} : dimensionality for extract from RTS smoothed result # Returns (numpy-array, float)", "of hidden state at time t given observations from times [0...t] \"\"\" #", "self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) # calculate prediction and filter for every time", "self.xp.asarray(observation).copy() if initial_mean is None: self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype) else: self.initial_mean", ":math:`H`. observation matrix from x_{t} to y_{t} transition_covariance [n_time-1, n_dim_sys, n_dim_sys] or [n_dim_sys,", "t given observations from times [0...t] V_filt [n_dim_sys, n_dim_sys] {numpy-array, float} : covariance", "n_dim_sys] {numpy-array, float} # : mean of hidden state distributions for times #", "range(T): # visualize calculating time print(\"\\r filter calculating... 
t={}\".format(t) + \"/\" + str(T),", "matrix F save_dir {str, directory-like} directory for saving transition matrices and filtered states.", "observation dimenstions \"all-average\": average calculation for each observation dimenstions update_interval {int} interval of", "import multiprocessing as mp import itertools import numpy as np from .utils import", "= \"backward\", save_dir = None, n_dim_sys = None, n_dim_obs = None, dtype =", "dtype = dtype) if transition_covariance is not None: self.Q = self.xp.asarray(transition_covariance, dtype =", "-1), (initial_covariance, array2d, -2), (observation_matrix, array2d, -1)], n_dim_sys, self.use_gpu ) self.n_dim_obs = _determine_dimensionality(", "\" \\\"middle\\\", or \\\"backward\\\".\") if self.estimation_mode in [\"forward\", \"backward\"]: self.tau = int(estimation_length) self.tau2", "\"\"\"Get predicted value Args: dim {int} : dimensionality for extract from predicted result", "of numpy-array use_gpu {bool} wheather use gpu and cupy. if True, you need", "of update transition matrix F eta (in (0,1]) update rate for update transition", "= self.xp.zeros(self.n_dim_sys, dtype = dtype) else: self.initial_mean = self.xp.asarray(initial_mean, dtype = dtype) if", "self.F - Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"), self.F)", "= estimation_interval self.tm_count = 1 if save_dir is None: self.save_change = False else:", "- 1, 2, self.use_gpu) # calculate predicted distribution for time t self.x_pred[t] =", "of hidden state at time t given observations from times [0...t-1] \"\"\" #", "implement `forward`, implement `forward` try : self.x_filt[0] except : self.forward() if dim is", "w_{t}]^T &\\sim N(0, [[Q_{t}, O], [O, R_{t}]]) The LLOCK is an algorithm designed", "self.fillnum = len(str(int(self.y.shape[0] / self.I))) self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(0).zfill(self.fillnum) + \".npy\"), self.F) if", "all observations # A [n_dim_sys, n_dim_sys] {numpy-array, float} # : fixed interval smoothed", "self.HI @ G @ self.H else: self.times[2] += time.time() - start_time Fh =", "C[C==0] = 1 #AA G /= C #AA if self.tm_count==1: self.F = self.HI", "#LA G[global_node_number, i] += Gh[:, local_node_number] #LA G /= 2.0 #LA elif self.method==\"all-average\":", "@ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[i, global_node_number] += Gh[local_node_number] #LA G[global_node_number, i] += Gh[:, local_node_number]", "{numpy-array, float} also known as :math:`A`. adjacency matrix, if there is a link", "if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"), self.F) self.tm_count += 1 def", "is reversed from 1~T) # for t in reversed(range(T - 1)) : #", "self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA for i in range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0] Gh =", "A[i,j]=0. Besides, you should A[i,i]=1 forall i. method {string} : method for localized", "use_gpu = False, num_cpu = \"all\"): \"\"\"Setup initial parameters. 
\"\"\" if use_gpu: try:", "save_dir is None: self.save_change = False else: self.save_change = True self.save_dir = save_dir", "[(observation_matrix, array2d, -2), (observation_covariance, array2d, -2), (adjacency_matrix, array2d, -2)], n_dim_obs, self.use_gpu ) #", "self.F = self.HI @ G @ self.H else: self.times[2] += time.time() - start_time", "result # Returns (numpy-array, float) # : mean of hidden state at time", "for specific two observation dimenstions \"all-average\": average calculation for each observation dimenstions update_interval", "self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1]) # self.V_smooth[t] = self.V_filt[t] \\", "{int} : observation time \"\"\" # extract parameters for time t-1 Q =", "self.num_cpu = num_cpu self.eta = eta self.cutoff = cutoff self.dtype = dtype self.times", "self._predict_update(t) self.times[0] += time.time() - start_time if self.xp.any(self.xp.isnan(self.y[t])): self.x_filt[t] = self.x_pred[t] self.V_filt =", "itertools.repeat(y))) p.close() G[A] = G_local elif self.method==\"local-average\": # local-average for i in range(self.n_dim_obs):", "array2d from .util_functions import _parse_observations, _last_dims, \\ _determine_dimensionality def _local_calculation(i, j, A, y):", "self.I))) self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(0).zfill(self.fillnum) + \".npy\"), self.F) if num_cpu == \"all\": self.num_cpu", "\"\"\"Get filtered value Args: dim {int} : dimensionality for extract from filtered result", "distributions for times # [0...n_times-1] given all observations # V_smooth [n_time, n_dim_sys, n_dim_sys]", "print(\"\\r smooth calculating... t={}\".format(T - t) # + \"/\" + str(T), end=\"\") #", "given observations from times [0...t-1] V_pred [n_dim_sys, n_dim_sys] {numpy-array, float} : covariance of", "- (self.H @ self.x_pred[t]) ) self.V_filt = self.V_pred - K @ (self.H @", "# V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float} # : covariances of hidden state", "except : # self.smooth() # if dim is None: # return self.x_smooth #", "of hidden state at time t given observations from times [0...t] \"\"\" T", "dtype) if adjacency_matrix is None: self.A = self.xp.eye(dtype=bool) else: self.A = self.xp.asarray(adjacency_matrix, dtype", "+= 1 #AA C[C==0] = 1 #AA G /= C #AA if self.tm_count==1:", "self.n_dim_sys = _determine_dimensionality( [(transition_matrix, array2d, -2), (initial_mean, array1d, -1), (initial_covariance, array2d, -2), (observation_matrix,", "# [0...n_times-1] given all observations # V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float} #", "self.A y = self.y[t-self.tau:t+1] where_is_A = np.where(A) p = mp.Pool(self.num_cpu) G_local = p.starmap(_local_calculation,", "import numpy as np from .utils import array1d, array2d from .util_functions import _parse_observations,", "can set `all` or positive integer. 
Attributes: y : `observation` F : `transition_matrix`", "if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self, t): \"\"\"Calculate fileter update Args: t", "start_time = time.time() if self.method==\"elementwise\": # elementwise if self.use_gpu: A = self.A.get() y", "t given observations from times [0...t-1] x_filt [n_time, n_dim_sys] {numpy-array, float} : mean", "p.starmap(_local_calculation, zip(where_is_A[0], where_is_A[1], itertools.repeat(A), itertools.repeat(y))) p.close() G[A] = G_local elif self.method==\"local-average\": # local-average", "y_{t} &= H_{t} x_{t} + d_{t} + w_{t} \\\\ [v_{t}, w_{t}]^T &\\sim N(0,", "dtype = \"float32\", use_gpu = False, num_cpu = \"all\"): \"\"\"Setup initial parameters. \"\"\"", "LOCK. This class implements the LLOCK, for a Linear Gaussian model specified by,", "self.n_dim_sys), dtype = self.dtype) # self.x_smooth[-1] = self.x_filt[-1] # self.V_smooth[-1] = self.V_filt[-1] #", "\\ # + self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T)) #", "gpu and cupy. if True, you need install package `cupy`. if False, set", "matrix. you can set `all` or positive integer. Attributes: y : `observation` F", "1, eta = 1., cutoff = 10., estimation_mode = \"backward\", save_dir = None,", "\"\"\"Calculate RTS smooth for times. # Args: # T : length of data", "int(estimation_length) self.tau2 = int((estimation_length - 1) / 2) else: self.tau2 = int((estimation_length -", "= self.V_filt[t] \\ # + self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1],", "getLogger, StreamHandler, DEBUG logger = getLogger(\"llock\") handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate", "t-1 Q = _last_dims(self.Q, t - 1, 2, self.use_gpu) # calculate predicted distribution", "self.x_smooth[:, int(dim)] # else: # raise ValueError('The dim must be less than '", "O], [O, R_{t}]]) The LLOCK is an algorithm designed to estimate :math:`P(x_t |", "can be represented exactly as Gaussian distributions with mean `x_filt[t]` and covariances `V_filt`.", ": mean of hidden state at time t given observations from times [0...t]", "Args: t {int} : observation time \"\"\" # extract parameters for time t-1", "= dtype) else: self.initial_covariance = self.xp.asarray(initial_covariance, dtype = dtype) if transition_matrix is None:", "def get_filtered_value(self, dim = None): \"\"\"Get filtered value Args: dim {int} : dimensionality", "self.x_filt = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) # calculate prediction and filter for", "R = _last_dims(self.R, t, 2, self.use_gpu) # calculate filter step K = self.V_pred", "elif t >= self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2)", "F eta (in (0,1]) update rate for update transition matrix F cutoff cutoff", "self.H self.times[3] += time.time() - start_time self.times[4] += 1 self.F = self.F -", "[O, R_{t}]]) The LLOCK is an algorithm designed to estimate :math:`P(x_t | y_{0:t})`", "`V_filt`. Args: observation [n_time, n_dim_obs] {numpy-array, float} also known as :math:`y`. 
observation value", "= self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys), # dtype = self.dtype) # A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys),", "self.x_pred.shape[1]: return self.x_pred[:, int(dim)] else: raise ValueError('The dim must be less than '", "# self.x_smooth[t] = self.x_filt[t] \\ # + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t", "self.x_filt) def _predict_update(self, t): \"\"\"Calculate fileter update Args: t {int} : observation time", "raise ValueError('The dim must be less than ' + self.x_filt.shape[1] + '.') #", "time.time() self._predict_update(t) self.times[0] += time.time() - start_time if self.xp.any(self.xp.isnan(self.y[t])): self.x_filt[t] = self.x_pred[t] self.V_filt", "# self.y = _parse_observations(observation) self.y = self.xp.asarray(observation).copy() if initial_mean is None: self.initial_mean =", "raise ValueError('The dim must be less than ' + self.x_pred.shape[1] + '.') def", "else: self.Q = self.xp.eye(self.n_dim_sys, dtype = dtype) if observation_matrix is None: self.H =", "1]) # self.V_smooth[t] = self.V_filt[t] \\ # + self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] -", "smoothed result # Returns (numpy-array, float) # : mean of hidden state at", "self.x_smooth = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) # self.V_smooth = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys),", "= None, n_dim_obs = None, dtype = \"float32\", use_gpu = False, num_cpu =", "self.x_pred[0] except : self.forward() if dim is None: return self.x_pred elif dim <=", "@ np.linalg.pinv(y[:-1, global_node_number].T) return Gh[local_node_number_i, local_node_number_j] class LocalLOCK(object) : \"\"\"Implements the Local LOCK.", ": # # visualize calculating times # print(\"\\r smooth calculating... t={}\".format(T - t)", "or [n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`Q`. system transition covariance observation_covariance", "R_{t}]]) The LLOCK is an algorithm designed to estimate :math:`P(x_t | y_{0:t})` and", "= self.x_pred[t] self.V_filt = self.V_pred else : start_time = time.time() self._filter_update(t) self.times[1] +=", "(t-1)%self.I==0 and self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1) elif t >= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\": self._update_transition_matrix(t)", "given observations from times [0...t-1] x_filt [n_time, n_dim_sys] {numpy-array, float} : mean of", "else: self.save_change = True self.save_dir = save_dir self.fillnum = len(str(int(self.y.shape[0] / self.I))) self.xp.save(os.path.join(self.save_dir,", "`forward`, implement `forward` try : self.x_pred[0] except : self.forward() if dim is None:", "mean of hidden state at time t given observations from times [0...t-1] \"\"\"", "{numpy-array, float} : Kalman gain matrix for time t \"\"\" # extract parameters", "reversed(range(T - 1)) : # # visualize calculating times # print(\"\\r smooth calculating...", "= _last_dims(self.F, t, 2) # # calculate fixed interval smoothing gain # A", "\"or \\\"all-average\\\". So, your setting \\\"{}\\\" need to be changed.\".format(method)) if estimation_mode in", "time.time() if self.method==\"elementwise\": # elementwise if self.use_gpu: A = self.A.get() y = self.y[t-self.tau:t+1].get()", "mp import itertools import numpy as np from .utils import array1d, array2d from", "H : `observation_matrix` R : `observation_covariance` \"\"\" def __init__(self, observation = None, initial_mean", "Besides, you should A[i,i]=1 forall i. 
method {string} : method for localized calculation", "= method else: raise ValueError(\"Variable \\\"method\\\" only allows \\\"elementwise\\\", \\\"local-average\\\" \" + \"or", "determine dimensionality self.n_dim_sys = _determine_dimensionality( [(transition_matrix, array2d, -2), (initial_mean, array1d, -1), (initial_covariance, array2d,", "at time t given observations # from times [0...T] # \"\"\" # #", "global_node_number = np.where(local_A)[0] Gh = y[1:, global_node_number].T \\ @ np.linalg.pinv(y[:-1, global_node_number].T) return Gh[local_node_number_i,", "observation times. Attributes: T {int} : length of data y x_pred [n_time, n_dim_sys]", "# extract parameters for time t # F = _last_dims(self.F, t, 2) #", "(numpy-array, float) # : mean of hidden state at time t given observations", "[n_dim_sys, n_dim_sys] or [n_dim_sys, n_dim_sys]{numpy-array, float} also known as :math:`F`. transition matrix from", "from .util_functions import _parse_observations, _last_dims, \\ _determine_dimensionality def _local_calculation(i, j, A, y): local_A", "None): # \"\"\"Get RTS smoothed value # Args: # dim {int} : dimensionality", "get_predicted_value(self, dim = None): \"\"\"Get predicted value Args: dim {int} : dimensionality for", "self.eta = eta self.cutoff = cutoff self.dtype = dtype self.times = self.xp.zeros(5) def", "< T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2) start_time = time.time() self._predict_update(t) self.times[0] +=", "np.linalg.pinv(y[:-1, global_node_number].T) return Gh[local_node_number_i, local_node_number_j] class LocalLOCK(object) : \"\"\"Implements the Local LOCK. This", "observations # from times [0...T] # \"\"\" # # if not implement `smooth`,", ": observation time Attributes: K [n_dim_sys, n_dim_obs] {numpy-array, float} : Kalman gain matrix", "DEBUG logger = getLogger(\"llock\") handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False", ".. math:: x_{t+1} &= F_{t} x_{t} + b_{t} + v_{t} \\\\ y_{t} &=", "x_{t-1} to x_{t} observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float} also known as :math:`H`. observation", "= np.where(A) p = mp.Pool(self.num_cpu) G_local = p.starmap(_local_calculation, zip(where_is_A[0], where_is_A[1], itertools.repeat(A), itertools.repeat(y))) p.close()", "for time t self.x_pred[t] = self.F @ self.x_filt[t-1] self.V_pred = self.F @ self.V_filt", "self.A.get() y = self.y[t-self.tau:t+1].get() else: A = self.A y = self.y[t-self.tau:t+1] where_is_A =", "= None): \"\"\"Get predicted value Args: dim {int} : dimensionality for extract from", "dtype = bool) if method in [\"elementwise\", \"local-average\", \"all-average\"]: self.method = method else:", "calculation. num_cpu {int} or `all` number of cpus duaring calculating transition matrix. you", "matrix after filtering. n_dim_sys {int} dimension of system transition variable n_dim_obs {int} dimension", "predicted distribution for time t self.x_pred[t] = self.F @ self.x_filt[t-1] self.V_pred = self.F", "= self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1]))) # # fixed interval smoothing # self.x_smooth[t]", "1., cutoff = 10., estimation_mode = \"backward\", save_dir = None, n_dim_sys = None,", "/ 2) self.tau = 2 * self.tau2 + 1 self.I = estimation_interval self.tm_count", "_last_dims(self.F, t, 2) # # calculate fixed interval smoothing gain # A =", "you can set `all` or positive integer. 
Attributes: y : `observation` F :", "self.n_dim_obs), dtype=self.dtype) #AA for i in range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1,", "\"backward\"]: self.estimation_mode = estimation_mode else: raise ValueError(\"\\\"estimation_mode\\\" must be choosen from \\\"forward\\\",\" +", "local_node_number_i = len(np.where(local_A[:i])[0]) local_node_number_j = len(np.where(local_A[:j])[0]) global_node_number = np.where(local_A)[0] Gh = y[1:, global_node_number].T", "= None, method = \"elementwise\", estimation_length = 10, estimation_interval = 1, eta =", "self.save_change = True self.save_dir = save_dir self.fillnum = len(str(int(self.y.shape[0] / self.I))) self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\"", "self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time() if self.method==\"elementwise\": # elementwise if self.use_gpu: A", "interval smoothing # self.x_smooth[t] = self.x_filt[t] \\ # + self.xp.dot(A, self.x_smooth[t + 1]", "system transition variable n_dim_obs {int} dimension of observation variable dtype {type} data type", "cannot save them. advance_mode {bool} if True, calculate transition matrix before filtering. if", "else A[i,j]=0. Besides, you should A[i,i]=1 forall i. method {string} : method for", "in [\"forward\", \"backward\"]: self.tau = int(estimation_length) self.tau2 = int((estimation_length - 1) / 2)", "self.n_dim_sys, dtype = dtype) else: self.H = self.xp.asarray(observation_matrix, dtype = dtype) self.HI =", "is None: self.initial_covariance = self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.initial_covariance = self.xp.asarray(initial_covariance, dtype", "step K = self.V_pred @ ( self.H.T @ self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T)", "self.n_dim_sys), dtype = self.dtype) # calculate prediction and filter for every time for", "less than ' + self.x_pred.shape[1] + '.') def get_filtered_value(self, dim = None): \"\"\"Get", "for each observation dimenstions update_interval {int} interval of update transition matrix F eta", "is None: return self.x_pred elif dim <= self.x_pred.shape[1]: return self.x_pred[:, int(dim)] else: raise", "type of numpy-array use_gpu {bool} wheather use gpu and cupy. if True, you", "= self.V_filt[-1] # # t in [0, T-2] (notice t range is reversed", "given observations from times [0...t] V_filt [n_dim_sys, n_dim_sys] {numpy-array, float} : covariance of", "' + self.x_pred.shape[1] + '.') def get_filtered_value(self, dim = None): \"\"\"Get filtered value", "t given observations # from times [0...T] # \"\"\" # # if not", "and filter for every time for t in range(T): # visualize calculating time", "adjacency matrix, if there is a link between i and j, A[i,j]=1, else", "self.xp.asarray(initial_covariance, dtype = dtype) if transition_matrix is None: self.F = self.xp.eye(self.n_dim_sys, dtype =", "j, A[i,j]=1, else A[i,j]=0. Besides, you should A[i,i]=1 forall i. 
method {string} :", "self.V_smooth[-1] = self.V_filt[-1] # # t in [0, T-2] (notice t range is", "= self.xp.asarray(adjacency_matrix, dtype = bool) if method in [\"elementwise\", \"local-average\", \"all-average\"]: self.method =", "= self.xp.asarray(observation_matrix, dtype = dtype) self.HI = self.xp.linalg.pinv(self.H) if observation_covariance is None: self.R", "self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[i, global_node_number] += Gh[local_node_number]", "+ \".npy\"), self.F) self.tm_count += 1 def get_predicted_value(self, dim = None): \"\"\"Get predicted", "filter step K = self.V_pred @ ( self.H.T @ self.xp.linalg.inv(self.H @ (self.V_pred @", "variable n_dim_obs {int} dimension of observation variable dtype {type} data type of numpy-array", "time t given observations from times [0...t] \"\"\" # if not implement `forward`,", "= None, initial_covariance = None, transition_matrix = None, observation_matrix = None, transition_covariance =", "= False else: self.xp = np self.use_gpu = False # determine dimensionality self.n_dim_sys", "= self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[i, global_node_number] +=", "@ G @ self.H self.times[3] += time.time() - start_time self.times[4] += 1 self.F", "&= H_{t} x_{t} + d_{t} + w_{t} \\\\ [v_{t}, w_{t}]^T &\\sim N(0, [[Q_{t},", "for t in reversed(range(T - 1)) : # # visualize calculating times #", "else: A = self.A y = self.y[t-self.tau:t+1] where_is_A = np.where(A) p = mp.Pool(self.num_cpu)", "LOCK for Linear-Gaussian state space models \"\"\" from logging import getLogger, StreamHandler, DEBUG", "wheather use gpu and cupy. if True, you need install package `cupy`. if", "dtype = dtype) else: self.initial_mean = self.xp.asarray(initial_mean, dtype = dtype) if initial_covariance is", "`forward` try : self.x_pred[0] except : self.forward() if dim is None: return self.x_pred", "i in range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t,", "for i in range(self.n_dim_obs): local_node_number = len(self.xp.where(self.A[i][:i])[0]) #LA global_node_number = self.xp.where(self.A[i])[0] Gh =", ": self.x_pred[0] except : self.forward() if dim is None: return self.x_pred elif dim", "def forward(self): \"\"\"Calculate prediction and filter for observation times. Attributes: T {int} :", "F cutoff cutoff distance for update transition matrix F save_dir {str, directory-like} directory", ": length of data y # x_smooth [n_time, n_dim_sys] {numpy-array, float} # :", "self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T)) # def get_smoothed_value(self, dim", "observation variable dtype {type} data type of numpy-array use_gpu {bool} wheather use gpu", "{numpy-array, float} also known as :math:`Q`. 
system transition covariance observation_covariance [n_time, n_dim_obs, n_dim_obs]", "if t >= 2 and t < T-self.tau+1 and (t-1)%self.I==0 and self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1)", "observation_matrix is None: self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype) else: self.H =", "times [0...t-1] V_pred [n_dim_sys, n_dim_sys] {numpy-array, float} : covariance of hidden state at", "t >= 2 and t < T-self.tau+1 and (t-1)%self.I==0 and self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1) elif", "visualize calculating time print(\"\\r filter calculating... t={}\".format(t) + \"/\" + str(T), end=\"\") if", "- self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" +", ":math:`R`. observation covariance adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`A`. adjacency", "module implements the Local LOCK for Linear-Gaussian state space models \"\"\" from logging", "parameters. \"\"\" if use_gpu: try: import cupy self.xp = cupy self.use_gpu = True", "distribution for time t self.x_pred[t] = self.F @ self.x_filt[t-1] self.V_pred = self.F @", "self.x_filt.shape[1]: return self.x_filt[:, int(dim)] else: raise ValueError('The dim must be less than '", "[n_time, n_dim_sys, n_dim_sys] {numpy-array, float} # : covariances of hidden state distributions for", "known as :math:`F`. transition matrix from x_{t-1} to x_{t} observation_matrix [n_dim_sys, n_dim_obs] {numpy-array,", "observation covariance adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`A`. adjacency matrix,", "#AA C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA C[C==0] = 1 #AA G /= C", "@ self.x_pred[t]) ) self.V_filt = self.V_pred - K @ (self.H @ self.V_pred) def", "is None: self.F = self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.F = self.xp.asarray(transition_matrix, dtype", "also known as :math:`H`. observation matrix from x_{t} to y_{t} transition_covariance [n_time-1, n_dim_sys,", "is None: self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype) else: self.H = self.xp.asarray(observation_matrix,", "update Args: t {int} : observation time \"\"\" # extract parameters for time", "set `all` or positive integer. Attributes: y : `observation` F : `transition_matrix` Q", "dim is None: return self.x_pred elif dim <= self.x_pred.shape[1]: return self.x_pred[:, int(dim)] else:", "self.F = self.xp.asarray(transition_matrix, dtype = dtype) if transition_covariance is not None: self.Q =", "import math import time import multiprocessing as mp import itertools import numpy as", ") self.n_dim_obs = _determine_dimensionality( [(observation_matrix, array2d, -2), (observation_covariance, array2d, -2), (adjacency_matrix, array2d, -2)],", "return self.x_filt[:, int(dim)] else: raise ValueError('The dim must be less than ' +", "# def smooth(self): # \"\"\"Calculate RTS smooth for times. # Args: # T", "there is a link between i and j, A[i,j]=1, else A[i,j]=0. Besides, you", "known as :math:`H`. 
observation matrix from x_{t} to y_{t} transition_covariance [n_time-1, n_dim_sys, n_dim_sys]", "{int} dimension of system transition variable n_dim_obs {int} dimension of observation variable dtype", "G @ self.H else: self.times[2] += time.time() - start_time Fh = self.HI @", "else: self.tau2 = int((estimation_length - 1) / 2) self.tau = 2 * self.tau2", "self.initial_mean = self.xp.asarray(initial_mean, dtype = dtype) if initial_covariance is None: self.initial_covariance = self.xp.eye(self.n_dim_sys,", "for t in range(T): # visualize calculating time print(\"\\r filter calculating... t={}\".format(t) +", "# + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1]) # self.V_smooth[t] =", "# self.V_smooth[-1] = self.V_filt[-1] # # t in [0, T-2] (notice t range", "- t) # + \"/\" + str(T), end=\"\") # # extract parameters for", "cpus duaring calculating transition matrix. you can set `all` or positive integer. Attributes:", "gain # A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1]))) # # fixed interval", "= self.V_pred @ ( self.H.T @ self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T) + R)", "self._update_transition_matrix(t+self.tau2) start_time = time.time() self._predict_update(t) self.times[0] += time.time() - start_time if self.xp.any(self.xp.isnan(self.y[t])): self.x_filt[t]", "hidden state at time t given observations # from times [0...T] # \"\"\"", "matrix Args: t {int} : observation time \"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype)", "else: self.H = self.xp.asarray(observation_matrix, dtype = dtype) self.HI = self.xp.linalg.pinv(self.H) if observation_covariance is", "filtered result Returns (numpy-array, float) : mean of hidden state at time t", "be less than ' + self.x_filt.shape[1] + '.') # def smooth(self): # \"\"\"Calculate", "adjacency_matrix = None, method = \"elementwise\", estimation_length = 10, estimation_interval = 1, eta", "dtype) if initial_covariance is None: self.initial_covariance = self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.initial_covariance", "2.0 #LA elif self.method==\"all-average\": #all-average C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA for i", "saving transition matrices and filtered states. if this variable is `None`, cannot save", "from \\\"forward\\\",\" + \" \\\"middle\\\", or \\\"backward\\\".\") if self.estimation_mode in [\"forward\", \"backward\"]: self.tau", "also known as :math:`R`. 
observation covariance adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float} also known", "self.V_pred = self.initial_covariance.copy() self._update_transition_matrix(self.tau) else: if t >= 2 and t < T-self.tau+1", "and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t >= self.tau2+2 and t < T-self.tau2", "and self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t >= self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0", "_parse_observations, _last_dims, \\ _determine_dimensionality def _local_calculation(i, j, A, y): local_A = A[i] |", "float) # : mean of hidden state at time t given observations #", "dim {int} : dimensionality for extract from predicted result Returns (numpy-array, float) :", "= y[1:, global_node_number].T \\ @ np.linalg.pinv(y[:-1, global_node_number].T) return Gh[local_node_number_i, local_node_number_j] class LocalLOCK(object) :", "= _last_dims(self.R, t, 2, self.use_gpu) # calculate filter step K = self.V_pred @", "of cpus duaring calculating transition matrix. you can set `all` or positive integer.", "= dtype) if transition_covariance is not None: self.Q = self.xp.asarray(transition_covariance, dtype = dtype)", "Args: dim {int} : dimensionality for extract from filtered result Returns (numpy-array, float)", "designed to estimate :math:`P(x_t | y_{0:t})` and :math:`F` in real-time. As all state", "1, 2, self.use_gpu) # calculate predicted distribution for time t self.x_pred[t] = self.F", "self.tau = int(estimation_length) self.tau2 = int((estimation_length - 1) / 2) else: self.tau2 =", "= \"float32\", use_gpu = False, num_cpu = \"all\"): \"\"\"Setup initial parameters. \"\"\" if", "setting self.x_pred[0] = self.initial_mean self.V_pred = self.initial_covariance.copy() self._update_transition_matrix(self.tau) else: if t >= 2", "= _parse_observations(observation) self.y = self.xp.asarray(observation).copy() if initial_mean is None: self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype", "Gh[local_node_number] #LA G[global_node_number, i] += Gh[:, local_node_number] #LA G /= 2.0 #LA elif", "use_gpu: try: import cupy self.xp = cupy self.use_gpu = True except: self.xp =", "self.forward() if dim is None: return self.x_pred elif dim <= self.x_pred.shape[1]: return self.x_pred[:,", "save_dir = None, n_dim_sys = None, n_dim_obs = None, dtype = \"float32\", use_gpu", "t): \"\"\"Update transition matrix Args: t {int} : observation time \"\"\" G =", "# [0...n_times-1] given all observations # A [n_dim_sys, n_dim_sys] {numpy-array, float} # :", "= self.xp.eye(self.n_dim_sys, dtype = dtype) if observation_matrix is None: self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys,", "with mean `x_filt[t]` and covariances `V_filt`. 
"""
=====================================================================
Inference with Local Linear Operator Construction with Kalman Filter
=====================================================================
This module implements the Local LOCK for Linear-Gaussian state space models
"""
from logging import getLogger, StreamHandler, DEBUG
logger = getLogger("llock")
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False

import os
import math
import time
import multiprocessing as mp
import itertools

import numpy as np

from .utils import array1d, array2d
from .util_functions import _parse_observations, _last_dims, \
    _determine_dimensionality


def _local_calculation(i, j, A, y):
    """Fit a one-step linear map on the union of the neighbourhoods of
    observation dimensions i and j, and return its (i, j) entry."""
    local_A = A[i] | A[j]
    local_node_number_i = len(np.where(local_A[:i])[0])
    local_node_number_j = len(np.where(local_A[:j])[0])
    global_node_number = np.where(local_A)[0]
    Gh = y[1:, global_node_number].T \
        @ np.linalg.pinv(y[:-1, global_node_number].T)
    return Gh[local_node_number_i, local_node_number_j]
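# A minimal sketch (not part of the original module) showing what
# `_local_calculation` computes: restricted to the linked dimensions, it
# least-squares fits y[t+1] ~ Gh @ y[t] via the pseudo-inverse and returns
# one entry of Gh. `G_true`, the window length, and the noise level are
# hypothetical choices for illustration only.
def _demo_local_calculation():
    rng = np.random.default_rng(0)
    A = np.eye(4, dtype=bool)            # A[i,i]=1, as the class docstring requires
    A[0, 1] = A[1, 0] = True             # dimensions 0 and 1 are linked
    G_true = 0.9 * np.eye(4)             # hypothetical sparse propagator
    G_true[0, 1] = G_true[1, 0] = 0.05
    y = np.empty((20, 4))
    y[0] = rng.normal(size=4)
    for t in range(19):
        y[t + 1] = G_true @ y[t] + 1e-4 * rng.normal(size=4)
    # the locally fitted (0, 1) entry should be close to 0.05
    return _local_calculation(0, 1, A, y)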
\"\"\" if use_gpu: try: import", "observations # V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float} # : covariances of hidden", "time t given observations from times [0...t-1] x_filt [n_time, n_dim_sys] {numpy-array, float} :", "= self.initial_covariance.copy() self._update_transition_matrix(self.tau) else: if t >= 2 and t < T-self.tau+1 and", "times [0...t] \"\"\" T = self.y.shape[0] self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)", "None, initial_covariance = None, transition_matrix = None, observation_matrix = None, transition_covariance = None,", "global_node_number)] += 1 #AA C[C==0] = 1 #AA G /= C #AA if", "# if not implement `smooth`, implement `smooth` # try : # self.x_smooth[0] #", "transition matrix before filtering. if False, calculate the matrix after filtering. n_dim_sys {int}", "None: self.save_change = False else: self.save_change = True self.save_dir = save_dir self.fillnum =", "self.times[3] += time.time() - start_time self.times[4] += 1 self.F = self.F - self.eta", "smooth(self): # \"\"\"Calculate RTS smooth for times. # Args: # T : length", "else: raise ValueError(\"\\\"estimation_mode\\\" must be choosen from \\\"forward\\\",\" + \" \\\"middle\\\", or \\\"backward\\\".\")", "= self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time() if self.method==\"elementwise\": # elementwise if self.use_gpu:", "self.F - self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\"", "&= F_{t} x_{t} + b_{t} + v_{t} \\\\ y_{t} &= H_{t} x_{t} +", "@ self.V_pred) def _update_transition_matrix(self, t): \"\"\"Update transition matrix Args: t {int} : observation", "{int} : observation time \"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time()", ": self.forward() if dim is None: return self.x_filt elif dim <= self.x_filt.shape[1]: return", "as :math:`H`. observation matrix from x_{t} to y_{t} transition_covariance [n_time-1, n_dim_sys, n_dim_sys] or", "G /= 2.0 #LA elif self.method==\"all-average\": #all-average C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA", "(self.V_pred @ self.H.T) + R) ) self.x_filt[t] = self.x_pred[t] + K @ (", ": observation time \"\"\" # extract parameters for time t-1 Q = _last_dims(self.Q,", "cutoff = 10., estimation_mode = \"backward\", save_dir = None, n_dim_sys = None, n_dim_obs", "RTS smoothed value # Args: # dim {int} : dimensionality for extract from", "+ 1] - self.x_pred[t + 1]) # self.V_smooth[t] = self.V_filt[t] \\ # +", "None, observation_covariance = None, adjacency_matrix = None, method = \"elementwise\", estimation_length = 10,", "A[i,i]=1 forall i. 
method {string} : method for localized calculation \"elementwise\": calculation for", "at time t given observations from times [0...t] \"\"\" # if not implement", "dim = None): # \"\"\"Get RTS smoothed value # Args: # dim {int}", "np self.use_gpu = False else: self.xp = np self.use_gpu = False # determine", "\"\"\"Update transition matrix Args: t {int} : observation time \"\"\" G = self.xp.zeros((self.n_dim_obs,", "dtype = dtype) else: self.Q = self.xp.eye(self.n_dim_sys, dtype = dtype) if observation_matrix is", "{int} interval of update transition matrix F eta (in (0,1]) update rate for", "self.n_dim_sys, self.n_dim_sys), # dtype = self.dtype) # A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype =", "given observations from times [0...t] \"\"\" T = self.y.shape[0] self.x_pred = self.xp.zeros((T, self.n_dim_sys),", "[0...t] \"\"\" # if not implement `forward`, implement `forward` try : self.x_filt[0] except", "self.estimation_mode in [\"forward\", \"backward\"]: self.tau = int(estimation_length) self.tau2 = int((estimation_length - 1) /", "self.tm_count += 1 def get_predicted_value(self, dim = None): \"\"\"Get predicted value Args: dim", "cutoff cutoff distance for update transition matrix F save_dir {str, directory-like} directory for", "+= Gh[local_node_number] #LA G[global_node_number, i] += Gh[:, local_node_number] #LA G /= 2.0 #LA", "predicted result Returns (numpy-array, float) : mean of hidden state at time t", "\\ # + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1]) # self.V_smooth[t]", "np.where(A) p = mp.Pool(self.num_cpu) G_local = p.starmap(_local_calculation, zip(where_is_A[0], where_is_A[1], itertools.repeat(A), itertools.repeat(y))) p.close() G[A]", "gain matrix for time t \"\"\" # extract parameters for time t R", "dtype) else: self.R = self.xp.asarray(observation_covariance, dtype = dtype) if adjacency_matrix is None: self.A", "distributions for times # [0...n_times-1] given all observations # A [n_dim_sys, n_dim_sys] {numpy-array,", "* self.tau2 + 1 self.I = estimation_interval self.tm_count = 1 if save_dir is", "{numpy-array, float} also known as :math:`y`. observation value initial_mean [n_dim_sys] {float} also known", "self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"), self.F) self.tm_count += 1 def get_predicted_value(self,", "\\\\ y_{t} &= H_{t} x_{t} + d_{t} + w_{t} \\\\ [v_{t}, w_{t}]^T &\\sim", "dtype=self.dtype) #AA for i in range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T", "mean of hidden state distributions for times # [0...n_times-1] given all observations #", "initial_mean is None: self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype) else: self.initial_mean = self.xp.asarray(initial_mean,", "= self.xp.eye(self.n_dim_obs, dtype = dtype) else: self.R = self.xp.asarray(observation_covariance, dtype = dtype) if", "by, .. math:: x_{t+1} &= F_{t} x_{t} + b_{t} + v_{t} \\\\ y_{t}", "given all observations # A [n_dim_sys, n_dim_sys] {numpy-array, float} # : fixed interval", "# # calculate fixed interval smoothing gain # A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t", "calculating... 
t={}\".format(T - t) # + \"/\" + str(T), end=\"\") # # extract", "= time.time() if self.method==\"elementwise\": # elementwise if self.use_gpu: A = self.A.get() y =", "transition matrix \"local-average\": average calculation for specific two observation dimenstions \"all-average\": average calculation", "self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"), self.F) self.tm_count += 1", "# : covariances of hidden state distributions for times # [0...n_times-1] given all", "`filter`, implement `filter` # try : # self.x_pred[0] # except : # self.filter()", "Linear Operator Construction with Kalman Filter ===================================================================== This module implements the Local LOCK", "{float} also known as :math:`\\mu_0`. initial state mean initial_covariance [n_dim_sys, n_dim_sys] {numpy-array, float}", "def _local_calculation(i, j, A, y): local_A = A[i] | A[j] local_node_number_i = len(np.where(local_A[:i])[0])", "dim must be less than ' + self.x_pred.shape[1] + '.') def get_filtered_value(self, dim", "each observation dimenstions update_interval {int} interval of update transition matrix F eta (in", "where_is_A = np.where(A) p = mp.Pool(self.num_cpu) G_local = p.starmap(_local_calculation, zip(where_is_A[0], where_is_A[1], itertools.repeat(A), itertools.repeat(y)))", "package `cupy`. if False, set `numpy` for calculation. num_cpu {int} or `all` number", "Kalman gain matrix for time t \"\"\" # extract parameters for time t", "is an algorithm designed to estimate :math:`P(x_t | y_{0:t})` and :math:`F` in real-time.", "- start_time if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self, t): \"\"\"Calculate fileter update", "[0...T] # \"\"\" # # if not implement `smooth`, implement `smooth` # try", "self.tau2 + 1 self.I = estimation_interval self.tm_count = 1 if save_dir is None:", "implements the Local LOCK for Linear-Gaussian state space models \"\"\" from logging import", "# extract parameters for time t R = _last_dims(self.R, t, 2, self.use_gpu) #", "V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float} # : covariances of hidden state distributions", "elif dim <= self.x_smooth.shape[1]: # return self.x_smooth[:, int(dim)] # else: # raise ValueError('The", "# \"\"\"Calculate RTS smooth for times. # Args: # T : length of", "\\\"{}\\\" need to be changed.\".format(method)) if estimation_mode in [\"forward\", \"middle\", \"backward\"]: self.estimation_mode =", "y): local_A = A[i] | A[j] local_node_number_i = len(np.where(local_A[:i])[0]) local_node_number_j = len(np.where(local_A[:j])[0]) global_node_number", "`filter` # try : # self.x_pred[0] # except : # self.filter() # T", "observations # A [n_dim_sys, n_dim_sys] {numpy-array, float} # : fixed interval smoothed gain", "`observation_covariance` \"\"\" def __init__(self, observation = None, initial_mean = None, initial_covariance = None,", "+ 1]))) # # fixed interval smoothing # self.x_smooth[t] = self.x_filt[t] \\ #", "# try : # self.x_pred[0] # except : # self.filter() # T =", "[0...t] V_filt [n_dim_sys, n_dim_sys] {numpy-array, float} : covariance of hidden state at time", "or [n_dim_sys, n_dim_sys]{numpy-array, float} also known as :math:`F`. transition matrix from x_{t-1} to", "this variable is `None`, cannot save them. 
advance_mode {bool} if True, calculate transition", "covariances of hidden state distributions for times # [0...n_times-1] given all observations #", "# \"\"\" # # if not implement `filter`, implement `filter` # try :", "your setting \\\"{}\\\" need to be changed.\".format(method)) if estimation_mode in [\"forward\", \"middle\", \"backward\"]:", "self.xp.linalg.pinv(self.V_pred[t + 1]))) # # fixed interval smoothing # self.x_smooth[t] = self.x_filt[t] \\", "range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[self.xp.ix_(global_node_number,", "self.F @ self.V_filt @ self.F.T + Q def _filter_update(self, t): \"\"\"Calculate fileter update", "if observation_matrix is None: self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype) else: self.H", "= None, initial_mean = None, initial_covariance = None, transition_matrix = None, observation_matrix =", "\\\"local-average\\\" \" + \"or \\\"all-average\\\". So, your setting \\\"{}\\\" need to be changed.\".format(method))", "and filtered states. if this variable is `None`, cannot save them. advance_mode {bool}", "to y_{t} transition_covariance [n_time-1, n_dim_sys, n_dim_sys] or [n_dim_sys, n_dim_sys] {numpy-array, float} also known", "for times # [0...n_times-1] given all observations # A [n_dim_sys, n_dim_sys] {numpy-array, float}", "time \"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time() if self.method==\"elementwise\": #", "self.tau2 = int((estimation_length - 1) / 2) self.tau = 2 * self.tau2 +", "else: raise ValueError(\"Variable \\\"method\\\" only allows \\\"elementwise\\\", \\\"local-average\\\" \" + \"or \\\"all-average\\\". So,", "= None, n_dim_sys = None, n_dim_obs = None, dtype = \"float32\", use_gpu =", "False import os import math import time import multiprocessing as mp import itertools", "StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False import os import math import time", "at time t given observations from times [0...t-1] x_filt [n_time, n_dim_sys] {numpy-array, float}", "+ \"or \\\"all-average\\\". So, your setting \\\"{}\\\" need to be changed.\".format(method)) if estimation_mode", "self.xp.any(self.xp.isnan(self.y[t])): self.x_filt[t] = self.x_pred[t] self.V_filt = self.V_pred else : start_time = time.time() self._filter_update(t)", "mean of hidden state at time t given observations from times [0...t] \"\"\"", "in range(self.n_dim_obs): local_node_number = len(self.xp.where(self.A[i][:i])[0]) #LA global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T", "# print(\"\\r smooth calculating... 
t={}\".format(T - t) # + \"/\" + str(T), end=\"\")", "estimation_length = 10, estimation_interval = 1, eta = 1., cutoff = 10., estimation_mode", "state at time t given observations from times [0...t-1] x_filt [n_time, n_dim_sys] {numpy-array,", "localized calculation \"elementwise\": calculation for each element of transition matrix \"local-average\": average calculation", "int(dim)] # else: # raise ValueError('The dim must be less than ' #", "\"\"\" # # if not implement `smooth`, implement `smooth` # try : #", "length of data y x_pred [n_time, n_dim_sys] {numpy-array, float} : mean of hidden", "+= time.time() - start_time Fh = self.HI @ G @ self.H self.times[3] +=", "Local Linear Operator Construction with Kalman Filter ===================================================================== This module implements the Local", "distributions can be represented exactly as Gaussian distributions with mean `x_filt[t]` and covariances", "K [n_dim_sys, n_dim_obs] {numpy-array, float} : Kalman gain matrix for time t \"\"\"", "self.x_smooth[0] # except : # self.smooth() # if dim is None: # return", "Linear-Gaussian state space models \"\"\" from logging import getLogger, StreamHandler, DEBUG logger =", "element of transition matrix \"local-average\": average calculation for specific two observation dimenstions \"all-average\":", "`transition_covariance` H : `observation_matrix` R : `observation_covariance` \"\"\" def __init__(self, observation = None,", "= time.time() self._filter_update(t) self.times[1] += time.time() - start_time if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt)", "None, initial_mean = None, initial_covariance = None, transition_matrix = None, observation_matrix = None,", "n_dim_obs] {numpy-array, float} also known as :math:`H`. observation matrix from x_{t} to y_{t}", "= 1 #AA G /= C #AA if self.tm_count==1: self.F = self.HI @", "variable dtype {type} data type of numpy-array use_gpu {bool} wheather use gpu and", "\\\"middle\\\", or \\\"backward\\\".\") if self.estimation_mode in [\"forward\", \"backward\"]: self.tau = int(estimation_length) self.tau2 =", "(adjacency_matrix, array2d, -2)], n_dim_obs, self.use_gpu ) # self.y = _parse_observations(observation) self.y = self.xp.asarray(observation).copy()", "self.use_gpu ) self.n_dim_obs = _determine_dimensionality( [(observation_matrix, array2d, -2), (observation_covariance, array2d, -2), (adjacency_matrix, array2d,", "* self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) +", "start_time Fh = self.HI @ G @ self.H self.times[3] += time.time() - start_time", "from predicted result Returns (numpy-array, float) : mean of hidden state at time", ":math:`y`. observation value initial_mean [n_dim_sys] {float} also known as :math:`\\mu_0`. 
initial state mean", "self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t >= self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and", "space models \"\"\" from logging import getLogger, StreamHandler, DEBUG logger = getLogger(\"llock\") handler", "So, your setting \\\"{}\\\" need to be changed.\".format(method)) if estimation_mode in [\"forward\", \"middle\",", "initial state covariance transition_matrix [n_dim_sys, n_dim_sys] or [n_dim_sys, n_dim_sys]{numpy-array, float} also known as", "\"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"), self.F) self.tm_count += 1 def get_predicted_value(self, dim =", "a link between i and j, A[i,j]=1, else A[i,j]=0. Besides, you should A[i,i]=1", "transition matrix from x_{t-1} to x_{t} observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float} also known", "<= self.x_smooth.shape[1]: # return self.x_smooth[:, int(dim)] # else: # raise ValueError('The dim must", "for time t # F = _last_dims(self.F, t, 2) # # calculate fixed", "= self.xp.linalg.pinv(self.H) if observation_covariance is None: self.R = self.xp.eye(self.n_dim_obs, dtype = dtype) else:", "self.initial_covariance.copy() self._update_transition_matrix(self.tau) else: if t >= 2 and t < T-self.tau+1 and (t-1)%self.I==0", "# Args: # dim {int} : dimensionality for extract from RTS smoothed result", "estimation_mode else: raise ValueError(\"\\\"estimation_mode\\\" must be choosen from \\\"forward\\\",\" + \" \\\"middle\\\", or", "dtype = self.dtype) # calculate prediction and filter for every time for t", "raise ValueError(\"\\\"estimation_mode\\\" must be choosen from \\\"forward\\\",\" + \" \\\"middle\\\", or \\\"backward\\\".\") if", "from times [0...t] \"\"\" T = self.y.shape[0] self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype =", "else: self.R = self.xp.asarray(observation_covariance, dtype = dtype) if adjacency_matrix is None: self.A =", "@ self.H.T) + R) ) self.x_filt[t] = self.x_pred[t] + K @ ( self.y[t]", "{int} : dimensionality for extract from predicted result Returns (numpy-array, float) : mean", "elif t >= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t >= self.tau2+2", "if self.tm_count==1: self.F = self.HI @ G @ self.H else: self.times[2] += time.time()", "dimensionality for extract from filtered result Returns (numpy-array, float) : mean of hidden", "self.dtype = dtype self.times = self.xp.zeros(5) def forward(self): \"\"\"Calculate prediction and filter for", "math import time import multiprocessing as mp import itertools import numpy as np", "= len(str(int(self.y.shape[0] / self.I))) self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(0).zfill(self.fillnum) + \".npy\"), self.F) if num_cpu", "= getLogger(\"llock\") handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False import os", "eta (in (0,1]) update rate for update transition matrix F cutoff cutoff distance", "global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA C[self.xp.ix_(global_node_number, global_node_number)] +=", "\\ _determine_dimensionality def _local_calculation(i, j, A, y): local_A = A[i] | A[j] local_node_number_i", "self.use_gpu) # calculate predicted distribution for time t self.x_pred[t] = self.F @ self.x_filt[t-1]", "t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and 
self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2) start_time = time.time() self._predict_update(t) self.times[0]", "self.y.shape[0] # self.x_smooth = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) # self.V_smooth = self.xp.zeros((T,", "self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self, t): \"\"\"Calculate fileter update Args: t {int} :", "logger = getLogger(\"llock\") handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False import", "import time import multiprocessing as mp import itertools import numpy as np from", "if transition_matrix is None: self.F = self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.F =", "( self.H.T @ self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T) + R) ) self.x_filt[t] =", "= None, observation_matrix = None, transition_covariance = None, observation_covariance = None, adjacency_matrix =", "1]))) # # fixed interval smoothing # self.x_smooth[t] = self.x_filt[t] \\ # +", "each element of transition matrix \"local-average\": average calculation for specific two observation dimenstions", "# determine dimensionality self.n_dim_sys = _determine_dimensionality( [(transition_matrix, array2d, -2), (initial_mean, array1d, -1), (initial_covariance,", "&\\sim N(0, [[Q_{t}, O], [O, R_{t}]]) The LLOCK is an algorithm designed to", "as :math:`A`. adjacency matrix, if there is a link between i and j,", "self.x_smooth[t] = self.x_filt[t] \\ # + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t +", "time print(\"\\r filter calculating... t={}\".format(t) + \"/\" + str(T), end=\"\") if t ==", "self.xp.asarray(adjacency_matrix, dtype = bool) if method in [\"elementwise\", \"local-average\", \"all-average\"]: self.method = method", "if False, calculate the matrix after filtering. n_dim_sys {int} dimension of system transition", "= self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.initial_covariance = self.xp.asarray(initial_covariance, dtype = dtype) if", "G[A] = G_local elif self.method==\"local-average\": # local-average for i in range(self.n_dim_obs): local_node_number =", "observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float} also known as :math:`H`. observation matrix from x_{t}", "dtype) if observation_matrix is None: self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype) else:", "real-time. As all state transitions and observations are linear with Gaussian distributed noise,", "t={}\".format(t) + \"/\" + str(T), end=\"\") if t == 0: # initial setting", "matrices and filtered states. if this variable is `None`, cannot save them. advance_mode", "True, calculate transition matrix before filtering. 
if False, calculate the matrix after filtering.", "self.x_filt[t] \\ # + self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1]) #", "t in reversed(range(T - 1)) : # # visualize calculating times # print(\"\\r", "Fh = self.HI @ G @ self.H self.times[3] += time.time() - start_time self.times[4]", "# : mean of hidden state at time t given observations # from", "dtype) self.HI = self.xp.linalg.pinv(self.H) if observation_covariance is None: self.R = self.xp.eye(self.n_dim_obs, dtype =", "_determine_dimensionality( [(transition_matrix, array2d, -2), (initial_mean, array1d, -1), (initial_covariance, array2d, -2), (observation_matrix, array2d, -1)],", "value # Args: # dim {int} : dimensionality for extract from RTS smoothed", "= _last_dims(self.Q, t - 1, 2, self.use_gpu) # calculate predicted distribution for time", "self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA C[self.xp.ix_(global_node_number, global_node_number)]", "int((estimation_length - 1) / 2) self.tau = 2 * self.tau2 + 1 self.I", "# except : # self.filter() # T = self.y.shape[0] # self.x_smooth = self.xp.zeros((T,", "observation_covariance = None, adjacency_matrix = None, method = \"elementwise\", estimation_length = 10, estimation_interval", ": `transition_covariance` H : `observation_matrix` R : `observation_covariance` \"\"\" def __init__(self, observation =", "matrix from x_{t-1} to x_{t} observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float} also known as", "extract from predicted result Returns (numpy-array, float) : mean of hidden state at", "from logging import getLogger, StreamHandler, DEBUG logger = getLogger(\"llock\") handler = StreamHandler() handler.setLevel(DEBUG)", "data type of numpy-array use_gpu {bool} wheather use gpu and cupy. if True,", "if num_cpu == \"all\": self.num_cpu = mp.cpu_count() else: self.num_cpu = num_cpu self.eta =", "math:: x_{t+1} &= F_{t} x_{t} + b_{t} + v_{t} \\\\ y_{t} &= H_{t}", "A = self.A.get() y = self.y[t-self.tau:t+1].get() else: A = self.A y = self.y[t-self.tau:t+1]", "float} also known as :math:`F`. 
transition matrix from x_{t-1} to x_{t} observation_matrix [n_dim_sys,", "implement `forward` try : self.x_pred[0] except : self.forward() if dim is None: return", "G_local elif self.method==\"local-average\": # local-average for i in range(self.n_dim_obs): local_node_number = len(self.xp.where(self.A[i][:i])[0]) #LA", "T-2] (notice t range is reversed from 1~T) # for t in reversed(range(T", "of observation variable dtype {type} data type of numpy-array use_gpu {bool} wheather use", "if not implement `filter`, implement `filter` # try : # self.x_pred[0] # except", "observation time \"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time() if self.method==\"elementwise\":", "self._filter_update(t) self.times[1] += time.time() - start_time if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self,", "self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum)", "dtype = dtype) else: self.initial_covariance = self.xp.asarray(initial_covariance, dtype = dtype) if transition_matrix is", "start_time self.times[4] += 1 self.F = self.F - self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F -", "time t self.x_pred[t] = self.F @ self.x_filt[t-1] self.V_pred = self.F @ self.V_filt @", "is a link between i and j, A[i,j]=1, else A[i,j]=0. Besides, you should", "transition matrix F save_dir {str, directory-like} directory for saving transition matrices and filtered", "for time t \"\"\" # extract parameters for time t R = _last_dims(self.R,", "time.time() - start_time if self.xp.any(self.xp.isnan(self.y[t])): self.x_filt[t] = self.x_pred[t] self.V_filt = self.V_pred else :", ".util_functions import _parse_observations, _last_dims, \\ _determine_dimensionality def _local_calculation(i, j, A, y): local_A =", "distributed noise, these distributions can be represented exactly as Gaussian distributions with mean", "j, A, y): local_A = A[i] | A[j] local_node_number_i = len(np.where(local_A[:i])[0]) local_node_number_j =", ": `observation` F : `transition_matrix` Q : `transition_covariance` H : `observation_matrix` R :", "= self.x_pred[t] + K @ ( self.y[t] - (self.H @ self.x_pred[t]) ) self.V_filt", "G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time() if self.method==\"elementwise\": # elementwise if", "calculation \"elementwise\": calculation for each element of transition matrix \"local-average\": average calculation for", "t, 2, self.use_gpu) # calculate filter step K = self.V_pred @ ( self.H.T", "= p.starmap(_local_calculation, zip(where_is_A[0], where_is_A[1], itertools.repeat(A), itertools.repeat(y))) p.close() G[A] = G_local elif self.method==\"local-average\": #", "= None, adjacency_matrix = None, method = \"elementwise\", estimation_length = 10, estimation_interval =", "= self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[self.xp.ix_(global_node_number, global_node_number)] +=", "return self.x_pred elif dim <= self.x_pred.shape[1]: return self.x_pred[:, int(dim)] else: raise ValueError('The dim", "G[global_node_number, i] += Gh[:, local_node_number] #LA G /= 2.0 #LA elif self.method==\"all-average\": #all-average", "= self.V_pred else : start_time = time.time() self._filter_update(t) self.times[1] += 
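    # Window placement implied by `estimation_mode` (read off the update
    # conditions in forward() below, so indices here are descriptive, not
    # normative): at time t, "forward" re-estimates F from the upcoming
    # window [t-1 ... t+tau-1], "backward" from the most recent window
    # [t-tau ... t], and "middle" from [t-tau2 ... t+tau2] centred on t.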
    def forward(self):
        """Calculate prediction and filter for observation times.

        Attributes:
            T {int} : length of data y
            x_pred [n_time, n_dim_sys] {numpy-array, float}
                : mean of hidden state at time t given observations
                from times [0...t-1]
            V_pred [n_dim_sys, n_dim_sys] {numpy-array, float}
                : covariance of hidden state at time t given observations
                from times [0...t-1]
            x_filt [n_time, n_dim_sys] {numpy-array, float}
                : mean of hidden state at time t given observations
                from times [0...t]
            V_filt [n_dim_sys, n_dim_sys] {numpy-array, float}
                : covariance of hidden state at time t given observations
                from times [0...t]
        """
        T = self.y.shape[0]
        self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)
        self.x_filt = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)

        # calculate prediction and filter for every time
        for t in range(T):
            # visualize calculating time
            print("\r filter calculating... t={}".format(t) + "/" + str(T), end="")

            if t == 0:
                # initial setting
                self.x_pred[0] = self.initial_mean
                self.V_pred = self.initial_covariance.copy()
                self._update_transition_matrix(self.tau)
            else:
                if t >= 2 and t < T-self.tau+1 and (t-1)%self.I==0 \
                        and self.estimation_mode=="forward":
                    self._update_transition_matrix(t+self.tau-1)
                elif t >= self.tau+1 and (t-self.tau)%self.I==0 \
                        and self.estimation_mode=="backward":
                    self._update_transition_matrix(t)
                elif t >= self.tau2+2 and t < T-self.tau2 \
                        and (t-self.tau2-1)%self.I==0 \
                        and self.estimation_mode=="middle":
                    self._update_transition_matrix(t+self.tau2)
                start_time = time.time()
                self._predict_update(t)
                self.times[0] += time.time() - start_time

            if self.xp.any(self.xp.isnan(self.y[t])):
                # missing observation: carry the prediction over
                self.x_filt[t] = self.x_pred[t]
                self.V_filt = self.V_pred
            else:
                start_time = time.time()
                self._filter_update(t)
                self.times[1] += time.time() - start_time

        if self.save_change:
            self.xp.save(os.path.join(self.save_dir, "states.npy"), self.x_filt)


    def _predict_update(self, t):
        """Calculate the predict update

        Args:
            t {int} : observation time
        """
        # extract parameters for time t-1
        Q = _last_dims(self.Q, t - 1, 2, self.use_gpu)

        # calculate predicted distribution for time t
        self.x_pred[t] = self.F @ self.x_filt[t-1]
        self.V_pred = self.F @ self.V_filt @ self.F.T + Q


    def _filter_update(self, t):
        """Calculate the filter update

        Args:
            t {int} : observation time

        Attributes:
            K [n_dim_sys, n_dim_obs] {numpy-array, float}
                : Kalman gain matrix for time t
        """
        # extract parameters for time t
        R = _last_dims(self.R, t, 2, self.use_gpu)

        # calculate filter step
        K = self.V_pred @ (
            self.H.T @ self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T) + R)
        )
        self.x_filt[t] = self.x_pred[t] + K @ (
            self.y[t] - (self.H @ self.x_pred[t])
        )
        self.V_filt = self.V_pred - K @ (self.H @ self.V_pred)
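    # For reference, the two private methods above are the standard Kalman
    # recursions (this comment restates the code, it adds nothing):
    #   prediction:  x_pred[t] = F x_filt[t-1],  V_pred = F V_filt F^T + Q
    #   update:      K = V_pred H^T (H V_pred H^T + R)^{-1}
    #                x_filt[t] = x_pred[t] + K (y[t] - H x_pred[t])
    #                V_filt = V_pred - K H V_pred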
    def _update_transition_matrix(self, t):
        """Update transition matrix

        Args:
            t {int} : observation time
        """
        G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype)

        start_time = time.time()
        if self.method=="elementwise": # elementwise
            if self.use_gpu:
                A = self.A.get()
                y = self.y[t-self.tau:t+1].get()
            else:
                A = self.A
                y = self.y[t-self.tau:t+1]
            where_is_A = np.where(A)
            p = mp.Pool(self.num_cpu)
            G_local = p.starmap(_local_calculation,
                zip(where_is_A[0], where_is_A[1],
                    itertools.repeat(A), itertools.repeat(y)))
            p.close()
            G[A] = G_local
        elif self.method=="local-average": # local-average
            for i in range(self.n_dim_obs):
                local_node_number = len(self.xp.where(self.A[i][:i])[0]) #LA
                global_node_number = self.xp.where(self.A[i])[0]
                Gh = self.y[t-self.tau+1:t+1, global_node_number].T \
                    @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T)
                G[i, global_node_number] += Gh[local_node_number] #LA
                G[global_node_number, i] += Gh[:, local_node_number] #LA
            G /= 2.0 #LA
        elif self.method=="all-average": # all-average
            C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA
            for i in range(self.n_dim_obs):
                global_node_number = self.xp.where(self.A[i])[0]
                Gh = self.y[t-self.tau+1:t+1, global_node_number].T \
                    @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T)
                G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA
                C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA
            C[C==0] = 1 #AA
            G /= C #AA

        if self.tm_count==1:
            self.F = self.HI @ G @ self.H
        else:
            self.times[2] += time.time() - start_time
            Fh = self.HI @ G @ self.H
            self.times[3] += time.time() - start_time
            self.times[4] += 1
            self.F = self.F - self.eta * self.xp.minimum(
                self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff)

        if self.save_change:
            self.xp.save(os.path.join(self.save_dir,
                "transition_matrix_" + str(self.tm_count).zfill(self.fillnum)
                + ".npy"), self.F)

        self.tm_count += 1
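    # Reading of the update above: on the window y[t-tau ... t], G is a
    # least-squares one-step propagator in observation space (Y_next ~ G Y_prev,
    # solved via the pseudo-inverse and restricted to the sparsity pattern of
    # A); Fh = HI @ G @ H pulls it back to state space, and F moves toward Fh
    # by a step of size eta, with each entry of the change clipped to
    # [-cutoff, cutoff].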
Attributes: y :", "self.x_pred[t] + K @ ( self.y[t] - (self.H @ self.x_pred[t]) ) self.V_filt =", "= 1, eta = 1., cutoff = 10., estimation_mode = \"backward\", save_dir =", "{numpy-array, float} # : mean of hidden state distributions for times # [0...n_times-1]", "N(0, [[Q_{t}, O], [O, R_{t}]]) The LLOCK is an algorithm designed to estimate", "`observation_matrix` R : `observation_covariance` \"\"\" def __init__(self, observation = None, initial_mean = None,", "covariance observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array, float} also known as :math:`R`. observation covariance", "n_dim_obs, self.use_gpu ) # self.y = _parse_observations(observation) self.y = self.xp.asarray(observation).copy() if initial_mean is", "start_time = time.time() self._predict_update(t) self.times[0] += time.time() - start_time if self.xp.any(self.xp.isnan(self.y[t])): self.x_filt[t] =", "elif dim <= self.x_pred.shape[1]: return self.x_pred[:, int(dim)] else: raise ValueError('The dim must be", "Attributes: T {int} : length of data y x_pred [n_time, n_dim_sys] {numpy-array, float}", "for Linear-Gaussian state space models \"\"\" from logging import getLogger, StreamHandler, DEBUG logger", "handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False import os import math import time import", "and covariances `V_filt`. Args: observation [n_time, n_dim_obs] {numpy-array, float} also known as :math:`y`.", "if save_dir is None: self.save_change = False else: self.save_change = True self.save_dir =", "t, 2) # # calculate fixed interval smoothing gain # A = self.xp.dot(self.V_filt[t],", ": observation time \"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time() if", "self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(0).zfill(self.fillnum) + \".npy\"), self.F) if num_cpu == \"all\": self.num_cpu =", "update rate for update transition matrix F cutoff cutoff distance for update transition", "= self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[i, global_node_number] += Gh[local_node_number] #LA G[global_node_number,", "x_smooth [n_time, n_dim_sys] {numpy-array, float} # : mean of hidden state distributions for", "in range(T): # visualize calculating time print(\"\\r filter calculating... t={}\".format(t) + \"/\" +", "for every time for t in range(T): # visualize calculating time print(\"\\r filter", "dimensionality for extract from RTS smoothed result # Returns (numpy-array, float) # :", "every time for t in range(T): # visualize calculating time print(\"\\r filter calculating...", "filtering. if False, calculate the matrix after filtering. n_dim_sys {int} dimension of system", "t self.x_pred[t] = self.F @ self.x_filt[t-1] self.V_pred = self.F @ self.V_filt @ self.F.T", "{bool} wheather use gpu and cupy. 
class LocalLOCK(object):
    """Implements the Local LOCK.
    This class implements the LLOCK, for a Linear Gaussian model specified by,

    .. math::
        x_{t+1} &= F_{t} x_{t} + b_{t} + v_{t} \\
        y_{t} &= H_{t} x_{t} + d_{t} + w_{t} \\
        [v_{t}, w_{t}]^T &\sim N(0, [[Q_{t}, O], [O, R_{t}]])

    The LLOCK is an algorithm designed to estimate
    :math:`P(x_t | y_{0:t})` and :math:`F` in real-time.
    As all state transitions and observations are linear with Gaussian
    distributed noise, these distributions can be represented exactly as
    Gaussian distributions with mean `x_filt[t]` and covariance `V_filt`.

    Args:
        observation [n_time, n_dim_obs] {numpy-array, float}
            also known as :math:`y`. observation value
        initial_mean [n_dim_sys] {float}
            also known as :math:`\mu_0`. initial state mean
        initial_covariance [n_dim_sys, n_dim_sys] {numpy-array, float}
            also known as :math:`\Sigma_0`. initial state covariance
        transition_matrix [n_dim_sys, n_dim_sys] {numpy-array, float}
            also known as :math:`F`. transition matrix from x_{t-1} to x_{t}
        observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float}
            also known as :math:`H`. observation matrix from x_{t} to y_{t}
        transition_covariance [n_time-1, n_dim_sys, n_dim_sys]
                or [n_dim_sys, n_dim_sys] {numpy-array, float}
            also known as :math:`Q`. system transition covariance
        observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array, float}
            also known as :math:`R`. observation covariance
        adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float}
            also known as :math:`A`. adjacency matrix: if there is a link
            between i and j, then A[i,j]=1, else A[i,j]=0.
            Besides, you should set A[i,i]=1 for all i.
        method {string}
            method for the localized calculation
            "elementwise": calculation for each element of the transition matrix
            "local-average": average calculation for specific pairs of
                observation dimensions
            "all-average": average calculation for each observation dimension
        estimation_length {int}
            length of the observation window used to estimate the
            transition matrix F
        estimation_interval {int}
            interval between updates of the transition matrix F
        eta {float} in (0,1]
            update rate for the transition matrix F
        cutoff {float}
            cutoff distance for updates of the transition matrix F
        estimation_mode {string}
            "forward", "middle", or "backward": position of the estimation
            window relative to the update time
        save_dir {str, directory-like}
            directory for saving transition matrices and filtered states.
            if this variable is `None`, they are not saved.
        advance_mode {bool}
            if True, calculate the transition matrix before filtering.
            if False, calculate the matrix after filtering.
        n_dim_sys {int}
            dimension of the system transition variable
        n_dim_obs {int}
            dimension of the observation variable
        dtype {type}
            data type of the numpy arrays
        use_gpu {bool}
            whether to use the GPU through `cupy`.
            if True, you need to install the package `cupy`.
            if False, `numpy` is used for the calculation.
        num_cpu {int} or "all"
            number of CPUs used while calculating the transition matrix.
            you can set "all" or a positive integer.

    Attributes:
        y : `observation`
        F : `transition_matrix`
        Q : `transition_covariance`
        H : `observation_matrix`
        R : `observation_covariance`
    """
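    # Note: for a one-dimensional chain of observation nodes, a suitable
    # adjacency matrix couples each node with itself and its two neighbours.
    # A hypothetical 4-node example, not taken from the original module:
    #     A = [[1, 1, 0, 0],
    #          [1, 1, 1, 0],
    #          [0, 1, 1, 1],
    #          [0, 0, 1, 1]]
    # Each row then defines the local neighbourhood used when estimating the
    # corresponding rows and columns of the operator G.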
    def __init__(self, observation = None,
                initial_mean = None, initial_covariance = None,
                transition_matrix = None, observation_matrix = None,
                transition_covariance = None, observation_covariance = None,
                adjacency_matrix = None,
                method = "elementwise",
                estimation_length = 10, estimation_interval = 1,
                eta = 1., cutoff = 10.,
                estimation_mode = "backward",
                save_dir = None,
                n_dim_sys = None, n_dim_obs = None, dtype = "float32",
                use_gpu = False, num_cpu = "all"):
        """Setup initial parameters."""
        if use_gpu:
            try:
                import cupy
                self.xp = cupy
                self.use_gpu = True
            except ImportError:
                self.xp = np
                self.use_gpu = False
        else:
            self.xp = np
            self.use_gpu = False

        # determine dimensionality
        self.n_dim_sys = _determine_dimensionality(
            [(transition_matrix, array2d, -2),
             (initial_mean, array1d, -1),
             (initial_covariance, array2d, -2),
             (observation_matrix, array2d, -1)],
            n_dim_sys, self.use_gpu
        )
        self.n_dim_obs = _determine_dimensionality(
            [(observation_matrix, array2d, -2),
             (observation_covariance, array2d, -2),
             (adjacency_matrix, array2d, -2)],
            n_dim_obs, self.use_gpu
        )

        # self.y = _parse_observations(observation)
        self.y = self.xp.asarray(observation).copy()

        if initial_mean is None:
            self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype)
        else:
            self.initial_mean = self.xp.asarray(initial_mean, dtype = dtype)

        if initial_covariance is None:
            self.initial_covariance = self.xp.eye(self.n_dim_sys, dtype = dtype)
        else:
            self.initial_covariance = self.xp.asarray(initial_covariance, dtype = dtype)

        if transition_matrix is None:
            self.F = self.xp.eye(self.n_dim_sys, dtype = dtype)
        else:
            self.F = self.xp.asarray(transition_matrix, dtype = dtype)

        if transition_covariance is not None:
            self.Q = self.xp.asarray(transition_covariance, dtype = dtype)
        else:
            self.Q = self.xp.eye(self.n_dim_sys, dtype = dtype)

        if observation_matrix is None:
            self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype)
        else:
            self.H = self.xp.asarray(observation_matrix, dtype = dtype)
        self.HI = self.xp.linalg.pinv(self.H)

        if observation_covariance is None:
            self.R = self.xp.eye(self.n_dim_obs, dtype = dtype)
        else:
            self.R = self.xp.asarray(observation_covariance, dtype = dtype)

        if adjacency_matrix is None:
            # identity adjacency couples each node only with itself
            self.A = self.xp.eye(self.n_dim_obs, dtype = bool)
        else:
            self.A = self.xp.asarray(adjacency_matrix, dtype = bool)

        if method in ["elementwise", "local-average", "all-average"]:
            self.method = method
        else:
            raise ValueError("Variable \"method\" only allows \"elementwise\", "
                            + "\"local-average\" or \"all-average\". So, your "
                            + "setting \"{}\" needs to be changed.".format(method))

        if estimation_mode in ["forward", "middle", "backward"]:
            self.estimation_mode = estimation_mode
        else:
            raise ValueError("\"estimation_mode\" must be chosen from \"forward\","
                            + " \"middle\", or \"backward\".")

        if self.estimation_mode in ["forward", "backward"]:
            self.tau = int(estimation_length)
            self.tau2 = int((estimation_length - 1) / 2)
        else:
            self.tau2 = int((estimation_length - 1) / 2)
            self.tau = 2 * self.tau2 + 1

        self.I = estimation_interval
        self.tm_count = 1

        if save_dir is None:
            self.save_change = False
        else:
            self.save_change = True
            self.save_dir = save_dir
            self.fillnum = len(str(int(self.y.shape[0] / self.I)))
            self.xp.save(os.path.join(self.save_dir, "transition_matrix_"
                + str(0).zfill(self.fillnum) + ".npy"), self.F)

        if num_cpu == "all":
            self.num_cpu = mp.cpu_count()
        else:
            self.num_cpu = num_cpu

        self.eta = eta
        self.cutoff = cutoff
        self.dtype = dtype
        self.times = self.xp.zeros(5)
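    # Window bookkeeping, as set up above: in "forward" and "backward" mode
    # the estimation window simply has length tau = estimation_length, while
    # in "middle" mode it is forced to an odd length tau = 2*tau2 + 1 so the
    # window can be centred on the update time. For example,
    # estimation_length = 10 gives tau = 10 in forward/backward mode, but
    # tau2 = 4 and tau = 9 in middle mode.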
    def forward(self):
        """Calculate prediction and filter for observation times.

        Attributes:
            T {int} : length of data y
            x_pred [n_time, n_dim_sys] {numpy-array, float}
                : mean of hidden state at time t given observations
                from times [0...t-1]
            V_pred [n_dim_sys, n_dim_sys] {numpy-array, float}
                : covariance of hidden state at time t given observations
                from times [0...t-1]
            x_filt [n_time, n_dim_sys] {numpy-array, float}
                : mean of hidden state at time t given observations
                from times [0...t]
            V_filt [n_dim_sys, n_dim_sys] {numpy-array, float}
                : covariance of hidden state at time t given observations
                from times [0...t]
        """
        T = self.y.shape[0]
        self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)
        self.x_filt = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype)

        # calculate prediction and filter for every time
        for t in range(T):
            # visualize the calculation progress
            print("\r filter calculating... t={}".format(t) + "/" + str(T), end="")

            if t == 0:
                # initial setting
                self.x_pred[0] = self.initial_mean
                self.V_pred = self.initial_covariance.copy()
                self._update_transition_matrix(self.tau)
            else:
                if t >= 2 and t < T-self.tau+1 and (t-1)%self.I==0 \
                        and self.estimation_mode=="forward":
                    self._update_transition_matrix(t+self.tau-1)
                elif t >= self.tau+1 and (t-self.tau)%self.I==0 \
                        and self.estimation_mode=="backward":
                    self._update_transition_matrix(t)
                elif t >= self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 \
                        and self.estimation_mode=="middle":
                    self._update_transition_matrix(t+self.tau2)

                start_time = time.time()
                self._predict_update(t)
                self.times[0] += time.time() - start_time

            if self.xp.any(self.xp.isnan(self.y[t])):
                # missing observation: skip the filter step
                self.x_filt[t] = self.x_pred[t]
                self.V_filt = self.V_pred
            else:
                start_time = time.time()
                self._filter_update(t)
                self.times[1] += time.time() - start_time

        if self.save_change:
            self.xp.save(os.path.join(self.save_dir, "states.npy"), self.x_filt)
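    # Scheduling summary for _update_transition_matrix in `forward` above:
    #   "forward"  mode refits F from the upcoming window, calling with
    #              t + tau - 1 every I steps;
    #   "backward" mode refits F from the window y[t-tau : t] just passed;
    #   "middle"   mode centres the window on t, calling with t + tau2.
    # In every mode the refit uses the tau+1 most recent observations around
    # the argument time (see the slice y[t-tau : t+1] in the method below).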
    def _predict_update(self, t):
        """Calculate the prediction update.

        Args:
            t {int} : observation time
        """
        # extract parameters for time t-1
        Q = _last_dims(self.Q, t - 1, 2, self.use_gpu)

        # calculate the predicted distribution for time t
        self.x_pred[t] = self.F @ self.x_filt[t-1]
        self.V_pred = self.F @ self.V_filt @ self.F.T + Q
    def _filter_update(self, t):
        """Calculate the filter update.

        Args:
            t {int} : observation time

        Attributes:
            K [n_dim_sys, n_dim_obs] {numpy-array, float}
                : Kalman gain matrix for time t
        """
        # extract parameters for time t
        R = _last_dims(self.R, t, 2, self.use_gpu)

        # calculate the filter step
        K = self.V_pred @ (
            self.H.T @ self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T) + R)
        )
        self.x_filt[t] = self.x_pred[t] + K @ (self.y[t] - (self.H @ self.x_pred[t]))
        self.V_filt = self.V_pred - K @ (self.H @ self.V_pred)
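    # The filter step above is the standard Kalman update, written out:
    #     K_t    = V_pred H^T (H V_pred H^T + R)^{-1}
    #     x_filt = x_pred + K_t (y_t - H x_pred)
    #     V_filt = V_pred - K_t H V_pred
    # so x_filt and V_filt parametrize the Gaussian P(x_t | y_{0:t}).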
    def _update_transition_matrix(self, t):
        """Update the transition matrix.

        Args:
            t {int} : observation time
        """
        G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype)
        start_time = time.time()

        if self.method=="elementwise": # elementwise
            if self.use_gpu:
                A = self.A.get()
                y = self.y[t-self.tau:t+1].get()
            else:
                A = self.A
                y = self.y[t-self.tau:t+1]
            where_is_A = np.where(A)
            p = mp.Pool(self.num_cpu)
            G_local = p.starmap(_local_calculation,
                            zip(where_is_A[0], where_is_A[1],
                                itertools.repeat(A), itertools.repeat(y)))
            p.close()
            G[A] = G_local
        elif self.method=="local-average": # local-average
            for i in range(self.n_dim_obs):
                local_node_number = len(self.xp.where(self.A[i][:i])[0]) #LA
                global_node_number = self.xp.where(self.A[i])[0]
                Gh = self.y[t-self.tau+1:t+1, global_node_number].T \
                    @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T)
                G[i, global_node_number] += Gh[local_node_number] #LA
                G[global_node_number, i] += Gh[:, local_node_number] #LA
            G /= 2.0 #LA
        elif self.method=="all-average": # all-average
            C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA
            for i in range(self.n_dim_obs):
                global_node_number = self.xp.where(self.A[i])[0]
                Gh = self.y[t-self.tau+1:t+1, global_node_number].T \
                    @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T)
                G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA
                C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA
            C[C==0] = 1 #AA
            G /= C #AA

        if self.tm_count==1:
            self.F = self.HI @ G @ self.H
        else:
            self.times[2] += time.time() - start_time
            Fh = self.HI @ G @ self.H
            self.times[3] += time.time() - start_time
            self.times[4] += 1
            self.F = self.F - self.eta * self.xp.minimum(
                self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff)

        if self.save_change:
            self.xp.save(os.path.join(self.save_dir, "transition_matrix_"
                + str(self.tm_count).zfill(self.fillnum) + ".npy"), self.F)
        self.tm_count += 1
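    # After the first fit, F is not replaced outright: the update above is a
    # clipped relaxation,
    #     F <- F - eta * clip(F - Fh, -cutoff, +cutoff),
    # where Fh = HI G H maps the observed operator G back into state space.
    # With eta = 1 and a large cutoff this reduces to F <- Fh; smaller eta
    # smooths the sequence of estimates, and the cutoff limits how far any
    # single element may move in one update.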
    def get_predicted_value(self, dim = None):
        """Get the predicted value.

        Args:
            dim {int} : dimensionality for extract from the predicted result

        Returns (numpy-array, float)
            : mean of hidden state at time t given observations
            from times [0...t-1]
        """
        # if `forward` has not been run yet, run it
        try:
            self.x_pred[0]
        except:
            self.forward()

        if dim is None:
            return self.x_pred
        elif dim <= self.x_pred.shape[1]:
            return self.x_pred[:, int(dim)]
        else:
            raise ValueError("The dim must be less than "
                + str(self.x_pred.shape[1]) + ".")

    def get_filtered_value(self, dim = None):
        """Get the filtered value.

        Args:
            dim {int} : dimensionality for extract from the filtered result

        Returns (numpy-array, float)
            : mean of hidden state at time t given observations
            from times [0...t]
        """
        # if `forward` has not been run yet, run it
        try:
            self.x_filt[0]
        except:
            self.forward()

        if dim is None:
            return self.x_filt
        elif dim <= self.x_filt.shape[1]:
            return self.x_filt[:, int(dim)]
        else:
            raise ValueError("The dim must be less than "
                + str(self.x_filt.shape[1]) + ".")
observation covariance adjacency_matrix [n_dim_sys, n_dim_sys]", "dim <= self.x_smooth.shape[1]: # return self.x_smooth[:, int(dim)] # else: # raise ValueError('The dim", "_last_dims, \\ _determine_dimensionality def _local_calculation(i, j, A, y): local_A = A[i] | A[j]", "= False else: self.save_change = True self.save_dir = save_dir self.fillnum = len(str(int(self.y.shape[0] /", "dtype = self.dtype) # self.V_smooth = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys), # dtype = self.dtype)", "self.y = self.xp.asarray(observation).copy() if initial_mean is None: self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype)", "Args: t {int} : observation time \"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time", "states. if this variable is `None`, cannot save them. advance_mode {bool} if True,", "covariance of hidden state at time t given observations from times [0...t] \"\"\"", "float} # : covariances of hidden state distributions for times # [0...n_times-1] given", "[n_dim_sys, n_dim_sys]{numpy-array, float} also known as :math:`F`. transition matrix from x_{t-1} to x_{t}", "K @ ( self.y[t] - (self.H @ self.x_pred[t]) ) self.V_filt = self.V_pred -", "self.y[t-self.tau:t+1] where_is_A = np.where(A) p = mp.Pool(self.num_cpu) G_local = p.starmap(_local_calculation, zip(where_is_A[0], where_is_A[1], itertools.repeat(A),", "initial setting self.x_pred[0] = self.initial_mean self.V_pred = self.initial_covariance.copy() self._update_transition_matrix(self.tau) else: if t >=", "# F = _last_dims(self.F, t, 2) # # calculate fixed interval smoothing gain", "[n_dim_sys, n_dim_obs] {numpy-array, float} : Kalman gain matrix for time t \"\"\" #", "initial_mean [n_dim_sys] {float} also known as :math:`\\mu_0`. initial state mean initial_covariance [n_dim_sys, n_dim_sys]", "fixed interval smoothing # self.x_smooth[t] = self.x_filt[t] \\ # + self.xp.dot(A, self.x_smooth[t +", "n_dim_sys] {numpy-array, float} also known as :math:`Q`. system transition covariance observation_covariance [n_time, n_dim_obs,", "transition matrix F cutoff cutoff distance for update transition matrix F save_dir {str,", "= cutoff self.dtype = dtype self.times = self.xp.zeros(5) def forward(self): \"\"\"Calculate prediction and", "len(np.where(local_A[:j])[0]) global_node_number = np.where(local_A)[0] Gh = y[1:, global_node_number].T \\ @ np.linalg.pinv(y[:-1, global_node_number].T) return", "num_cpu {int} or `all` number of cpus duaring calculating transition matrix. 
you can", "self.n_dim_sys), # dtype = self.dtype) # A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype)", "gain # \"\"\" # # if not implement `filter`, implement `filter` # try", "method = \"elementwise\", estimation_length = 10, estimation_interval = 1, eta = 1., cutoff", "t >= self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2) start_time", "Gh[:, local_node_number] #LA G /= 2.0 #LA elif self.method==\"all-average\": #all-average C = self.xp.zeros((self.n_dim_obs,", "\"transition_matrix_\" + str(0).zfill(self.fillnum) + \".npy\"), self.F) if num_cpu == \"all\": self.num_cpu = mp.cpu_count()", "transition matrix Args: t {int} : observation time \"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs),", "given observations from times [0...t-1] \"\"\" # if not implement `forward`, implement `forward`", "self.V_filt @ self.F.T + Q def _filter_update(self, t): \"\"\"Calculate fileter update without noise", "observation_matrix = None, transition_covariance = None, observation_covariance = None, adjacency_matrix = None, method", "= len(np.where(local_A[:i])[0]) local_node_number_j = len(np.where(local_A[:j])[0]) global_node_number = np.where(local_A)[0] Gh = y[1:, global_node_number].T \\", "as np from .utils import array1d, array2d from .util_functions import _parse_observations, _last_dims, \\", "[(transition_matrix, array2d, -2), (initial_mean, array1d, -1), (initial_covariance, array2d, -2), (observation_matrix, array2d, -1)], n_dim_sys,", ":math:`F` in real-time. As all state transitions and observations are linear with Gaussian", "@ self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T) + R) ) self.x_filt[t] = self.x_pred[t] +", "self.tm_count = 1 if save_dir is None: self.save_change = False else: self.save_change =", "# # extract parameters for time t # F = _last_dims(self.F, t, 2)", "time.time() - start_time Fh = self.HI @ G @ self.H self.times[3] += time.time()", "dim <= self.x_filt.shape[1]: return self.x_filt[:, int(dim)] else: raise ValueError('The dim must be less", "== 0: # initial setting self.x_pred[0] = self.initial_mean self.V_pred = self.initial_covariance.copy() self._update_transition_matrix(self.tau) else:", "self.H.T) + R) ) self.x_filt[t] = self.x_pred[t] + K @ ( self.y[t] -", "\"float32\", use_gpu = False, num_cpu = \"all\"): \"\"\"Setup initial parameters. \"\"\" if use_gpu:", "calculating... t={}\".format(t) + \"/\" + str(T), end=\"\") if t == 0: # initial", "filtered value Args: dim {int} : dimensionality for extract from filtered result Returns", "also known as :math:`\\Sigma_0`. initial state covariance transition_matrix [n_dim_sys, n_dim_sys] or [n_dim_sys, n_dim_sys]{numpy-array,", "try : self.x_pred[0] except : self.forward() if dim is None: return self.x_pred elif", "else: raise ValueError('The dim must be less than ' + self.x_filt.shape[1] + '.')", "+ '.') # def smooth(self): # \"\"\"Calculate RTS smooth for times. 
# Args:", "n_dim_sys, self.use_gpu ) self.n_dim_obs = _determine_dimensionality( [(observation_matrix, array2d, -2), (observation_covariance, array2d, -2), (adjacency_matrix,", "R) ) self.x_filt[t] = self.x_pred[t] + K @ ( self.y[t] - (self.H @", "update without noise Args: t {int} : observation time Attributes: K [n_dim_sys, n_dim_obs]", "self.F = self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.F = self.xp.asarray(transition_matrix, dtype = dtype)", "self.dtype) # A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype) # self.x_smooth[-1] = self.x_filt[-1]", "set `numpy` for calculation. num_cpu {int} or `all` number of cpus duaring calculating", "/ self.I))) self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(0).zfill(self.fillnum) + \".npy\"), self.F) if num_cpu == \"all\":", "# # fixed interval smoothing # self.x_smooth[t] = self.x_filt[t] \\ # + self.xp.dot(A,", "and (t-1)%self.I==0 and self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1) elif t >= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\":", "A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype) # self.x_smooth[-1] = self.x_filt[-1] # self.V_smooth[-1]", "else: self.times[2] += time.time() - start_time Fh = self.HI @ G @ self.H", "method {string} : method for localized calculation \"elementwise\": calculation for each element of", "1], A.T)) # def get_smoothed_value(self, dim = None): # \"\"\"Get RTS smoothed value", "self.y[t-self.tau:t+1].get() else: A = self.A y = self.y[t-self.tau:t+1] where_is_A = np.where(A) p =", "\"\"\" from logging import getLogger, StreamHandler, DEBUG logger = getLogger(\"llock\") handler = StreamHandler()", "covariance transition_matrix [n_dim_sys, n_dim_sys] or [n_dim_sys, n_dim_sys]{numpy-array, float} also known as :math:`F`. transition", ">= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t >= self.tau2+2 and t", "filtering. n_dim_sys {int} dimension of system transition variable n_dim_obs {int} dimension of observation", "= self.F - self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir,", "C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA for i in range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0]", "F : `transition_matrix` Q : `transition_covariance` H : `observation_matrix` R : `observation_covariance` \"\"\"", "t < T-self.tau+1 and (t-1)%self.I==0 and self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1) elif t >= self.tau+1 and", "transition_covariance is not None: self.Q = self.xp.asarray(transition_covariance, dtype = dtype) else: self.Q =", "- 1) / 2) else: self.tau2 = int((estimation_length - 1) / 2) self.tau", "models \"\"\" from logging import getLogger, StreamHandler, DEBUG logger = getLogger(\"llock\") handler =", "self.num_cpu = mp.cpu_count() else: self.num_cpu = num_cpu self.eta = eta self.cutoff = cutoff", "calculating time print(\"\\r filter calculating... 
t={}\".format(t) + \"/\" + str(T), end=\"\") if t", "__init__(self, observation = None, initial_mean = None, initial_covariance = None, transition_matrix = None,", "< T-self.tau+1 and (t-1)%self.I==0 and self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1) elif t >= self.tau+1 and (t-self.tau)%self.I==0", "self.HI = self.xp.linalg.pinv(self.H) if observation_covariance is None: self.R = self.xp.eye(self.n_dim_obs, dtype = dtype)", "for localized calculation \"elementwise\": calculation for each element of transition matrix \"local-average\": average", "+ self.xp.dot(A, self.x_smooth[t + 1] - self.x_pred[t + 1]) # self.V_smooth[t] = self.V_filt[t]", "{numpy-array, float} : mean of hidden state at time t given observations from", "an algorithm designed to estimate :math:`P(x_t | y_{0:t})` and :math:`F` in real-time. As", "global_node_number)] += Gh #AA C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA C[C==0] = 1 #AA", "hidden state distributions for times # [0...n_times-1] given all observations # V_smooth [n_time,", "x_{t} + b_{t} + v_{t} \\\\ y_{t} &= H_{t} x_{t} + d_{t} +", "| y_{0:t})` and :math:`F` in real-time. As all state transitions and observations are", "parameters for time t-1 Q = _last_dims(self.Q, t - 1, 2, self.use_gpu) #", "them. advance_mode {bool} if True, calculate transition matrix before filtering. if False, calculate", "+ 1] - self.V_pred[t + 1], A.T)) # def get_smoothed_value(self, dim = None):", "# A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1]))) # # fixed interval smoothing", "need to be changed.\".format(method)) if estimation_mode in [\"forward\", \"middle\", \"backward\"]: self.estimation_mode = estimation_mode", "getLogger(\"llock\") handler = StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False import os import", "less than ' + self.x_filt.shape[1] + '.') # def smooth(self): # \"\"\"Calculate RTS", "- start_time Fh = self.HI @ G @ self.H self.times[3] += time.time() -", "as :math:`\\mu_0`. initial state mean initial_covariance [n_dim_sys, n_dim_sys] {numpy-array, float} also known as", "n_dim_obs] {numpy-array, float} also known as :math:`y`. observation value initial_mean [n_dim_sys] {float} also", "\\\\ [v_{t}, w_{t}]^T &\\sim N(0, [[Q_{t}, O], [O, R_{t}]]) The LLOCK is an", "i and j, A[i,j]=1, else A[i,j]=0. Besides, you should A[i,i]=1 forall i. method", "for i in range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @", "\"elementwise\": calculation for each element of transition matrix \"local-average\": average calculation for specific", "F save_dir {str, directory-like} directory for saving transition matrices and filtered states. if", "Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA", "<reponame>ZoneTsuyoshi/pyassim<filename>pyassim/llock.py \"\"\" ===================================================================== Inference with Local Linear Operator Construction with Kalman Filter =====================================================================", "and filter for observation times. 
Attributes: T {int} : length of data y", "self.tm_count==1: self.F = self.HI @ G @ self.H else: self.times[2] += time.time() -", "+ 1 self.I = estimation_interval self.tm_count = 1 if save_dir is None: self.save_change", "observations from times [0...t] \"\"\" # if not implement `forward`, implement `forward` try", "Gaussian distributions with mean `x_filt[t]` and covariances `V_filt`. Args: observation [n_time, n_dim_obs] {numpy-array,", "mp.cpu_count() else: self.num_cpu = num_cpu self.eta = eta self.cutoff = cutoff self.dtype =", "is None: # return self.x_smooth # elif dim <= self.x_smooth.shape[1]: # return self.x_smooth[:,", "#LA G /= 2.0 #LA elif self.method==\"all-average\": #all-average C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype)", "self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"),", "+ \" \\\"middle\\\", or \\\"backward\\\".\") if self.estimation_mode in [\"forward\", \"backward\"]: self.tau = int(estimation_length)", "# return self.x_smooth # elif dim <= self.x_smooth.shape[1]: # return self.x_smooth[:, int(dim)] #", "noise, these distributions can be represented exactly as Gaussian distributions with mean `x_filt[t]`", "None, transition_matrix = None, observation_matrix = None, transition_covariance = None, observation_covariance = None,", "`transition_matrix` Q : `transition_covariance` H : `observation_matrix` R : `observation_covariance` \"\"\" def __init__(self,", "self.x_filt.shape[1] + '.') # def smooth(self): # \"\"\"Calculate RTS smooth for times. #", "= StreamHandler() handler.setLevel(DEBUG) logger.setLevel(DEBUG) logger.addHandler(handler) logger.propagate = False import os import math import", "if there is a link between i and j, A[i,j]=1, else A[i,j]=0. Besides,", "self.x_pred[t] self.V_filt = self.V_pred else : start_time = time.time() self._filter_update(t) self.times[1] += time.time()", "multiprocessing as mp import itertools import numpy as np from .utils import array1d,", "as :math:`\\Sigma_0`. initial state covariance transition_matrix [n_dim_sys, n_dim_sys] or [n_dim_sys, n_dim_sys]{numpy-array, float} also", "t range is reversed from 1~T) # for t in reversed(range(T - 1))", "None): \"\"\"Get filtered value Args: dim {int} : dimensionality for extract from filtered", "_determine_dimensionality def _local_calculation(i, j, A, y): local_A = A[i] | A[j] local_node_number_i =", ": length of data y x_pred [n_time, n_dim_sys] {numpy-array, float} : mean of", "False else: self.xp = np self.use_gpu = False # determine dimensionality self.n_dim_sys =", "@ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA", "== \"all\": self.num_cpu = mp.cpu_count() else: self.num_cpu = num_cpu self.eta = eta self.cutoff", "global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[i, global_node_number] += Gh[local_node_number] #LA G[global_node_number, i] +=", "+= 1 def get_predicted_value(self, dim = None): \"\"\"Get predicted value Args: dim {int}", "and cupy. if True, you need install package `cupy`. 
if False, set `numpy`", "self.dtype) self.x_filt = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) # calculate prediction and filter", "(0,1]) update rate for update transition matrix F cutoff cutoff distance for update", "_last_dims(self.Q, t - 1, 2, self.use_gpu) # calculate predicted distribution for time t", "from times [0...t] \"\"\" # if not implement `forward`, implement `forward` try :", "This class implements the LLOCK, for a Linear Gaussian model specified by, ..", "except: self.xp = np self.use_gpu = False else: self.xp = np self.use_gpu =", "return self.x_smooth[:, int(dim)] # else: # raise ValueError('The dim must be less than", "n_dim_sys] {numpy-array, float} : mean of hidden state at time t given observations", "[n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`\\Sigma_0`. initial state covariance transition_matrix [n_dim_sys,", ": mean of hidden state at time t given observations from times [0...t-1]", "if use_gpu: try: import cupy self.xp = cupy self.use_gpu = True except: self.xp", "time.time() - start_time if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self, t): \"\"\"Calculate fileter", ": dimensionality for extract from filtered result Returns (numpy-array, float) : mean of", "+= time.time() - start_time if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self, t): \"\"\"Calculate", "hidden state at time t given observations from times [0...t-1] V_pred [n_dim_sys, n_dim_sys]", "smoothed value # Args: # dim {int} : dimensionality for extract from RTS", "\"\"\"Calculate prediction and filter for observation times. Attributes: T {int} : length of", "n_dim_obs] {numpy-array, float} : Kalman gain matrix for time t \"\"\" # extract", "- Fh), self.cutoff) if self.save_change: self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"), self.F) self.tm_count", "time t given observations # from times [0...T] # \"\"\" # # if", "\"\"\" # extract parameters for time t R = _last_dims(self.R, t, 2, self.use_gpu)", "\"local-average\", \"all-average\"]: self.method = method else: raise ValueError(\"Variable \\\"method\\\" only allows \\\"elementwise\\\", \\\"local-average\\\"", "self.x_pred[0] = self.initial_mean self.V_pred = self.initial_covariance.copy() self._update_transition_matrix(self.tau) else: if t >= 2 and", "data y x_pred [n_time, n_dim_sys] {numpy-array, float} : mean of hidden state at", "dimensionality for extract from predicted result Returns (numpy-array, float) : mean of hidden", "self.A = self.xp.asarray(adjacency_matrix, dtype = bool) if method in [\"elementwise\", \"local-average\", \"all-average\"]: self.method", "calculate prediction and filter for every time for t in range(T): # visualize", "G[self.xp.ix_(global_node_number, global_node_number)] += Gh #AA C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA C[C==0] = 1", "= np.where(local_A)[0] Gh = y[1:, global_node_number].T \\ @ np.linalg.pinv(y[:-1, global_node_number].T) return Gh[local_node_number_i, local_node_number_j]", "given observations # from times [0...T] # \"\"\" # # if not implement", "interval smoothing gain # A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1]))) # #", "dim {int} : dimensionality for extract from RTS smoothed result # Returns (numpy-array,", "observations from times [0...t] \"\"\" T = self.y.shape[0] self.x_pred = 
self.xp.zeros((T, self.n_dim_sys), dtype", "or `all` number of cpus duaring calculating transition matrix. you can set `all`", "None, dtype = \"float32\", use_gpu = False, num_cpu = \"all\"): \"\"\"Setup initial parameters.", "# visualize calculating time print(\"\\r filter calculating... t={}\".format(t) + \"/\" + str(T), end=\"\")", "time Attributes: K [n_dim_sys, n_dim_obs] {numpy-array, float} : Kalman gain matrix for time", "observations are linear with Gaussian distributed noise, these distributions can be represented exactly", "+ d_{t} + w_{t} \\\\ [v_{t}, w_{t}]^T &\\sim N(0, [[Q_{t}, O], [O, R_{t}]])", "len(np.where(local_A[:i])[0]) local_node_number_j = len(np.where(local_A[:j])[0]) global_node_number = np.where(local_A)[0] Gh = y[1:, global_node_number].T \\ @", "t >= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t >= self.tau2+2 and", "local_A = A[i] | A[j] local_node_number_i = len(np.where(local_A[:i])[0]) local_node_number_j = len(np.where(local_A[:j])[0]) global_node_number =", "1] - self.x_pred[t + 1]) # self.V_smooth[t] = self.V_filt[t] \\ # + self.xp.dot(A,", "10., estimation_mode = \"backward\", save_dir = None, n_dim_sys = None, n_dim_obs = None,", "Kalman Filter ===================================================================== This module implements the Local LOCK for Linear-Gaussian state space", "dimension of system transition variable n_dim_obs {int} dimension of observation variable dtype {type}", "for saving transition matrices and filtered states. if this variable is `None`, cannot", "self.V_smooth = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys), # dtype = self.dtype) # A = self.xp.zeros((self.n_dim_sys,", "must be choosen from \\\"forward\\\",\" + \" \\\"middle\\\", or \\\"backward\\\".\") if self.estimation_mode in", "n_dim_sys] {numpy-array, float} also known as :math:`\\Sigma_0`. initial state covariance transition_matrix [n_dim_sys, n_dim_sys]", "two observation dimenstions \"all-average\": average calculation for each observation dimenstions update_interval {int} interval", "y # x_smooth [n_time, n_dim_sys] {numpy-array, float} # : mean of hidden state", "\"\"\"Calculate fileter update without noise Args: t {int} : observation time Attributes: K", "#AA C[C==0] = 1 #AA G /= C #AA if self.tm_count==1: self.F =", "\"\"\" G = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) start_time = time.time() if self.method==\"elementwise\": # elementwise", "if False, set `numpy` for calculation. num_cpu {int} or `all` number of cpus", "#all-average C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA for i in range(self.n_dim_obs): global_node_number =", "a Linear Gaussian model specified by, .. math:: x_{t+1} &= F_{t} x_{t} +", "extract parameters for time t R = _last_dims(self.R, t, 2, self.use_gpu) # calculate", "matrix \"local-average\": average calculation for specific two observation dimenstions \"all-average\": average calculation for", "+ '.') def get_filtered_value(self, dim = None): \"\"\"Get filtered value Args: dim {int}", "self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype = dtype) else: self.H = self.xp.asarray(observation_matrix, dtype =", "known as :math:`R`. 
observation covariance adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float} also known as", "implement `smooth` # try : # self.x_smooth[0] # except : # self.smooth() #", "A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1]))) # # fixed interval smoothing #", "for calculation. num_cpu {int} or `all` number of cpus duaring calculating transition matrix.", "Operator Construction with Kalman Filter ===================================================================== This module implements the Local LOCK for", "if method in [\"elementwise\", \"local-average\", \"all-average\"]: self.method = method else: raise ValueError(\"Variable \\\"method\\\"", "+= 1 self.F = self.F - self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff)", "\"\"\" T = self.y.shape[0] self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) self.x_filt =", "# self.x_smooth[-1] = self.x_filt[-1] # self.V_smooth[-1] = self.V_filt[-1] # # t in [0,", "state at time t given observations from times [0...t] V_filt [n_dim_sys, n_dim_sys] {numpy-array,", "True self.save_dir = save_dir self.fillnum = len(str(int(self.y.shape[0] / self.I))) self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(0).zfill(self.fillnum)", "self.H else: self.times[2] += time.time() - start_time Fh = self.HI @ G @", "None: self.A = self.xp.eye(dtype=bool) else: self.A = self.xp.asarray(adjacency_matrix, dtype = bool) if method", "# extract parameters for time t-1 Q = _last_dims(self.Q, t - 1, 2,", "`all` or positive integer. Attributes: y : `observation` F : `transition_matrix` Q :", "None: self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype) else: self.initial_mean = self.xp.asarray(initial_mean, dtype =", "is None: self.A = self.xp.eye(dtype=bool) else: self.A = self.xp.asarray(adjacency_matrix, dtype = bool) if", "choosen from \\\"forward\\\",\" + \" \\\"middle\\\", or \\\"backward\\\".\") if self.estimation_mode in [\"forward\", \"backward\"]:", "False, set `numpy` for calculation. num_cpu {int} or `all` number of cpus duaring", "self.estimation_mode = estimation_mode else: raise ValueError(\"\\\"estimation_mode\\\" must be choosen from \\\"forward\\\",\" + \"", "is None: self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype) else: self.initial_mean = self.xp.asarray(initial_mean, dtype", "= self.HI @ G @ self.H self.times[3] += time.time() - start_time self.times[4] +=", "array2d, -2), (adjacency_matrix, array2d, -2)], n_dim_obs, self.use_gpu ) # self.y = _parse_observations(observation) self.y", "value Args: dim {int} : dimensionality for extract from filtered result Returns (numpy-array,", "x_{t} observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float} also known as :math:`H`. observation matrix from", "self.xp.eye(self.n_dim_sys, dtype = dtype) if observation_matrix is None: self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype", "# raise ValueError('The dim must be less than ' # + self.x_smooth.shape[1] +", "`all` number of cpus duaring calculating transition matrix. you can set `all` or", "= self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.F = self.xp.asarray(transition_matrix, dtype = dtype) if", "if this variable is `None`, cannot save them. advance_mode {bool} if True, calculate", "forall i. 
method {string} : method for localized calculation \"elementwise\": calculation for each", "fixed interval smoothed gain # \"\"\" # # if not implement `filter`, implement", "Gaussian distributed noise, these distributions can be represented exactly as Gaussian distributions with", "not implement `forward`, implement `forward` try : self.x_pred[0] except : self.forward() if dim", "self.xp.linalg.inv(self.H @ (self.V_pred @ self.H.T) + R) ) self.x_filt[t] = self.x_pred[t] + K", "self.x_filt[-1] # self.V_smooth[-1] = self.V_filt[-1] # # t in [0, T-2] (notice t", "2) # # calculate fixed interval smoothing gain # A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T,", "[0...n_times-1] given all observations # V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float} # :", "self.HI @ G @ self.H self.times[3] += time.time() - start_time self.times[4] += 1", "than ' + self.x_filt.shape[1] + '.') # def smooth(self): # \"\"\"Calculate RTS smooth", "time for t in range(T): # visualize calculating time print(\"\\r filter calculating... t={}\".format(t)", "interval smoothed gain # \"\"\" # # if not implement `filter`, implement `filter`", "return self.x_filt elif dim <= self.x_filt.shape[1]: return self.x_filt[:, int(dim)] else: raise ValueError('The dim", "observations from times [0...t-1] \"\"\" # if not implement `forward`, implement `forward` try", "[0...n_times-1] given all observations # A [n_dim_sys, n_dim_sys] {numpy-array, float} # : fixed", "else: self.initial_mean = self.xp.asarray(initial_mean, dtype = dtype) if initial_covariance is None: self.initial_covariance =", "# # visualize calculating times # print(\"\\r smooth calculating... t={}\".format(T - t) #", "G @ self.H self.times[3] += time.time() - start_time self.times[4] += 1 self.F =", "bool) if method in [\"elementwise\", \"local-average\", \"all-average\"]: self.method = method else: raise ValueError(\"Variable", "self.x_filt[:, int(dim)] else: raise ValueError('The dim must be less than ' + self.x_filt.shape[1]", "# : mean of hidden state distributions for times # [0...n_times-1] given all", "times # [0...n_times-1] given all observations # V_smooth [n_time, n_dim_sys, n_dim_sys] {numpy-array, float}", "self.use_gpu) # calculate filter step K = self.V_pred @ ( self.H.T @ self.xp.linalg.inv(self.H", "self.xp.linalg.pinv(self.H) if observation_covariance is None: self.R = self.xp.eye(self.n_dim_obs, dtype = dtype) else: self.R", "else: if t >= 2 and t < T-self.tau+1 and (t-1)%self.I==0 and self.estimation_mode==\"forward\":", "state space models \"\"\" from logging import getLogger, StreamHandler, DEBUG logger = getLogger(\"llock\")", "# + self.xp.dot(A, self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T)) # def", "hidden state at time t given observations from times [0...t] V_filt [n_dim_sys, n_dim_sys]", "try : self.x_filt[0] except : self.forward() if dim is None: return self.x_filt elif", "self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1) elif t >= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t", "#LA elif self.method==\"all-average\": #all-average C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA for i in", "[n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`Q`. system transition covariance observation_covariance [n_time,", "save_dir {str, directory-like} directory for saving transition matrices and filtered states. 
if this", "float} # : fixed interval smoothed gain # \"\"\" # # if not", "A = self.A y = self.y[t-self.tau:t+1] where_is_A = np.where(A) p = mp.Pool(self.num_cpu) G_local", "and observations are linear with Gaussian distributed noise, these distributions can be represented", "directory for saving transition matrices and filtered states. if this variable is `None`,", "= bool) if method in [\"elementwise\", \"local-average\", \"all-average\"]: self.method = method else: raise", "Args: t {int} : observation time Attributes: K [n_dim_sys, n_dim_obs] {numpy-array, float} :", "+= Gh[:, local_node_number] #LA G /= 2.0 #LA elif self.method==\"all-average\": #all-average C =", "Gh = y[1:, global_node_number].T \\ @ np.linalg.pinv(y[:-1, global_node_number].T) return Gh[local_node_number_i, local_node_number_j] class LocalLOCK(object)", "if self.use_gpu: A = self.A.get() y = self.y[t-self.tau:t+1].get() else: A = self.A y", "for each element of transition matrix \"local-average\": average calculation for specific two observation", "from x_{t-1} to x_{t} observation_matrix [n_dim_sys, n_dim_obs] {numpy-array, float} also known as :math:`H`.", "\"\"\" # if not implement `forward`, implement `forward` try : self.x_pred[0] except :", "n_dim_sys] {numpy-array, float} # : covariances of hidden state distributions for times #", "p = mp.Pool(self.num_cpu) G_local = p.starmap(_local_calculation, zip(where_is_A[0], where_is_A[1], itertools.repeat(A), itertools.repeat(y))) p.close() G[A] =", "_predict_update(self, t): \"\"\"Calculate fileter update Args: t {int} : observation time \"\"\" #", "`forward` try : self.x_filt[0] except : self.forward() if dim is None: return self.x_filt", "elif self.method==\"all-average\": #all-average C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs), dtype=self.dtype) #AA for i in range(self.n_dim_obs):", "dtype = dtype) self.HI = self.xp.linalg.pinv(self.H) if observation_covariance is None: self.R = self.xp.eye(self.n_dim_obs,", "at time t given observations from times [0...t-1] V_pred [n_dim_sys, n_dim_sys] {numpy-array, float}", "Local LOCK for Linear-Gaussian state space models \"\"\" from logging import getLogger, StreamHandler,", "+= time.time() - start_time self.times[4] += 1 self.F = self.F - self.eta *", "None: return self.x_filt elif dim <= self.x_filt.shape[1]: return self.x_filt[:, int(dim)] else: raise ValueError('The", "if t == 0: # initial setting self.x_pred[0] = self.initial_mean self.V_pred = self.initial_covariance.copy()", "A [n_dim_sys, n_dim_sys] {numpy-array, float} # : fixed interval smoothed gain # \"\"\"", "- 1)) : # # visualize calculating times # print(\"\\r smooth calculating... t={}\".format(T", ": \"\"\"Implements the Local LOCK. This class implements the LLOCK, for a Linear", "np self.use_gpu = False # determine dimensionality self.n_dim_sys = _determine_dimensionality( [(transition_matrix, array2d, -2),", "(self.H @ self.x_pred[t]) ) self.V_filt = self.V_pred - K @ (self.H @ self.V_pred)", "time t \"\"\" # extract parameters for time t R = _last_dims(self.R, t,", "if observation_covariance is None: self.R = self.xp.eye(self.n_dim_obs, dtype = dtype) else: self.R =", "if True, calculate transition matrix before filtering. if False, calculate the matrix after", "# calculate prediction and filter for every time for t in range(T): #", "`x_filt[t]` and covariances `V_filt`. 
Args: observation [n_time, n_dim_obs] {numpy-array, float} also known as", "`forward`, implement `forward` try : self.x_filt[0] except : self.forward() if dim is None:", "\\\"forward\\\",\" + \" \\\"middle\\\", or \\\"backward\\\".\") if self.estimation_mode in [\"forward\", \"backward\"]: self.tau =", "Returns (numpy-array, float) : mean of hidden state at time t given observations", "time t-1 Q = _last_dims(self.Q, t - 1, 2, self.use_gpu) # calculate predicted", "# # if not implement `filter`, implement `filter` # try : # self.x_pred[0]", "else : start_time = time.time() self._filter_update(t) self.times[1] += time.time() - start_time if self.save_change:", "dtype = dtype) else: self.R = self.xp.asarray(observation_covariance, dtype = dtype) if adjacency_matrix is", ": dimensionality for extract from RTS smoothed result # Returns (numpy-array, float) #", "class implements the LLOCK, for a Linear Gaussian model specified by, .. math::", "from .utils import array1d, array2d from .util_functions import _parse_observations, _last_dims, \\ _determine_dimensionality def", "self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(self.tm_count).zfill(self.fillnum) + \".npy\"), self.F) self.tm_count += 1 def get_predicted_value(self, dim", "start_time if self.xp.any(self.xp.isnan(self.y[t])): self.x_filt[t] = self.x_pred[t] self.V_filt = self.V_pred else : start_time =", "self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self, t): \"\"\"Calculate fileter update Args: t {int}", ": # self.filter() # T = self.y.shape[0] # self.x_smooth = self.xp.zeros((T, self.n_dim_sys), dtype", "= self.dtype) # self.x_smooth[-1] = self.x_filt[-1] # self.V_smooth[-1] = self.V_filt[-1] # # t", "1] - self.V_pred[t + 1], A.T)) # def get_smoothed_value(self, dim = None): #", "t # F = _last_dims(self.F, t, 2) # # calculate fixed interval smoothing", "observation_covariance is None: self.R = self.xp.eye(self.n_dim_obs, dtype = dtype) else: self.R = self.xp.asarray(observation_covariance,", "from RTS smoothed result # Returns (numpy-array, float) # : mean of hidden", "= self.F @ self.x_filt[t-1] self.V_pred = self.F @ self.V_filt @ self.F.T + Q", "Q = _last_dims(self.Q, t - 1, 2, self.use_gpu) # calculate predicted distribution for", "state distributions for times # [0...n_times-1] given all observations # A [n_dim_sys, n_dim_sys]", "'.') def get_filtered_value(self, dim = None): \"\"\"Get filtered value Args: dim {int} :", "\\\"backward\\\".\") if self.estimation_mode in [\"forward\", \"backward\"]: self.tau = int(estimation_length) self.tau2 = int((estimation_length -", "of system transition variable n_dim_obs {int} dimension of observation variable dtype {type} data", "self.H = self.xp.asarray(observation_matrix, dtype = dtype) self.HI = self.xp.linalg.pinv(self.H) if observation_covariance is None:", "[[Q_{t}, O], [O, R_{t}]]) The LLOCK is an algorithm designed to estimate :math:`P(x_t", "LLOCK, for a Linear Gaussian model specified by, .. math:: x_{t+1} &= F_{t}", "covariance adjacency_matrix [n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`A`. 
adjacency matrix, if", "data y # x_smooth [n_time, n_dim_sys] {numpy-array, float} # : mean of hidden", "start_time = time.time() self._filter_update(t) self.times[1] += time.time() - start_time if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"),", "self.V_pred) def _update_transition_matrix(self, t): \"\"\"Update transition matrix Args: t {int} : observation time", "G /= C #AA if self.tm_count==1: self.F = self.HI @ G @ self.H", "_update_transition_matrix(self, t): \"\"\"Update transition matrix Args: t {int} : observation time \"\"\" G", "get_filtered_value(self, dim = None): \"\"\"Get filtered value Args: dim {int} : dimensionality for", "else: self.F = self.xp.asarray(transition_matrix, dtype = dtype) if transition_covariance is not None: self.Q", "self._update_transition_matrix(t+self.tau-1) elif t >= self.tau+1 and (t-self.tau)%self.I==0 and self.estimation_mode==\"backward\": self._update_transition_matrix(t) elif t >=", "fileter update Args: t {int} : observation time \"\"\" # extract parameters for", "int((estimation_length - 1) / 2) else: self.tau2 = int((estimation_length - 1) / 2)", "\"all-average\": average calculation for each observation dimenstions update_interval {int} interval of update transition", ": # self.smooth() # if dim is None: # return self.x_smooth # elif", "x_{t+1} &= F_{t} x_{t} + b_{t} + v_{t} \\\\ y_{t} &= H_{t} x_{t}", "dtype) if transition_covariance is not None: self.Q = self.xp.asarray(transition_covariance, dtype = dtype) else:", "initial_covariance [n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`\\Sigma_0`. initial state covariance transition_matrix", "R : `observation_covariance` \"\"\" def __init__(self, observation = None, initial_mean = None, initial_covariance", "smoothing gain # A = self.xp.dot(self.V_filt[t], self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1]))) # # fixed", "# if not implement `forward`, implement `forward` try : self.x_filt[0] except : self.forward()", "None, transition_covariance = None, observation_covariance = None, adjacency_matrix = None, method = \"elementwise\",", "T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2) start_time = time.time() self._predict_update(t) self.times[0] += time.time()", "transition covariance observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array, float} also known as :math:`R`. observation", "self.V_pred else : start_time = time.time() self._filter_update(t) self.times[1] += time.time() - start_time if", "1 #AA C[C==0] = 1 #AA G /= C #AA if self.tm_count==1: self.F", "T {int} : length of data y x_pred [n_time, n_dim_sys] {numpy-array, float} :", "raise ValueError(\"Variable \\\"method\\\" only allows \\\"elementwise\\\", \\\"local-average\\\" \" + \"or \\\"all-average\\\". 
So, your", "implement `forward`, implement `forward` try : self.x_pred[0] except : self.forward() if dim is", "dtype = dtype) if observation_matrix is None: self.H = self.xp.eye(self.n_dim_obs, self.n_dim_sys, dtype =", "observations from times [0...t] V_filt [n_dim_sys, n_dim_sys] {numpy-array, float} : covariance of hidden", "= self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype) # self.x_smooth[-1] = self.x_filt[-1] # self.V_smooth[-1] =", "n_dim_sys, n_dim_sys] {numpy-array, float} # : covariances of hidden state distributions for times", "(notice t range is reversed from 1~T) # for t in reversed(range(T -", "self.xp = np self.use_gpu = False # determine dimensionality self.n_dim_sys = _determine_dimensionality( [(transition_matrix,", "observation value initial_mean [n_dim_sys] {float} also known as :math:`\\mu_0`. initial state mean initial_covariance", "= 2 * self.tau2 + 1 self.I = estimation_interval self.tm_count = 1 if", "Local LOCK. This class implements the LLOCK, for a Linear Gaussian model specified", "from times [0...t-1] x_filt [n_time, n_dim_sys] {numpy-array, float} : mean of hidden state", "A.T)) # def get_smoothed_value(self, dim = None): # \"\"\"Get RTS smoothed value #", "t given observations from times [0...t] \"\"\" T = self.y.shape[0] self.x_pred = self.xp.zeros((T,", "self.y.shape[0] self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) self.x_filt = self.xp.zeros((T, self.n_dim_sys), dtype", "local_node_number] #LA G /= 2.0 #LA elif self.method==\"all-average\": #all-average C = self.xp.zeros((self.n_dim_obs, self.n_dim_obs),", "@ self.H else: self.times[2] += time.time() - start_time Fh = self.HI @ G", "times [0...t-1] \"\"\" # if not implement `forward`, implement `forward` try : self.x_pred[0]", "0: # initial setting self.x_pred[0] = self.initial_mean self.V_pred = self.initial_covariance.copy() self._update_transition_matrix(self.tau) else: if", "specific two observation dimenstions \"all-average\": average calculation for each observation dimenstions update_interval {int}", "int(dim)] else: raise ValueError('The dim must be less than ' + self.x_pred.shape[1] +", "filter for every time for t in range(T): # visualize calculating time print(\"\\r", "self.xp.dot(self.V_smooth[t + 1] - self.V_pred[t + 1], A.T)) # def get_smoothed_value(self, dim =", ": fixed interval smoothed gain # \"\"\" # # if not implement `filter`,", "# t in [0, T-2] (notice t range is reversed from 1~T) #", "implement `smooth`, implement `smooth` # try : # self.x_smooth[0] # except : #", "= self.xp.asarray(transition_matrix, dtype = dtype) if transition_covariance is not None: self.Q = self.xp.asarray(transition_covariance,", "self.V_pred = self.F @ self.V_filt @ self.F.T + Q def _filter_update(self, t): \"\"\"Calculate", "\".npy\"), self.F) self.tm_count += 1 def get_predicted_value(self, dim = None): \"\"\"Get predicted value", "self.save_dir = save_dir self.fillnum = len(str(int(self.y.shape[0] / self.I))) self.xp.save(os.path.join(self.save_dir, \"transition_matrix_\" + str(0).zfill(self.fillnum) +", "t given observations from times [0...t] \"\"\" # if not implement `forward`, implement", "global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\ @ self.xp.linalg.pinv(self.y[t-self.tau:t, global_node_number].T) G[self.xp.ix_(global_node_number, global_node_number)]", "+ \"/\" + str(T), end=\"\") # # extract parameters for time t #", "known as :math:`Q`. 
system transition covariance observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array, float} also", "time \"\"\" # extract parameters for time t-1 Q = _last_dims(self.Q, t -", "self.x_filt elif dim <= self.x_filt.shape[1]: return self.x_filt[:, int(dim)] else: raise ValueError('The dim must", "# self.V_smooth = self.xp.zeros((T, self.n_dim_sys, self.n_dim_sys), # dtype = self.dtype) # A =", "`observation` F : `transition_matrix` Q : `transition_covariance` H : `observation_matrix` R : `observation_covariance`", "self.times[1] += time.time() - start_time if self.save_change: self.xp.save(os.path.join(self.save_dir, \"states.npy\"), self.x_filt) def _predict_update(self, t):", "self.F = self.F - self.eta * self.xp.minimum(self.xp.maximum(-self.cutoff, self.F - Fh), self.cutoff) if self.save_change:", "+= Gh #AA C[self.xp.ix_(global_node_number, global_node_number)] += 1 #AA C[C==0] = 1 #AA G", "update transition matrix F save_dir {str, directory-like} directory for saving transition matrices and", "None: self.R = self.xp.eye(self.n_dim_obs, dtype = dtype) else: self.R = self.xp.asarray(observation_covariance, dtype =", "float} also known as :math:`\\Sigma_0`. initial state covariance transition_matrix [n_dim_sys, n_dim_sys] or [n_dim_sys,", "dtype = self.dtype) # A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype) # self.x_smooth[-1]", "numpy as np from .utils import array1d, array2d from .util_functions import _parse_observations, _last_dims,", "observations from times [0...t-1] V_pred [n_dim_sys, n_dim_sys] {numpy-array, float} : covariance of hidden", "\"\"\"Setup initial parameters. \"\"\" if use_gpu: try: import cupy self.xp = cupy self.use_gpu", "from 1~T) # for t in reversed(range(T - 1)) : # # visualize", "[n_dim_sys, n_dim_sys] {numpy-array, float} : covariance of hidden state at time t given", ") # self.y = _parse_observations(observation) self.y = self.xp.asarray(observation).copy() if initial_mean is None: self.initial_mean", "= np self.use_gpu = False # determine dimensionality self.n_dim_sys = _determine_dimensionality( [(transition_matrix, array2d,", "'.') # def smooth(self): # \"\"\"Calculate RTS smooth for times. # Args: #", "+ v_{t} \\\\ y_{t} &= H_{t} x_{t} + d_{t} + w_{t} \\\\ [v_{t},", "RTS smooth for times. # Args: # T : length of data y", "as :math:`Q`. system transition covariance observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array, float} also known", "i] += Gh[:, local_node_number] #LA G /= 2.0 #LA elif self.method==\"all-average\": #all-average C", "self.x_pred = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) self.x_filt = self.xp.zeros((T, self.n_dim_sys), dtype =", "# return self.x_smooth[:, int(dim)] # else: # raise ValueError('The dim must be less", "float} also known as :math:`Q`. system transition covariance observation_covariance [n_time, n_dim_obs, n_dim_obs] {numpy-array,", "self.tau2 = int((estimation_length - 1) / 2) else: self.tau2 = int((estimation_length - 1)", "smooth for times. 
# Args: # T : length of data y #", "these distributions can be represented exactly as Gaussian distributions with mean `x_filt[t]` and", "= dtype) if initial_covariance is None: self.initial_covariance = self.xp.eye(self.n_dim_sys, dtype = dtype) else:", "= self.x_filt[-1] # self.V_smooth[-1] = self.V_filt[-1] # # t in [0, T-2] (notice", "self.Q = self.xp.asarray(transition_covariance, dtype = dtype) else: self.Q = self.xp.eye(self.n_dim_sys, dtype = dtype)", "and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2) start_time = time.time() self._predict_update(t)", "times # [0...n_times-1] given all observations # A [n_dim_sys, n_dim_sys] {numpy-array, float} #", "self.xp.dot(F.T, self.xp.linalg.pinv(self.V_pred[t + 1]))) # # fixed interval smoothing # self.x_smooth[t] = self.x_filt[t]", "\"elementwise\", estimation_length = 10, estimation_interval = 1, eta = 1., cutoff = 10.,", "dtype) if transition_matrix is None: self.F = self.xp.eye(self.n_dim_sys, dtype = dtype) else: self.F", "# A = self.xp.zeros((self.n_dim_sys, self.n_dim_sys), dtype = self.dtype) # self.x_smooth[-1] = self.x_filt[-1] #", "state at time t given observations from times [0...t-1] V_pred [n_dim_sys, n_dim_sys] {numpy-array,", "if initial_mean is None: self.initial_mean = self.xp.zeros(self.n_dim_sys, dtype = dtype) else: self.initial_mean =", "\"all\"): \"\"\"Setup initial parameters. \"\"\" if use_gpu: try: import cupy self.xp = cupy", "#AA if self.tm_count==1: self.F = self.HI @ G @ self.H else: self.times[2] +=", "dtype {type} data type of numpy-array use_gpu {bool} wheather use gpu and cupy.", "y = self.y[t-self.tau:t+1].get() else: A = self.A y = self.y[t-self.tau:t+1] where_is_A = np.where(A)", "#AA for i in range(self.n_dim_obs): global_node_number = self.xp.where(self.A[i])[0] Gh = self.y[t-self.tau+1:t+1, global_node_number].T \\", "observation time Attributes: K [n_dim_sys, n_dim_obs] {numpy-array, float} : Kalman gain matrix for", "transition matrix. you can set `all` or positive integer. 
Attributes: y : `observation`", "float} : mean of hidden state at time t given observations from times", "# T = self.y.shape[0] # self.x_smooth = self.xp.zeros((T, self.n_dim_sys), dtype = self.dtype) #", "self.x_pred.shape[1] + '.') def get_filtered_value(self, dim = None): \"\"\"Get filtered value Args: dim", "-1)], n_dim_sys, self.use_gpu ) self.n_dim_obs = _determine_dimensionality( [(observation_matrix, array2d, -2), (observation_covariance, array2d, -2),", "self.x_smooth[-1] = self.x_filt[-1] # self.V_smooth[-1] = self.V_filt[-1] # # t in [0, T-2]", "transition_covariance [n_time-1, n_dim_sys, n_dim_sys] or [n_dim_sys, n_dim_sys] {numpy-array, float} also known as :math:`Q`.", "self.tau2+2 and t < T-self.tau2 and (t-self.tau2-1)%self.I==0 and self.estimation_mode==\"middle\": self._update_transition_matrix(t+self.tau2) start_time = time.time()", ">= 2 and t < T-self.tau+1 and (t-1)%self.I==0 and self.estimation_mode==\"forward\": self._update_transition_matrix(t+self.tau-1) elif t", ": covariance of hidden state at time t given observations from times [0...t-1]", "t={}\".format(T - t) # + \"/\" + str(T), end=\"\") # # extract parameters", "{string} : method for localized calculation \"elementwise\": calculation for each element of transition", "= \"elementwise\", estimation_length = 10, estimation_interval = 1, eta = 1., cutoff =", "if dim is None: # return self.x_smooth # elif dim <= self.x_smooth.shape[1]: #", "= True except: self.xp = np self.use_gpu = False else: self.xp = np", "dtype = self.dtype) # self.x_smooth[-1] = self.x_filt[-1] # self.V_smooth[-1] = self.V_filt[-1] # #", "mean of hidden state at time t given observations from times [0...t] V_filt", "matrix F cutoff cutoff distance for update transition matrix F save_dir {str, directory-like}", "try : # self.x_pred[0] # except : # self.filter() # T = self.y.shape[0]", "= self.xp.eye(dtype=bool) else: self.A = self.xp.asarray(adjacency_matrix, dtype = bool) if method in [\"elementwise\",", "= None, observation_covariance = None, adjacency_matrix = None, method = \"elementwise\", estimation_length =", "dimensionality self.n_dim_sys = _determine_dimensionality( [(transition_matrix, array2d, -2), (initial_mean, array1d, -1), (initial_covariance, array2d, -2)," ]
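# For orientation, a minimal usage sketch of LocalLOCK on synthetic data.
# Everything below (the chain-graph adjacency, the random-walk-like system,
# and all parameter values) is an illustrative assumption, not an example
# taken from the pyassim documentation.
import numpy as np
from pyassim.llock import LocalLOCK

n_dim, T = 10, 100                   # state = observation dimension (H = I)
rng = np.random.default_rng(0)

# chain-graph adjacency: every node linked to itself and its two neighbors
A = np.eye(n_dim, dtype=bool) \
    | np.eye(n_dim, k=1, dtype=bool) | np.eye(n_dim, k=-1, dtype=bool)

# synthetic observations from a near-identity linear system
F_true = np.eye(n_dim) + 0.05 * rng.standard_normal((n_dim, n_dim))
y = np.zeros((T, n_dim))
y[0] = rng.standard_normal(n_dim)
for t in range(1, T):
    y[t] = F_true @ y[t-1] + 0.01 * rng.standard_normal(n_dim)

model = LocalLOCK(observation=y,
                  observation_matrix=np.eye(n_dim),
                  adjacency_matrix=A,
                  method="local-average",  # avoids the multiprocessing pool
                  estimation_length=10, estimation_interval=5,
                  eta=0.8, cutoff=1.0, estimation_mode="backward")
model.forward()                      # run the filter, re-estimating F online
x_filt = model.get_filtered_value()  # [T, n_dim] filtered state means
F_est = model.F                      # final estimate of the transition matrix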
[ "library. # # Spot is free software; you can redistribute it and/or modify", "de l'Epita # # This file is part of Spot, a model checking", "to build the sum of two automata with # different dictionaries. aut1 =", "RuntimeError: pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False)", "License, or # (at your option) any later version. # # Spot is", "= spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except RuntimeError: pass opts = spot.option_map()", "# different dictionaries. aut1 = spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2)", "opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2, opts) dict =", "rg = spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def produce_phi(rg, n): phi = []", "phi2 = produce_phi(rg, 1000) inputres = [] aut = [] for p in", "either version 3 of the License, or # (at your option) any later", "= produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000) inputres = [] aut = []", "= [] aut = [] for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 =", "spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except RuntimeError: pass opts = spot.option_map() opts.set('output',", "of two automata with # different dictionaries. aut1 = spot.translate('Xa') aut2 = spot.translate('Xb',", "2017, 2018 Laboratoire de Recherche et Développement de l'Epita # # This file", "in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi(", "software; you can redistribute it and/or modify it # under the terms of", "version 3 of the License, or # (at your option) any later version.", "published by # the Free Software Foundation; either version 3 of the License,", "# # This file is part of Spot, a model checking library. #", "implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the", "= spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p in zip(aut, inputres): assert", "# the Free Software Foundation; either version 3 of the License, or #", "ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR", "= spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except RuntimeError: pass", "n): phi = [] while len(phi) < n: f = rg.next() if f.is_syntactic_persistence():", "# Spot is free software; you can redistribute it and/or modify it #", "phi phi1 = produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000) inputres = [] aut", "the License, or # (at your option) any later version. # # Spot", "If not, see <http://www.gnu.org/licenses/>. import spot import sys import itertools # make sure", "spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p in zip(aut, inputres): assert p[0].equivalent_to(p[1])", "coding: utf-8 -*- # Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement", "Free Software Foundation; either version 3 of the License, or # (at your", "-*- mode: python; coding: utf-8 -*- # Copyright (C) 2017, 2018 Laboratoire de", "A PARTICULAR PURPOSE. 
See the GNU General Public # License for more details.", "build the sum of two automata with # different dictionaries. aut1 = spot.translate('Xa')", "= [] inputres = [] for p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 =", "this program. If not, see <http://www.gnu.org/licenses/>. import spot import sys import itertools #", "that we are not allowed to build the sum of two automata with", "part of Spot, a model checking library. # # Spot is free software;", "and/or modify it # under the terms of the GNU General Public License", "terms of the GNU General Public License as published by # the Free", "General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import", "import sys import itertools # make sure that we are not allowed to", "(C) 2017, 2018 Laboratoire de Recherche et Développement de l'Epita # # This", "spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except RuntimeError: pass opts", "= spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level',", "it and/or modify it # under the terms of the GNU General Public", "import spot import sys import itertools # make sure that we are not", "as published by # the Free Software Foundation; either version 3 of the", "of the GNU General Public License # along with this program. If not,", "dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p in zip(aut,", "<http://www.gnu.org/licenses/>. import spot import sys import itertools # make sure that we are", "License for more details. # # You should have received a copy of", "1000) phi2 = produce_phi(rg, 1000) inputres = [] aut = [] for p", "checking library. # # Spot is free software; you can redistribute it and/or", "python; coding: utf-8 -*- # Copyright (C) 2017, 2018 Laboratoire de Recherche et", "can redistribute it and/or modify it # under the terms of the GNU", "# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public #", "[] while len(phi) < n: f = rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi", "a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True))) for", "version. # # Spot is distributed in the hope that it will be", "spot.make_bdd_dict() def produce_phi(rg, n): phi = [] while len(phi) < n: f =", "phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2),", "if f.is_syntactic_persistence(): phi.append(f) return phi phi1 = produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000)", "of the GNU General Public License as published by # the Free Software", "warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU", "dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut", "p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict)", "with this program. If not, see <http://www.gnu.org/licenses/>. 
import spot import sys import itertools", "0) spot.srand(0) rg = spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def produce_phi(rg, n): phi", "f.is_syntactic_persistence(): phi.append(f) return phi phi1 = produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000) inputres", "useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY", "This file is part of Spot, a model checking library. # # Spot", "you can redistribute it and/or modify it # under the terms of the", "[] for p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 =", "in the hope that it will be useful, but WITHOUT # ANY WARRANTY;", "utf-8 -*- # Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement de", "Foundation; either version 3 of the License, or # (at your option) any", "of the License, or # (at your option) any later version. # #", "# under the terms of the GNU General Public License as published by", "should have received a copy of the GNU General Public License # along", "the GNU General Public License # along with this program. If not, see", "-*- # Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement de l'Epita", "two automata with # different dictionaries. aut1 = spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict())", "is free software; you can redistribute it and/or modify it # under the", "inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True)))", "the GNU General Public License as published by # the Free Software Foundation;", "[] inputres = [] for p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0],", "et Développement de l'Epita # # This file is part of Spot, a", "p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict)", "spot.remove_alternation(spot.sum(a1, a2), True))) for p in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut = []", "produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000) inputres = [] aut = [] for", "= spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True))) for p", "= [] for p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2", "aut1 = spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except RuntimeError:", "it # under the terms of the GNU General Public License as published", "try: spot.sum(aut1, aut2) exit(2) except RuntimeError: pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min',", "de Recherche et Développement de l'Epita # # This file is part of", "# Spot is distributed in the hope that it will be useful, but", "of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General", "spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0)", "is distributed in the hope that it will be useful, but WITHOUT #", "f = rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi phi1 = produce_phi(rg, 1000) phi2", "for p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1],", "FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more", "a model checking library. # # Spot is free software; you can redistribute", "= spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def produce_phi(rg, n): phi = [] while", "< n: f = rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi phi1 = produce_phi(rg,", "Spot is distributed in the hope that it will be useful, but WITHOUT", "hope that it will be useful, but WITHOUT # ANY WARRANTY; without even", "Laboratoire de Recherche et Développement de l'Epita # # This file is part", "the sum of two automata with # different dictionaries. aut1 = spot.translate('Xa') aut2", "while len(phi) < n: f = rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi phi1", "file is part of Spot, a model checking library. # # Spot is", "along with this program. If not, see <http://www.gnu.org/licenses/>. import spot import sys import", "inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True)))", "PURPOSE. See the GNU General Public # License for more details. # #", "0) opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def produce_phi(rg,", "You should have received a copy of the GNU General Public License #", "the hope that it will be useful, but WITHOUT # ANY WARRANTY; without", "phi.append(f) return phi phi1 = produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000) inputres =", "opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def", "# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS", "spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg", "will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty", "= spot.make_bdd_dict() def produce_phi(rg, n): phi = [] while len(phi) < n: f", "see <http://www.gnu.org/licenses/>. import spot import sys import itertools # make sure that we", "rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi phi1 = produce_phi(rg, 1000) phi2 = produce_phi(rg,", "See the GNU General Public # License for more details. # # You", "False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict()", "Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement de l'Epita # #", "(at your option) any later version. # # Spot is distributed in the", "details. # # You should have received a copy of the GNU General", "15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2,", "# along with this program. 
If not, see <http://www.gnu.org/licenses/>. import spot import sys", "that it will be useful, but WITHOUT # ANY WARRANTY; without even the", "spot import sys import itertools # make sure that we are not allowed", "General Public License as published by # the Free Software Foundation; either version", "p in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut = [] inputres = [] for", "not, see <http://www.gnu.org/licenses/>. import spot import sys import itertools # make sure that", "phi = [] while len(phi) < n: f = rg.next() if f.is_syntactic_persistence(): phi.append(f)", "sum of two automata with # different dictionaries. aut1 = spot.translate('Xa') aut2 =", "the GNU General Public # License for more details. # # You should", "received a copy of the GNU General Public License # along with this", "aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except RuntimeError: pass opts =", "aut2) exit(2) except RuntimeError: pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max',", "spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True))) for p in", "spot.sum(aut1, aut2) exit(2) except RuntimeError: pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15)", "opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0)", "= spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True))) for p in zip(aut, inputres): assert", "l'Epita # # This file is part of Spot, a model checking library.", "the terms of the GNU General Public License as published by # the", "model checking library. # # Spot is free software; you can redistribute it", "or # (at your option) any later version. # # Spot is distributed", "2018 Laboratoire de Recherche et Développement de l'Epita # # This file is", "Spot is free software; you can redistribute it and/or modify it # under", "dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except RuntimeError: pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL)", "opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg =", "any later version. # # Spot is distributed in the hope that it", "opts) dict = spot.make_bdd_dict() def produce_phi(rg, n): phi = [] while len(phi) <", "spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p in", "by # the Free Software Foundation; either version 3 of the License, or", "MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public", "modify it # under the terms of the GNU General Public License as", "WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A", "PARTICULAR PURPOSE. See the GNU General Public # License for more details. #", "Spot, a model checking library. 
# # Spot is free software; you can", "spot.srand(0) rg = spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def produce_phi(rg, n): phi =", "return phi phi1 = produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000) inputres = []", "opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0)", "copy of the GNU General Public License # along with this program. If", "opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def produce_phi(rg, n):", "spot.randltlgenerator(2, opts) dict = spot.make_bdd_dict() def produce_phi(rg, n): phi = [] while len(phi)", "[] aut = [] for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0],", "make sure that we are not allowed to build the sum of two", "inputres = [] for p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict)", "a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p in zip(aut, inputres):", "Public License as published by # the Free Software Foundation; either version 3", "inputres = [] aut = [] for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1", "assert p[0].equivalent_to(p[1]) aut = [] inputres = [] for p in zip(phi1, phi2):", "dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True))) for p in zip(aut,", "be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of", "without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR", "it will be useful, but WITHOUT # ANY WARRANTY; without even the implied", "# # You should have received a copy of the GNU General Public", "mode: python; coding: utf-8 -*- # Copyright (C) 2017, 2018 Laboratoire de Recherche", "a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for", "zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut = [] inputres = [] for p in", "FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for", "your option) any later version. # # Spot is distributed in the hope", "with # different dictionaries. aut1 = spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1,", "opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2, opts)", "License as published by # the Free Software Foundation; either version 3 of", "not allowed to build the sum of two automata with # different dictionaries.", "= produce_phi(rg, 1000) inputres = [] aut = [] for p in zip(phi1,", "have received a copy of the GNU General Public License # along with", "spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True))) for p in zip(aut, inputres): assert p[0].equivalent_to(p[1])", "15) opts.set('wf', False) opts.set('seed', 0) opts.set('simplification_level', 0) spot.srand(0) rg = spot.randltlgenerator(2, opts) dict", "# # Spot is distributed in the hope that it will be useful,", "Public # License for more details. 
# # You should have received a", "zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1,", "pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf', False) opts.set('seed',", "for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1],", "in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut = [] inputres = [] for p", "# You should have received a copy of the GNU General Public License", "Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import spot", "len(phi) < n: f = rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi phi1 =", "except RuntimeError: pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15) opts.set('wf',", "= [] while len(phi) < n: f = rg.next() if f.is_syntactic_persistence(): phi.append(f) return", "GNU General Public # License for more details. # # You should have", "= spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p", "later version. # # Spot is distributed in the hope that it will", "the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See", "WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or", "# (at your option) any later version. # # Spot is distributed in", "but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY #", "a2), True))) for p in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut = [] inputres", "# Copyright (C) 2017, 2018 Laboratoire de Recherche et Développement de l'Epita #", "# This file is part of Spot, a model checking library. # #", "automata with # different dictionaries. aut1 = spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try:", "free software; you can redistribute it and/or modify it # under the terms", "sure that we are not allowed to build the sum of two automata", "# # Spot is free software; you can redistribute it and/or modify it", "is part of Spot, a model checking library. # # Spot is free", "GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.", "different dictionaries. aut1 = spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2)", "def produce_phi(rg, n): phi = [] while len(phi) < n: f = rg.next()", "or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License", "a copy of the GNU General Public License # along with this program.", "1000) inputres = [] aut = [] for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p))", "[] for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 =", "of Spot, a model checking library. # # Spot is free software; you", "program. If not, see <http://www.gnu.org/licenses/>. 
import spot import sys import itertools # make", "= [] for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2", "aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1, a2), True))) for p in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut =", "3 of the License, or # (at your option) any later version. #", "dict = spot.make_bdd_dict() def produce_phi(rg, n): phi = [] while len(phi) < n:", "aut = [] inputres = [] for p in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1", "# make sure that we are not allowed to build the sum of", "Recherche et Développement de l'Epita # # This file is part of Spot,", "we are not allowed to build the sum of two automata with #", "in zip(phi1, phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi(", "produce_phi(rg, 1000) inputres = [] aut = [] for p in zip(phi1, phi2):", "aut = [] for p in zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict)", "for more details. # # You should have received a copy of the", "True))) for p in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut = [] inputres =", "# License for more details. # # You should have received a copy", "are not allowed to build the sum of two automata with # different", "zip(phi1, phi2): inputres.append(spot.formula.Or(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum(a1,", "p[0].equivalent_to(p[1]) aut = [] inputres = [] for p in zip(phi1, phi2): inputres.append(spot.formula.And(p))", "dictionaries. aut1 = spot.translate('Xa') aut2 = spot.translate('Xb', dict=spot.make_bdd_dict()) try: spot.sum(aut1, aut2) exit(2) except", "Développement de l'Epita # # This file is part of Spot, a model", "<filename>lib/spot-2.8.1/tests/python/sum.py # -*- mode: python; coding: utf-8 -*- # Copyright (C) 2017, 2018", "allowed to build the sum of two automata with # different dictionaries. aut1", "n: f = rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi phi1 = produce_phi(rg, 1000)", "GNU General Public License as published by # the Free Software Foundation; either", "sys import itertools # make sure that we are not allowed to build", "option) any later version. # # Spot is distributed in the hope that", "a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2), True))) for p in zip(aut, inputres):", "itertools # make sure that we are not allowed to build the sum", "more details. 
# # You should have received a copy of the GNU", "exit(2) except RuntimeError: pass opts = spot.option_map() opts.set('output', spot.randltlgenerator.LTL) opts.set('tree_size_min', 15) opts.set('tree_size_max', 15)", "the Free Software Foundation; either version 3 of the License, or # (at", "= rg.next() if f.is_syntactic_persistence(): phi.append(f) return phi phi1 = produce_phi(rg, 1000) phi2 =", "phi1 = produce_phi(rg, 1000) phi2 = produce_phi(rg, 1000) inputres = [] aut =", "phi2): inputres.append(spot.formula.And(p)) a1 = spot.ltl_to_tgba_fm(p[0], dict) a2 = spot.ltl_to_tgba_fm(p[1], dict) aut.append(spot.to_generalized_buchi( spot.remove_alternation(spot.sum_and(a1, a2),", "inputres): assert p[0].equivalent_to(p[1]) aut = [] inputres = [] for p in zip(phi1,", "under the terms of the GNU General Public License as published by #", "License # along with this program. If not, see <http://www.gnu.org/licenses/>. import spot import", "redistribute it and/or modify it # under the terms of the GNU General", "# -*- mode: python; coding: utf-8 -*- # Copyright (C) 2017, 2018 Laboratoire", "distributed in the hope that it will be useful, but WITHOUT # ANY", "produce_phi(rg, n): phi = [] while len(phi) < n: f = rg.next() if", "for p in zip(aut, inputres): assert p[0].equivalent_to(p[1]) aut = [] inputres = []", "Software Foundation; either version 3 of the License, or # (at your option)", "import itertools # make sure that we are not allowed to build the", "General Public # License for more details. # # You should have received", "even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE." ]
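For orientation, a minimal sketch of what the test above exercises: spot.sum recognizes the union of the two input languages and spot.sum_and the intersection, which is why the loops compare the results against formula.Or and formula.And. The formulas below are illustrative, not taken from the test, and assume Spot's Python bindings are on the path.

import spot

left = spot.translate('a U b')
right = spot.translate('Gc')
# normalize the sum the same way the test does before comparing languages
union = spot.to_generalized_buchi(
    spot.remove_alternation(spot.sum(left, right), True))
assert union.equivalent_to(spot.formula.Or([spot.formula('a U b'),
                                            spot.formula('Gc')]))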
[ "from .user import user_router from .match import match_router # add individual routers to", "session_dependency() @main_router.get(\"/\", status_code=200) async def root(): return {\"msg\": \"Welcome to UMass Match!\"} from", "import user_router from .match import match_router # add individual routers to top-level router", "root(): return {\"msg\": \"Welcome to UMass Match!\"} from .user import user_router from .match", "def root(): return {\"msg\": \"Welcome to UMass Match!\"} from .user import user_router from", "status_code=200) async def root(): return {\"msg\": \"Welcome to UMass Match!\"} from .user import", "session_dep = session_dependency() @main_router.get(\"/\", status_code=200) async def root(): return {\"msg\": \"Welcome to UMass", "main_router = APIRouter() from resources.db import session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200) async", "from resources.db import session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200) async def root(): return", "return {\"msg\": \"Welcome to UMass Match!\"} from .user import user_router from .match import", "= APIRouter() from resources.db import session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200) async def", "\"Welcome to UMass Match!\"} from .user import user_router from .match import match_router #", "from .match import match_router # add individual routers to top-level router main_router.include_router(user_router) main_router.include_router(match_router)", "APIRouter main_router = APIRouter() from resources.db import session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200)", "UMass Match!\"} from .user import user_router from .match import match_router # add individual", "async def root(): return {\"msg\": \"Welcome to UMass Match!\"} from .user import user_router", "user_router from .match import match_router # add individual routers to top-level router main_router.include_router(user_router)", "{\"msg\": \"Welcome to UMass Match!\"} from .user import user_router from .match import match_router", "import APIRouter main_router = APIRouter() from resources.db import session_dependency session_dep = session_dependency() @main_router.get(\"/\",", "to UMass Match!\"} from .user import user_router from .match import match_router # add", "@main_router.get(\"/\", status_code=200) async def root(): return {\"msg\": \"Welcome to UMass Match!\"} from .user", "session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200) async def root(): return {\"msg\": \"Welcome to", ".user import user_router from .match import match_router # add individual routers to top-level", "import session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200) async def root(): return {\"msg\": \"Welcome", "from fastapi import APIRouter main_router = APIRouter() from resources.db import session_dependency session_dep =", "APIRouter() from resources.db import session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200) async def root():", "fastapi import APIRouter main_router = APIRouter() from resources.db import session_dependency session_dep = session_dependency()", "= session_dependency() @main_router.get(\"/\", status_code=200) async def root(): return {\"msg\": \"Welcome to UMass Match!\"}", "resources.db import session_dependency session_dep = session_dependency() @main_router.get(\"/\", status_code=200) 
async def root(): return {\"msg\":", "Match!\"} from .user import user_router from .match import match_router # add individual routers" ]
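A minimal sketch of how a router module like this is typically mounted on an application; the import path in the comment is hypothetical, not from the original.

from fastapi import FastAPI
# from routers import main_router  # wherever the module above lives (path hypothetical)

app = FastAPI()
app.include_router(main_router)  # exposes "/", plus the user and match routes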
[ "feature_len_map, classifier=False): super().__init__() self.feature_len_map = feature_len_map extra_length = 0 if self.feature_len_map[-1] is None", "self.norm = nn.LayerNorm(d_model) def forward(self, trg, e_outputs, src_mask=None, trg_mask=None): x = trg for", "output class customized_CNN(nn.Module): def __init__(self): super().__init__() self.cnn_1 = nn.Conv2d(1, 32, kernel_size=(3,1), padding=(1,0)) self.maxpool_1", "__init__(self, embedding_vec_dim, d_model, N, heads, dropout, feature_len_map, classifier=False): super().__init__() self.feature_len_map = feature_len_map extra_length", "torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in range(bs)])) pos = pos.to(OT_crispr_attn.device2) embedded_pos", "= src.size(0) pos_len = src.size(1) pos = torch.from_numpy(np.array([[i for i in range(pos_len)] for", "dropout, extra_length): super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout) self.decoder = Decoder(n_feature_dim,", "d_output = self.cnn(d_output) flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1)) if attention_setting.add_parallel_cnn: src = torch.unsqueeze(src, 1)", "embedding_vec_dim, N, heads, dropout, extra_length) self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)", "i in range(pos_length)] for _ in range(bs)])) pos = pos.to(crispr_attn.device2) embedded_src_pos = self.embedding_pos(pos)", "d_model, n_feature_dim, N, heads, dropout, extra_length): super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N, heads,", "nn.LayerNorm(d_model) def forward(self, src, mask=None): x = src for i in range(self.N): x", "pos_len = src.size(1) pos = torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in", "self.encoder(src, src_mask) # print(\"DECODER\") d_output = self.decoder(trg, e_outputs, src_mask, trg_mask) if attention_setting.add_seq_cnn: if", "self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.dropout = nn.Dropout(p = attention_setting.cnn_dropout) def forward(self, input): x", "super().__init__() self.N = N self.layers = get_clones(EncoderLayer(d_input, d_model, heads, dropout), N) self.norm =", "attention_setting = importlib.import_module(config_path+\"attention_setting\") def get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) class", "def forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None): e_outputs = self.encoder(src, src_mask) # print(\"DECODER\")", "class Transformer(nn.Module): def __init__(self, d_input, d_model, n_feature_dim, N, heads, dropout, extra_length): super().__init__() self.encoder", "= input[:, self.feature_len_map[2][0]: self.feature_len_map[2][1]] output = self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF, src_mask=src_mask, trg_mask=trg_mask) if self.classifier:", "None and attention_setting.analysis != 'deepCrispr': flat_d_output = cat((flat_d_output, extra_input_for_FF), dim=1) output = self.out(flat_d_output)", "attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn if attention_setting.add_seq_cnn: d_input = 64 * (((d_input + 2)", "self.dropout(embedded_src) embedded_trg = self.dropout(embedded_trg) extra_input_for_FF = None if self.feature_len_map[2] is not None: extra_input_for_FF", "N self.layers = get_clones(DecoderLayer(d_input, d_model, heads, dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self,", "= nn.Embedding(d_input, 
embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.dropout = nn.Dropout(p=config.dropout) self.classifier = classifier", "extra_input_for_FF=extra_input_for_FF, src_mask=src_mask, trg_mask=trg_mask) if self.classifier: # output = F.log_softmax(output, dim = -1) #output", "pos = pos.to(crispr_attn.device2) embedded_src_pos = self.embedding_pos(pos) embedded_src_1 = embedded_src + embedded_src_pos embedded_src_2 =", "DecoderLayer from Sublayers import Norm, OutputFeedForward import copy import attention_setting import numpy as", "= customized_CNN() assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn if attention_setting.add_seq_cnn: d_input = 64", "self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(config.seq_len - config.word_len", "= src.size(0) pos_length = config.seq_len - config.seq_start - config.word_len + 1 pos =", "1 self.out = OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers, dropout=dropout) def forward(self, src, trg, extra_input_for_FF=None,", "if extra_input_for_FF is not None and attention_setting.analysis != 'deepCrispr': flat_d_output = cat((flat_d_output, extra_input_for_FF),", "d_input, d_model, N, heads, dropout): super().__init__() self.N = N self.layers = get_clones(DecoderLayer(d_input, d_model,", "super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length) self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_2 =", "numpy as np import crispr_attn import math import OT_crispr_attn import sys import importlib", "crispr_attn import math import OT_crispr_attn import sys import importlib import pdb # Setting", "feature_len_map, classifier = True) for p in model.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p)", "1: nn.init.xavier_uniform_(p) # if attention_setting.device == 0: # model = model.cuda() return model", "else \"\" config = importlib.import_module(config_path + \"config\") attention_setting = importlib.import_module(config_path+\"attention_setting\") def get_clones(module, N):", "embedded_pos + embedded_src if self.feature_len_map[1] is not None: trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long() else:", "def __init__(self, d_input, d_model, N, heads, dropout): super().__init__() self.N = N self.layers =", "= self.trg_embedding_pos(pos) embedded_trg_1 = embedded_trg + embedded_trg_pos embedded_trg_2 = self.dropout(embedded_trg_1) else: embedded_trg_2 =", "None: embedded_trg = self.trg_embedding(trg) embedded_trg_pos = self.trg_embedding_pos(pos) embedded_trg_1 = embedded_trg + embedded_trg_pos embedded_trg_2", "= -1) pass return output class EmbeddingTransformer(Transformer): def __init__(self, embedding_vec_dim , d_input, d_model,", "2) // 2 + 2) // 2)) * config.embedding_vec_dim) d_input = d_input_1 +", "-1) pass return output class EmbeddingTransformer(Transformer): def __init__(self, embedding_vec_dim , d_input, d_model, N,", "config.word_len + 1, embedding_vec_dim) self.dropout = nn.Dropout(p = config.dropout) def forward(self, src, trg", "import sys import importlib import pdb # Setting the correct config file config_path", "1: nn.init.xavier_uniform_(p) return model def get_model(inputs_lengths=None, d_input = 20): assert attention_setting.d_model % attention_setting.attention_heads", "= d_input * d_model d_input_2 = ((64 * (((d_input + 2) // 2", "embedded_pos_trg = 
self.trg_embedding_pos(pos) embedded_trg = embedded_pos_trg + embedded_trg embedded_src = self.dropout(embedded_src) embedded_trg =", "d_input, d_model, n_feature_dim, N, heads, dropout, extra_length): super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N,", "heads, dropout) self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout) #self.linear = nn.Linear() self.cnn", "attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, extra_feature_length) for p in model.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p)", "= self.maxpool_1(self.cnn_1(input)) x = F.relu(x) x = self.maxpool_2(self.cnn_2(x)) x = F.relu(x) x =", "classifier def forward(self, input, src_mask=None, trg_mask=None): src = input[:,self.feature_len_map[0][0]: self.feature_len_map[0][1]].long() embedded_src = self.embedding(src)", "dropout) #self.linear = nn.Linear() self.cnn = customized_CNN() assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn", "cat((flat_d_output, extra_input_for_FF), dim=1) output = self.out(flat_d_output) return output class customized_CNN(nn.Module): def __init__(self): super().__init__()", "Decoder(nn.Module): def __init__(self, d_input, d_model, N, heads, dropout): super().__init__() self.N = N self.layers", "embedded_src_pos embedded_src_2 = self.dropout(embedded_src_1) if trg is not None: embedded_trg = self.trg_embedding(trg) embedded_trg_pos", "attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout) extra_feature_length = len(config.extra_categorical_features + config.extra_numerical_features) model = EmbeddingTransformer(attention_setting.n_feature_dim,", "'deepCrispr': d_model += 4 extra_length = 0 if attention_setting.add_parallel_cnn: d_input_1 = d_input *", "return output class EmbeddingTransformer(Transformer): def __init__(self, embedding_vec_dim , d_input, d_model, N, heads, dropout,", "pos.to(crispr_attn.device2) embedded_src_pos = self.embedding_pos(pos) embedded_src_1 = embedded_src + embedded_src_pos embedded_src_2 = self.dropout(embedded_src_1) if", "def forward(self, input, src_mask=None, trg_mask=None): src = input[:,self.feature_len_map[0][0]: self.feature_len_map[0][1]].long() embedded_src = self.embedding(src) bs", "nn.Conv2d(1, 32, kernel_size=(3,1), padding=(1,0)) self.maxpool_1 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3,1),", "= 20): assert attention_setting.d_model % attention_setting.attention_heads == 0 assert attention_setting.attention_dropout < 1 #model", "heads, dropout, extra_length): super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length) self.embedding = nn.Embedding(config.embedding_voca_size,", "import importlib import pdb # Setting the correct config file config_path = \".\".join([\"models\",", "embedded_trg = self.trg_embedding(trg) embedded_trg_pos = self.trg_embedding_pos(pos) embedded_trg_1 = embedded_trg + embedded_trg_pos embedded_trg_2 =", "if attention_setting.add_parallel_cnn: src = torch.unsqueeze(src, 1) inter_output = self.cnn(src).view(src.size(0), -1) flat_d_output = cat((inter_output,", "= Transformer(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length) self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding", "embedded_src = self.embedding(src_1) embedded_src_2 = self.embedding_2(src_2) embedded_src = 
cat(tuple([embedded_src, embedded_src_2]), dim=1) else: embedded_src", "% attention_setting.attention_heads == 0 assert attention_setting.attention_dropout < 1 if not classifier: model =", "if config.seq_len == 22: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) else: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0))", "is not None: trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long() else: trg = src embedded_trg =", "self.feature_len_map[2][1]] output = self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF, src_mask=src_mask, trg_mask=trg_mask) if self.classifier: # output =", "F from Layers import EncoderLayer, DecoderLayer from Sublayers import Norm, OutputFeedForward import copy", "2)) * config.embedding_vec_dim) d_input = d_input_1 + d_input_2 d_model = 1 self.out =", "= F.relu(x) x = self.maxpool_2(self.cnn_2(x)) x = F.relu(x) x = x.contiguous().view(x.size(0), -1, x.size(-1)", "1 #model = Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout) extra_feature_length = len(config.extra_categorical_features +", "+ 1, embedding_vec_dim) self.dropout = nn.Dropout(p = config.dropout) def forward(self, src, trg =", "x = x.contiguous().view(x.size(0), -1, x.size(-1) * x.size(-2)) return x class OTembeddingTransformer(nn.Module): def __init__(self,", "0 if attention_setting.add_parallel_cnn: d_input_1 = d_input * d_model d_input_2 = ((64 * (((d_input", "attention_setting.analysis == 'deepCrispr': bs = extra_input_for_FF.size(0) extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4) d_output =", "forward(self, input): x = self.maxpool_1(self.cnn_1(input)) x = F.relu(x) x = self.maxpool_2(self.cnn_2(x)) x =", "d_output = torch.unsqueeze(d_output, 1) d_output = self.cnn(d_output) flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1)) if attention_setting.add_parallel_cnn:", "d_model, N, heads, dropout, extra_length): super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length) self.embedding", "len(sys.argv) >= 2 else \"\" config = importlib.import_module(config_path + \"config\") attention_setting = importlib.import_module(config_path+\"attention_setting\")", "-1, x.size(-1) * x.size(-2)) return x class OTembeddingTransformer(nn.Module): def __init__(self, embedding_vec_dim, d_model, N,", "src[:,:config.sep_len] src_2 = src[:, config.sep_len:] embedded_src = self.embedding(src_1) embedded_src_2 = self.embedding_2(src_2) embedded_src =", "p in model.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) # if attention_setting.device == 0:", "range(pos_len)] for _ in range(bs)])) pos = pos.to(OT_crispr_attn.device2) embedded_pos = self.embedding_pos(pos) embedded_src =", "== 0 assert attention_setting.attention_dropout < 1 #model = Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads,", "super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout) self.decoder = Decoder(n_feature_dim, d_model, N,", "= pos.to(OT_crispr_attn.device2) embedded_pos = self.embedding_pos(pos) embedded_src = embedded_pos + embedded_src if self.feature_len_map[1] is", "self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF, src_mask=src_mask, trg_mask=trg_mask) if self.classifier: # output = F.log_softmax(output, dim =", 
"forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None): e_outputs = self.encoder(src, src_mask) # print(\"DECODER\") d_output", "x.size(-2)) return x class OTembeddingTransformer(nn.Module): def __init__(self, embedding_vec_dim, d_model, N, heads, dropout, feature_len_map,", "src embedded_trg = self.trg_embedding(trg) embedded_pos_trg = self.trg_embedding_pos(pos) embedded_trg = embedded_pos_trg + embedded_trg embedded_src", "extra_input_for_FF = None if self.feature_len_map[2] is not None: extra_input_for_FF = input[:, self.feature_len_map[2][0]: self.feature_len_map[2][1]]", "EmbeddingTransformer(Transformer): def __init__(self, embedding_vec_dim , d_input, d_model, N, heads, dropout, extra_length): super().__init__(d_input, d_model,", "self.N = N self.layers = get_clones(DecoderLayer(d_input, d_model, heads, dropout), N) self.norm = nn.LayerNorm(d_model)", "d_input = 64 * (((d_input + 2) // 2 + 2) // 2)", "extra_length) self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_2 = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)", "= d_input_1 + d_input_2 d_model = 1 self.out = OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers,", "nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.dropout = nn.Dropout(p = config.dropout) def forward(self,", "0: src_1 = src[:,:config.sep_len] src_2 = src[:, config.sep_len:] embedded_src = self.embedding(src_1) embedded_src_2 =", "config.embedding_vec_dim) d_input = d_input_1 + d_input_2 d_model = 1 self.out = OutputFeedForward(d_model, d_input,", "is not None: extra_input_for_FF = input[:, self.feature_len_map[2][0]: self.feature_len_map[2][1]] output = self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF,", "= cat(tuple([embedded_src, embedded_src_2]), dim=1) else: embedded_src = self.embedding(src) bs = src.size(0) pos_length =", "if self.feature_len_map[2] is not None: extra_input_for_FF = input[:, self.feature_len_map[2][0]: self.feature_len_map[2][1]] output = self.transformer(embedded_src,", "nn.Conv2d(32, 64, kernel_size=(3,1), padding=(1,0)) if config.seq_len == 22: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) else:", "else: model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, feature_len_map, classifier = True) for", "* d_model d_input_2 = ((64 * (((d_input + 2) // 2 + 2)", "config.word_len + 1 pos = torch.from_numpy(np.array([[i for i in range(pos_length)] for _ in", "import cat, transpose import torch import torch.nn.functional as F from Layers import EncoderLayer,", "d_model = 1 self.out = OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers, dropout=dropout) def forward(self, src,", "self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.dropout", "2) if attention_setting.analysis == 'deepCrispr': d_model += 4 extra_length = 0 if attention_setting.add_parallel_cnn:", "x = self.layers[i](x, mask) return self.norm(x) if attention_setting.attention_layer_norm else x class Decoder(nn.Module): def", "config.seq_len - config.seq_start - config.word_len 
+ 1 pos = torch.from_numpy(np.array([[i for i in", "self.feature_len_map[-1][0] d_input = self.feature_len_map[0][1] - self.feature_len_map[0][0] self.transformer = Transformer(d_input, d_model, embedding_vec_dim, N, heads,", "d_layers=attention_setting.output_FF_layers, dropout=dropout) def forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None): e_outputs = self.encoder(src, src_mask)", "- config.word_len + 1, embedding_vec_dim) self.dropout = nn.Dropout(p = config.dropout) def forward(self, src,", "Encoder(n_feature_dim, d_model, N, heads, dropout) self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout) #self.linear", "get_clones(DecoderLayer(d_input, d_model, heads, dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self, trg, e_outputs, src_mask=None,", "self.cnn_1 = nn.Conv2d(1, 32, kernel_size=(3,1), padding=(1,0)) self.maxpool_1 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2 = nn.Conv2d(32,", "embedded_trg_2 = self.dropout(embedded_trg_1) else: embedded_trg_2 = embedded_src_2 #embedded_src_2 = transpose(embedded_src_2, 1, 2) output", "importlib.import_module(config_path+\"attention_setting\") def get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) class Encoder(nn.Module): def", "== 'deepCrispr': bs = extra_input_for_FF.size(0) extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4) d_output = cat((d_output,", "= src[:, config.sep_len:] embedded_src = self.embedding(src_1) embedded_src_2 = self.embedding_2(src_2) embedded_src = cat(tuple([embedded_src, embedded_src_2]),", "if attention_setting.analysis == 'deepCrispr': d_model += 4 extra_length = 0 if attention_setting.add_parallel_cnn: d_input_1", "output = F.log_softmax(output, dim = -1) #output = F.softmax(output, dim = -1) pass", "OT_crispr_attn import sys import importlib import pdb # Setting the correct config file", "nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(config.seq_len", "import attention_setting import numpy as np import crispr_attn import math import OT_crispr_attn import", "class EmbeddingTransformer(Transformer): def __init__(self, embedding_vec_dim , d_input, d_model, N, heads, dropout, extra_length): super().__init__(d_input,", "import math import OT_crispr_attn import sys import importlib import pdb # Setting the", "self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout) #self.linear = nn.Linear() self.cnn = customized_CNN()", "as F from Layers import EncoderLayer, DecoderLayer from Sublayers import Norm, OutputFeedForward import", "d_input_2 d_model = 1 self.out = OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers, dropout=dropout) def forward(self,", "self.norm(x) if attention_setting.attention_layer_norm else x class Decoder(nn.Module): def __init__(self, d_input, d_model, N, heads,", "#self.linear = nn.Linear() self.cnn = customized_CNN() assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn if", "= input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long() else: trg = src embedded_trg = self.trg_embedding(trg) embedded_pos_trg = self.trg_embedding_pos(pos)", "= nn.Conv2d(1, 32, kernel_size=(3,1), padding=(1,0)) self.maxpool_1 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2 = nn.Conv2d(32, 64,", "None, extra_input_for_FF=None, src_mask=None, trg_mask=None): if config.sep_len 
!= 0: src_1 = src[:,:config.sep_len] src_2 =", "src_mask=None, trg_mask=None): src = input[:,self.feature_len_map[0][0]: self.feature_len_map[0][1]].long() embedded_src = self.embedding(src) bs = src.size(0) pos_len", "return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) class Encoder(nn.Module): def __init__(self, d_input, d_model, N,", "attention_setting.add_parallel_cnn: src = torch.unsqueeze(src, 1) inter_output = self.cnn(src).view(src.size(0), -1) flat_d_output = cat((inter_output, flat_d_output),dim=1)", "= src.size(1) pos = torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in range(bs)]))", "self.dropout = nn.Dropout(p = attention_setting.cnn_dropout) def forward(self, input): x = self.maxpool_1(self.cnn_1(input)) x =", "classifier=False): super().__init__() self.feature_len_map = feature_len_map extra_length = 0 if self.feature_len_map[-1] is None else", "pos = torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in range(bs)])) pos =", "output = super().forward(embedded_src_2, embedded_trg_2, extra_input_for_FF) return output def get_OT_model(feature_len_map, classifier = False): assert", "self.cnn(src).view(src.size(0), -1) flat_d_output = cat((inter_output, flat_d_output),dim=1) if extra_input_for_FF is not None and attention_setting.analysis", "extra_input_for_FF) return output def get_OT_model(feature_len_map, classifier = False): assert attention_setting.d_model % attention_setting.attention_heads ==", "= nn.Dropout(p = attention_setting.cnn_dropout) def forward(self, input): x = self.maxpool_1(self.cnn_1(input)) x = F.relu(x)", "__init__(self, d_input, d_model, N, heads, dropout): super().__init__() self.N = N self.layers = get_clones(EncoderLayer(d_input,", "x.contiguous().view(x.size(0), -1, x.size(-1) * x.size(-2)) return x class OTembeddingTransformer(nn.Module): def __init__(self, embedding_vec_dim, d_model,", "as nn from torch import cat, transpose import torch import torch.nn.functional as F", "return output def get_OT_model(feature_len_map, classifier = False): assert attention_setting.d_model % attention_setting.attention_heads == 0", "importlib import pdb # Setting the correct config file config_path = \".\".join([\"models\", sys.argv[1]])", "dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self, src, mask=None): x = src for", "nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) else: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.dropout = nn.Dropout(p = attention_setting.cnn_dropout) def", "mask=None): x = src for i in range(self.N): x = self.layers[i](x, mask) return", "for i in range(self.N): x = self.layers[i](x, e_outputs, src_mask, trg_mask) return self.norm(x) if", "+ embedded_src if self.feature_len_map[1] is not None: trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long() else: trg", "= self.dropout(embedded_src_1) if trg is not None: embedded_trg = self.trg_embedding(trg) embedded_trg_pos = self.trg_embedding_pos(pos)", "heads, dropout): super().__init__() self.N = N self.layers = get_clones(DecoderLayer(d_input, d_model, heads, dropout), N)", "dropout): super().__init__() self.N = N self.layers = get_clones(DecoderLayer(d_input, d_model, heads, dropout), N) self.norm", "Setting the correct config file config_path = \".\".join([\"models\", sys.argv[1]]) + \".\" if len(sys.argv)", "e_outputs, src_mask, trg_mask) return self.norm(x) if attention_setting.attention_layer_norm else x class Transformer(nn.Module): def __init__(self,", "embedding_vec_dim) self.embedding_2 = 
nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len -", "import copy import attention_setting import numpy as np import crispr_attn import math import", "= Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout) extra_feature_length = len(config.extra_categorical_features + config.extra_numerical_features) model", "embedding_vec_dim) self.dropout = nn.Dropout(p = config.dropout) def forward(self, src, trg = None, extra_input_for_FF=None,", "attention_setting.attention_heads == 0 assert attention_setting.attention_dropout < 1 if not classifier: model = OTembeddingTransformer(attention_setting.n_feature_dim,", "extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4) d_output = cat((d_output, extra_input_for_FF), dim = 2) d_output", "'deepCrispr': bs = extra_input_for_FF.size(0) extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4) d_output = cat((d_output, extra_input_for_FF),", "nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1,", "model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, feature_len_map) else: model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model,", "= self.embedding_pos(pos) embedded_src_1 = embedded_src + embedded_src_pos embedded_src_2 = self.dropout(embedded_src_1) if trg is", "d_input = 20): assert attention_setting.d_model % attention_setting.attention_heads == 0 assert attention_setting.attention_dropout < 1", "nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1,", "= attention_setting.cnn_dropout) def forward(self, input): x = self.maxpool_1(self.cnn_1(input)) x = F.relu(x) x =", "self.embedding(src_1) embedded_src_2 = self.embedding_2(src_2) embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1) else: embedded_src = self.embedding(src)", "self.embedding_2(src_2) embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1) else: embedded_src = self.embedding(src) bs = src.size(0)", ", d_input, d_model, N, heads, dropout, extra_length): super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout,", "embedded_trg + embedded_trg_pos embedded_trg_2 = self.dropout(embedded_trg_1) else: embedded_trg_2 = embedded_src_2 #embedded_src_2 = transpose(embedded_src_2,", "= self.cnn(d_output) flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1)) if attention_setting.add_parallel_cnn: src = torch.unsqueeze(src, 1) inter_output", "OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers, dropout=dropout) def forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None): e_outputs", "bs = src.size(0) pos_length = config.seq_len - config.seq_start - config.word_len + 1 pos", "for i in range(pos_len)] for _ in range(bs)])) pos = pos.to(OT_crispr_attn.device2) embedded_pos =", "= nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(d_input, embedding_vec_dim) 
self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.dropout =", "extra_feature_length = len(config.extra_categorical_features + config.extra_numerical_features) model = EmbeddingTransformer(attention_setting.n_feature_dim, d_input, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout,", "embedding_vec_dim) self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim)", "1) inter_output = self.cnn(src).view(src.size(0), -1) flat_d_output = cat((inter_output, flat_d_output),dim=1) if extra_input_for_FF is not", "for _ in range(N)]) class Encoder(nn.Module): def __init__(self, d_input, d_model, N, heads, dropout):", "heads, dropout): super().__init__() self.N = N self.layers = get_clones(EncoderLayer(d_input, d_model, heads, dropout), N)", "= None, extra_input_for_FF=None, src_mask=None, trg_mask=None): if config.sep_len != 0: src_1 = src[:,:config.sep_len] src_2", "\".\".join([\"models\", sys.argv[1]]) + \".\" if len(sys.argv) >= 2 else \"\" config = importlib.import_module(config_path", "self.layers[i](x, mask) return self.norm(x) if attention_setting.attention_layer_norm else x class Decoder(nn.Module): def __init__(self, d_input,", "nn.Embedding(d_input, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.dropout = nn.Dropout(p=config.dropout) self.classifier = classifier def", "d_input = self.feature_len_map[0][1] - self.feature_len_map[0][0] self.transformer = Transformer(d_input, d_model, embedding_vec_dim, N, heads, dropout,", "N, heads, dropout, extra_length) self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_2 = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding", "if len(sys.argv) >= 2 else \"\" config = importlib.import_module(config_path + \"config\") attention_setting =", "trg is not None: embedded_trg = self.trg_embedding(trg) embedded_trg_pos = self.trg_embedding_pos(pos) embedded_trg_1 = embedded_trg", "self.feature_len_map[0][1] - self.feature_len_map[0][0] self.transformer = Transformer(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length) self.embedding", "trg, e_outputs, src_mask=None, trg_mask=None): x = trg for i in range(self.N): x =", "+= 4 extra_length = 0 if attention_setting.add_parallel_cnn: d_input_1 = d_input * d_model d_input_2", "\"config\") attention_setting = importlib.import_module(config_path+\"attention_setting\") def get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])", "src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None): e_outputs = self.encoder(src, src_mask) # print(\"DECODER\") d_output =", "self.feature_len_map[1] is not None: trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long() else: trg = src embedded_trg", "nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.dropout = nn.Dropout(p=config.dropout)", "= embedded_trg + embedded_trg_pos embedded_trg_2 = self.dropout(embedded_trg_1) else: embedded_trg_2 = embedded_src_2 #embedded_src_2 =", "attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout) extra_feature_length = 
len(config.extra_categorical_features + config.extra_numerical_features) model = EmbeddingTransformer(attention_setting.n_feature_dim, d_input,", "for i in range(self.N): x = self.layers[i](x, mask) return self.norm(x) if attention_setting.attention_layer_norm else", "d_model d_input_2 = ((64 * (((d_input + 2) // 2 + 2) //", "self.embedding_pos(pos) embedded_src = embedded_pos + embedded_src if self.feature_len_map[1] is not None: trg =", "assert attention_setting.d_model % attention_setting.attention_heads == 0 assert attention_setting.attention_dropout < 1 if not classifier:", "= feature_len_map extra_length = 0 if self.feature_len_map[-1] is None else self.feature_len_map[-1][1] - self.feature_len_map[-1][0]", "in range(self.N): x = self.layers[i](x, mask) return self.norm(x) if attention_setting.attention_layer_norm else x class", "// 2 + 2) // 2)) * config.embedding_vec_dim) d_input = d_input_1 + d_input_2", "= self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF, src_mask=src_mask, trg_mask=trg_mask) if self.classifier: # output = F.log_softmax(output, dim", "from Layers import EncoderLayer, DecoderLayer from Sublayers import Norm, OutputFeedForward import copy import", "2) // 2) if attention_setting.analysis == 'deepCrispr': d_model += 4 extra_length = 0", "2 + 2) // 2)) * config.embedding_vec_dim) d_input = d_input_1 + d_input_2 d_model", "x class Decoder(nn.Module): def __init__(self, d_input, d_model, N, heads, dropout): super().__init__() self.N =", "src[:, config.sep_len:] embedded_src = self.embedding(src_1) embedded_src_2 = self.embedding_2(src_2) embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1)", "= src embedded_trg = self.trg_embedding(trg) embedded_pos_trg = self.trg_embedding_pos(pos) embedded_trg = embedded_pos_trg + embedded_trg", "extra_length): super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout) self.decoder = Decoder(n_feature_dim, d_model,", "attention_setting.analysis == 'deepCrispr': d_model += 4 extra_length = 0 if attention_setting.add_parallel_cnn: d_input_1 =", "0 assert attention_setting.attention_dropout < 1 if not classifier: model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model, attention_setting.n_layers,", "attention_setting.attention_dropout, feature_len_map, classifier = True) for p in model.parameters(): if p.dim() > 1:", "= self.trg_embedding_pos(pos) embedded_trg = embedded_pos_trg + embedded_trg embedded_src = self.dropout(embedded_src) embedded_trg = self.dropout(embedded_trg)", "= len(config.extra_categorical_features + config.extra_numerical_features) model = EmbeddingTransformer(attention_setting.n_feature_dim, d_input, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, extra_feature_length)", "kernel_size=(3,1), padding=(1,0)) self.maxpool_1 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3,1), padding=(1,0)) if", "forward(self, src, trg = None, extra_input_for_FF=None, src_mask=None, trg_mask=None): if config.sep_len != 0: src_1", "dropout) self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout) #self.linear = nn.Linear() self.cnn =", "-1, 4) d_output = cat((d_output, extra_input_for_FF), dim = 2) d_output = torch.unsqueeze(d_output, 1)", "+ embedded_src_pos embedded_src_2 = self.dropout(embedded_src_1) if trg is not None: embedded_trg = self.trg_embedding(trg)", "= embedded_src + embedded_src_pos 
class Encoder(nn.Module):
    def __init__(self, d_input, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.layers = get_clones(EncoderLayer(d_input, d_model, heads, dropout), N)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, src, mask=None):
        x = src
        for i in range(self.N):
            x = self.layers[i](x, mask)
        return self.norm(x) if attention_setting.attention_layer_norm else x


class Decoder(nn.Module):
    def __init__(self, d_input, d_model, N, heads, dropout):
        super().__init__()
        self.N = N
        self.layers = get_clones(DecoderLayer(d_input, d_model, heads, dropout), N)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, trg, e_outputs, src_mask=None, trg_mask=None):
        x = trg
        for i in range(self.N):
            x = self.layers[i](x, e_outputs, src_mask, trg_mask)
        return self.norm(x) if attention_setting.attention_layer_norm else x
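# Minimal standalone sketch of the stacking pattern Encoder/Decoder use
# (plain Linear layers stand in for EncoderLayer/DecoderLayer from Layers.py):
#
#     blocks = get_clones(nn.Linear(16, 16), 3)
#     x = torch.randn(2, 5, 16)
#     for i in range(3):
#         x = torch.relu(blocks[i](x))   # each block applies its own weights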
class Transformer(nn.Module):
    def __init__(self, d_input, d_model, n_feature_dim, N, heads, dropout, extra_length):
        super().__init__()
        self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout)
        self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout)
        # self.linear = nn.Linear()
        self.cnn = customized_CNN()
        # The sequential and parallel CNN branches are mutually exclusive.
        assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn
        if attention_setting.add_seq_cnn:
            # Two conv/pool stages shrink the position axis; 64 channels remain.
            d_input = 64 * (((d_input + 2) // 2 + 2) // 2)
            if attention_setting.analysis == 'deepCrispr':
                # deepCrispr epigenetic features are concatenated onto the sequence
                # in forward() instead of being fed to the output feed-forward,
                # so the feature axis widens by 4 and extra_length is zeroed.
                d_model += 4
                extra_length = 0
        if attention_setting.add_parallel_cnn:
            d_input_1 = d_input * d_model
            d_input_2 = ((64 * (((d_input + 2) // 2 + 2) // 2)) * config.embedding_vec_dim)
            d_input = d_input_1 + d_input_2
            d_model = 1
        self.out = OutputFeedForward(d_model, d_input, extra_length,
                                     d_layers=attention_setting.output_FF_layers, dropout=dropout)

    def forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None):
        e_outputs = self.encoder(src, src_mask)
        # print("DECODER")
        d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)
        if attention_setting.add_seq_cnn:
            if extra_input_for_FF is not None and attention_setting.analysis == 'deepCrispr':
                bs = extra_input_for_FF.size(0)
                extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4)
                d_output = cat((d_output, extra_input_for_FF), dim=2)
            d_output = torch.unsqueeze(d_output, 1)
            d_output = self.cnn(d_output)
        flat_d_output = d_output.view(-1, d_output.size(-2) * d_output.size(-1))
        if attention_setting.add_parallel_cnn:
            src = torch.unsqueeze(src, 1)
            inter_output = self.cnn(src).view(src.size(0), -1)
            flat_d_output = cat((inter_output, flat_d_output), dim=1)
        if extra_input_for_FF is not None and attention_setting.analysis != 'deepCrispr':
            flat_d_output = cat((flat_d_output, extra_input_for_FF), dim=1)
        output = self.out(flat_d_output)
        return output
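# Worked example of the add_seq_cnn bookkeeping above (my reading of the
# arithmetic): with d_input = 21, the two (2, 1) max-pools with (1, 0) padding
# leave (21 + 2) // 2 = 11 and then (11 + 2) // 2 = 6 positions, so the CNN
# contributes 64 channels * 6 positions = 384 flattened features per model dim.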
class customized_CNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.cnn_1 = nn.Conv2d(1, 32, kernel_size=(3, 1), padding=(1, 0))
        self.maxpool_1 = nn.MaxPool2d(kernel_size=(2, 1), padding=(1, 0))
        self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3, 1), padding=(1, 0))
        # Both branches currently configure the same pooling; kept as in the source.
        if config.seq_len == 22:
            self.maxpool_2 = nn.MaxPool2d(kernel_size=(2, 1), padding=(1, 0))
        else:
            self.maxpool_2 = nn.MaxPool2d(kernel_size=(2, 1), padding=(1, 0))
        self.dropout = nn.Dropout(p=attention_setting.cnn_dropout)

    def forward(self, input):
        x = self.maxpool_1(self.cnn_1(input))
        x = F.relu(x)
        x = self.maxpool_2(self.cnn_2(x))
        x = F.relu(x)
        # Fold the pooled position axis into the last dimension:
        # (bs, 64, H', W) -> (bs, 64, H' * W).
        x = x.contiguous().view(x.size(0), -1, x.size(-1) * x.size(-2))
        return x
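# Quick shape check (illustrative; assumes the module-level config import
# succeeded so customized_CNN can be constructed):
#
#     x = torch.randn(2, 1, 21, 50)   # (batch, channel, positions, embedding dim)
#     y = customized_CNN()(x)         # -> (2, 64, 6 * 50): 21 -> 11 -> 6 via the pools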
class OTembeddingTransformer(nn.Module):
    def __init__(self, embedding_vec_dim, d_model, N, heads, dropout, feature_len_map, classifier=False):
        super().__init__()
        # feature_len_map holds (start, end) column ranges into the input matrix:
        # [0] source tokens, [1] optional target tokens, [2]/[-1] optional extra
        # features routed to the output feed-forward.
        self.feature_len_map = feature_len_map
        extra_length = 0 if self.feature_len_map[-1] is None else \
            self.feature_len_map[-1][1] - self.feature_len_map[-1][0]
        d_input = self.feature_len_map[0][1] - self.feature_len_map[0][0]
        self.transformer = Transformer(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length)
        self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        self.embedding_pos = nn.Embedding(d_input, embedding_vec_dim)
        self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim)
        self.dropout = nn.Dropout(p=config.dropout)
        self.classifier = classifier

    def forward(self, input, src_mask=None, trg_mask=None):
        src = input[:, self.feature_len_map[0][0]:self.feature_len_map[0][1]].long()
        embedded_src = self.embedding(src)
        bs = src.size(0)
        pos_len = src.size(1)
        pos = torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in range(bs)]))
        pos = pos.to(OT_crispr_attn.device2)
        embedded_pos = self.embedding_pos(pos)
        embedded_src = embedded_pos + embedded_src
        if self.feature_len_map[1] is not None:
            trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long()
        else:
            trg = src
        embedded_trg = self.trg_embedding(trg)
        embedded_pos_trg = self.trg_embedding_pos(pos)
        embedded_trg = embedded_pos_trg + embedded_trg
        embedded_src = self.dropout(embedded_src)
        embedded_trg = self.dropout(embedded_trg)
        extra_input_for_FF = None
        if self.feature_len_map[2] is not None:
            extra_input_for_FF = input[:, self.feature_len_map[2][0]:self.feature_len_map[2][1]]
        output = self.transformer(embedded_src, embedded_trg,
                                  extra_input_for_FF=extra_input_for_FF,
                                  src_mask=src_mask, trg_mask=trg_mask)
        if self.classifier:
            # output = F.log_softmax(output, dim=-1)
            # output = F.softmax(output, dim=-1)
            pass
        return output
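# Hypothetical feature_len_map (names and values invented for illustration):
# columns 0-23 hold sgRNA tokens, 23-46 target-site tokens, and 46-50 extra
# numeric features for the output feed-forward:
#
#     feature_len_map = [(0, 23), (23, 46), (46, 50)]
#     model = get_OT_model(feature_len_map)        # defined below
#     scores = model(batch)                        # batch: (bs, 50)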
class EmbeddingTransformer(Transformer):
    def __init__(self, embedding_vec_dim, d_input, d_model, N, heads, dropout, extra_length):
        super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length)
        self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        self.embedding_2 = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim)
        self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim)
        self.trg_embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim)
        self.dropout = nn.Dropout(p=config.dropout)

    def forward(self, src, trg=None, extra_input_for_FF=None, src_mask=None, trg_mask=None):
        if config.sep_len != 0:
            # Two-segment input: embed each segment with its own table, then concatenate.
            src_1 = src[:, :config.sep_len]
            src_2 = src[:, config.sep_len:]
            embedded_src = self.embedding(src_1)
            embedded_src_2 = self.embedding_2(src_2)
            embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1)
        else:
            embedded_src = self.embedding(src)
        bs = src.size(0)
        pos_length = config.seq_len - config.seq_start - config.word_len + 1
        pos = torch.from_numpy(np.array([[i for i in range(pos_length)] for _ in range(bs)]))
        pos = pos.to(crispr_attn.device2)
        embedded_src_pos = self.embedding_pos(pos)
        embedded_src_1 = embedded_src + embedded_src_pos
        embedded_src_2 = self.dropout(embedded_src_1)
        if trg is not None:
            embedded_trg = self.trg_embedding(trg)
            embedded_trg_pos = self.trg_embedding_pos(pos)
            embedded_trg_1 = embedded_trg + embedded_trg_pos
            embedded_trg_2 = self.dropout(embedded_trg_1)
        else:
            # No separate target: reuse the embedded source as the decoder input.
            embedded_trg_2 = embedded_src_2
        # embedded_src_2 = transpose(embedded_src_2, 1, 2)
        output = super().forward(embedded_src_2, embedded_trg_2, extra_input_for_FF)
        return output
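# The positional vocabulary size above reads as k-mer tokenization (my
# interpretation of the config names): a sequence of seq_len bases split into
# overlapping words of length word_len yields seq_len - word_len + 1 tokens:
#
#     seq_len, word_len = 22, 3
#     n_positions = seq_len - word_len + 1    # 20 position embeddings needed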
def get_OT_model(feature_len_map, classifier=False):
    assert attention_setting.d_model % attention_setting.attention_heads == 0
    assert attention_setting.attention_dropout < 1
    if not classifier:
        model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model,
                                       attention_setting.n_layers, attention_setting.attention_heads,
                                       attention_setting.attention_dropout, feature_len_map)
    else:
        model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model,
                                       attention_setting.n_layers, attention_setting.attention_heads,
                                       attention_setting.attention_dropout, feature_len_map,
                                       classifier=True)
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
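# Note on the initialization loop: xavier_uniform_ touches only tensors with
# p.dim() > 1 (weight matrices); 1-D biases and LayerNorm parameters keep
# their PyTorch defaults. Standalone illustration:
#
#     lin = nn.Linear(8, 8)
#     nn.init.xavier_uniform_(lin.weight)   # 2-D weight: re-initialized
#     # lin.bias is 1-D, so the loop above would skip it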
def get_model(inputs_lengths=None, d_input=20):
    assert attention_setting.d_model % attention_setting.attention_heads == 0
    assert attention_setting.attention_dropout < 1
    # model = Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim,
    #                     attention_setting.n_layers, attention_setting.attention_heads,
    #                     attention_setting.attention_dropout)
    extra_feature_length = len(config.extra_categorical_features + config.extra_numerical_features)
    model = EmbeddingTransformer(attention_setting.n_feature_dim, d_input, attention_setting.d_model,
                                 attention_setting.n_layers, attention_setting.attention_heads,
                                 attention_setting.attention_dropout, extra_feature_length)
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    # if attention_setting.device == 0:
    #     model = model.cuda()
    return model
Transformer(nn.Module): def __init__(self, d_input, d_model, n_feature_dim, N,", "def forward(self, src, mask=None): x = src for i in range(self.N): x =", "if p.dim() > 1: nn.init.xavier_uniform_(p) return model def get_model(inputs_lengths=None, d_input = 20): assert", "N): return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) class Encoder(nn.Module): def __init__(self, d_input, d_model,", "Layers import EncoderLayer, DecoderLayer from Sublayers import Norm, OutputFeedForward import copy import attention_setting", "== 22: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) else: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.dropout =", "\".\" if len(sys.argv) >= 2 else \"\" config = importlib.import_module(config_path + \"config\") attention_setting", "= self.layers[i](x, e_outputs, src_mask, trg_mask) return self.norm(x) if attention_setting.attention_layer_norm else x class Transformer(nn.Module):", "p.dim() > 1: nn.init.xavier_uniform_(p) return model def get_model(inputs_lengths=None, d_input = 20): assert attention_setting.d_model", "assert attention_setting.attention_dropout < 1 #model = Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout) extra_feature_length", "extra_input_for_FF=None, src_mask=None, trg_mask=None): e_outputs = self.encoder(src, src_mask) # print(\"DECODER\") d_output = self.decoder(trg, e_outputs,", "cat, transpose import torch import torch.nn.functional as F from Layers import EncoderLayer, DecoderLayer", "bs = src.size(0) pos_len = src.size(1) pos = torch.from_numpy(np.array([[i for i in range(pos_len)]", "Transformer(d_input, attention_setting.d_model, attention_setting.n_feature_dim, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout) extra_feature_length = len(config.extra_categorical_features + config.extra_numerical_features) model =", "range(pos_length)] for _ in range(bs)])) pos = pos.to(crispr_attn.device2) embedded_src_pos = self.embedding_pos(pos) embedded_src_1 =", "nn.LayerNorm(d_model) def forward(self, trg, e_outputs, src_mask=None, trg_mask=None): x = trg for i in", "trg_mask=None): e_outputs = self.encoder(src, src_mask) # print(\"DECODER\") d_output = self.decoder(trg, e_outputs, src_mask, trg_mask)", "= torch.unsqueeze(d_output, 1) d_output = self.cnn(d_output) flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1)) if attention_setting.add_parallel_cnn: src", "= nn.Dropout(p=config.dropout) self.classifier = classifier def forward(self, input, src_mask=None, trg_mask=None): src = input[:,self.feature_len_map[0][0]:", "d_output.view(-1, d_output.size(-2)*d_output.size(-1)) if attention_setting.add_parallel_cnn: src = torch.unsqueeze(src, 1) inter_output = self.cnn(src).view(src.size(0), -1) flat_d_output", "config.word_len + 1, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.dropout", "heads, dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self, src, mask=None): x = src", "if self.feature_len_map[1] is not None: trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long() else: trg = src", "= nn.Dropout(p = config.dropout) def forward(self, src, trg = None, extra_input_for_FF=None, src_mask=None, trg_mask=None):", "in range(pos_len)] for _ in range(bs)])) pos = pos.to(OT_crispr_attn.device2) embedded_pos = 
self.embedding_pos(pos) embedded_src", "-1) #output = F.softmax(output, dim = -1) pass return output class EmbeddingTransformer(Transformer): def", "// 2)) * config.embedding_vec_dim) d_input = d_input_1 + d_input_2 d_model = 1 self.out", "= None if self.feature_len_map[2] is not None: extra_input_for_FF = input[:, self.feature_len_map[2][0]: self.feature_len_map[2][1]] output", "OutputFeedForward import copy import attention_setting import numpy as np import crispr_attn import math", "N, heads, dropout, extra_length): super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout) self.decoder", "* (((d_input + 2) // 2 + 2) // 2)) * config.embedding_vec_dim) d_input", "classifier = False): assert attention_setting.d_model % attention_setting.attention_heads == 0 assert attention_setting.attention_dropout < 1", "= OutputFeedForward(d_model, d_input, extra_length, d_layers=attention_setting.output_FF_layers, dropout=dropout) def forward(self, src, trg, extra_input_for_FF=None, src_mask=None, trg_mask=None):", "bs = extra_input_for_FF.size(0) extra_input_for_FF = extra_input_for_FF.view(bs, -1, 4) d_output = cat((d_output, extra_input_for_FF), dim", "if p.dim() > 1: nn.init.xavier_uniform_(p) # if attention_setting.device == 0: # model =", "* x.size(-2)) return x class OTembeddingTransformer(nn.Module): def __init__(self, embedding_vec_dim, d_model, N, heads, dropout,", "= F.softmax(output, dim = -1) pass return output class EmbeddingTransformer(Transformer): def __init__(self, embedding_vec_dim", "embedded_src_2 = self.embedding_2(src_2) embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1) else: embedded_src = self.embedding(src) bs", ">= 2 else \"\" config = importlib.import_module(config_path + \"config\") attention_setting = importlib.import_module(config_path+\"attention_setting\") def", "N, heads, dropout, extra_length): super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length) self.embedding =", "heads, dropout, extra_length) self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos =", "flat_d_output = cat((flat_d_output, extra_input_for_FF), dim=1) output = self.out(flat_d_output) return output class customized_CNN(nn.Module): def", "self.trg_embedding(trg) embedded_pos_trg = self.trg_embedding_pos(pos) embedded_trg = embedded_pos_trg + embedded_trg embedded_src = self.dropout(embedded_src) embedded_trg", "output = self.transformer(embedded_src, embedded_trg, extra_input_for_FF=extra_input_for_FF, src_mask=src_mask, trg_mask=trg_mask) if self.classifier: # output = F.log_softmax(output,", "OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, feature_len_map, classifier = True) for p in model.parameters():", "for _ in range(bs)])) pos = pos.to(crispr_attn.device2) embedded_src_pos = self.embedding_pos(pos) embedded_src_1 = embedded_src", "def forward(self, src, trg = None, extra_input_for_FF=None, src_mask=None, trg_mask=None): if config.sep_len != 0:", "# output = F.log_softmax(output, dim = -1) #output = F.softmax(output, dim = -1)", "!= 'deepCrispr': flat_d_output = cat((flat_d_output, extra_input_for_FF), dim=1) output = self.out(flat_d_output) return output class", "assert attention_setting.attention_dropout < 1 if not classifier: model = 
OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads,", "torch.unsqueeze(src, 1) inter_output = self.cnn(src).view(src.size(0), -1) flat_d_output = cat((inter_output, flat_d_output),dim=1) if extra_input_for_FF is", "if config.sep_len != 0: src_1 = src[:,:config.sep_len] src_2 = src[:, config.sep_len:] embedded_src =", "nn.Dropout(p=config.dropout) self.classifier = classifier def forward(self, input, src_mask=None, trg_mask=None): src = input[:,self.feature_len_map[0][0]: self.feature_len_map[0][1]].long()", "+ \"config\") attention_setting = importlib.import_module(config_path+\"attention_setting\") def get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for _ in", "== 0 assert attention_setting.attention_dropout < 1 if not classifier: model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model,", "* (((d_input + 2) // 2 + 2) // 2) if attention_setting.analysis ==", "e_outputs, src_mask, trg_mask) if attention_setting.add_seq_cnn: if extra_input_for_FF is not None and attention_setting.analysis ==", "heads, dropout, extra_length): super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout) self.decoder =", "self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout) self.decoder = Decoder(n_feature_dim, d_model, N, heads,", "22: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) else: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.dropout = nn.Dropout(p", "EmbeddingTransformer(attention_setting.n_feature_dim, d_input, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, extra_feature_length) for p in model.parameters(): if p.dim()", "self.trg_embedding_pos = nn.Embedding(d_input, embedding_vec_dim) self.dropout = nn.Dropout(p=config.dropout) self.classifier = classifier def forward(self, input,", "for i in range(pos_length)] for _ in range(bs)])) pos = pos.to(crispr_attn.device2) embedded_src_pos =", "for _ in range(bs)])) pos = pos.to(OT_crispr_attn.device2) embedded_pos = self.embedding_pos(pos) embedded_src = embedded_pos", "self.maxpool_1 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3,1), padding=(1,0)) if config.seq_len ==", "extra_input_for_FF=None, src_mask=None, trg_mask=None): if config.sep_len != 0: src_1 = src[:,:config.sep_len] src_2 = src[:,", "self.out(flat_d_output) return output class customized_CNN(nn.Module): def __init__(self): super().__init__() self.cnn_1 = nn.Conv2d(1, 32, kernel_size=(3,1),", "src_mask=src_mask, trg_mask=trg_mask) if self.classifier: # output = F.log_softmax(output, dim = -1) #output =", "and attention_setting.analysis != 'deepCrispr': flat_d_output = cat((flat_d_output, extra_input_for_FF), dim=1) output = self.out(flat_d_output) return", "classifier = True) for p in model.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) return", "F.softmax(output, dim = -1) pass return output class EmbeddingTransformer(Transformer): def __init__(self, embedding_vec_dim ,", "customized_CNN() assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn if attention_setting.add_seq_cnn: d_input = 64 *", "model = OTembeddingTransformer(attention_setting.n_feature_dim, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, feature_len_map, 
classifier = True) for p", "__init__(self): super().__init__() self.cnn_1 = nn.Conv2d(1, 32, kernel_size=(3,1), padding=(1,0)) self.maxpool_1 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2", "N, heads, dropout) #self.linear = nn.Linear() self.cnn = customized_CNN() assert not attention_setting.add_seq_cnn or", "= trg for i in range(self.N): x = self.layers[i](x, e_outputs, src_mask, trg_mask) return", "self.trg_embedding_pos(pos) embedded_trg_1 = embedded_trg + embedded_trg_pos embedded_trg_2 = self.dropout(embedded_trg_1) else: embedded_trg_2 = embedded_src_2", "= torch.from_numpy(np.array([[i for i in range(pos_len)] for _ in range(bs)])) pos = pos.to(OT_crispr_attn.device2)", "d_model, heads, dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self, trg, e_outputs, src_mask=None, trg_mask=None):", "self.embedding(src) bs = src.size(0) pos_length = config.seq_len - config.seq_start - config.word_len + 1", "= config.dropout) def forward(self, src, trg = None, extra_input_for_FF=None, src_mask=None, trg_mask=None): if config.sep_len", "embedding_vec_dim , d_input, d_model, N, heads, dropout, extra_length): super().__init__(d_input, d_model, embedding_vec_dim, N, heads,", "embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.trg_embedding_pos = nn.Embedding(config.seq_len -", "if attention_setting.add_seq_cnn: if extra_input_for_FF is not None and attention_setting.analysis == 'deepCrispr': bs =", "64, kernel_size=(3,1), padding=(1,0)) if config.seq_len == 22: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) else: self.maxpool_2", "self.dropout(embedded_trg_1) else: embedded_trg_2 = embedded_src_2 #embedded_src_2 = transpose(embedded_src_2, 1, 2) output = super().forward(embedded_src_2,", "embedded_trg_2 = embedded_src_2 #embedded_src_2 = transpose(embedded_src_2, 1, 2) output = super().forward(embedded_src_2, embedded_trg_2, extra_input_for_FF)", "= \".\".join([\"models\", sys.argv[1]]) + \".\" if len(sys.argv) >= 2 else \"\" config =", "= get_clones(EncoderLayer(d_input, d_model, heads, dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self, src, mask=None):", "= 2) d_output = torch.unsqueeze(d_output, 1) d_output = self.cnn(d_output) flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1))", "if self.feature_len_map[-1] is None else self.feature_len_map[-1][1] - self.feature_len_map[-1][0] d_input = self.feature_len_map[0][1] - self.feature_len_map[0][0]", "if self.classifier: # output = F.log_softmax(output, dim = -1) #output = F.softmax(output, dim", "#output = F.softmax(output, dim = -1) pass return output class EmbeddingTransformer(Transformer): def __init__(self,", "= self.embedding(src) bs = src.size(0) pos_length = config.seq_len - config.seq_start - config.word_len +", "_ in range(bs)])) pos = pos.to(crispr_attn.device2) embedded_src_pos = self.embedding_pos(pos) embedded_src_1 = embedded_src +", "= ((64 * (((d_input + 2) // 2 + 2) // 2)) *", "torch import torch.nn.functional as F from Layers import EncoderLayer, DecoderLayer from Sublayers import", "class Decoder(nn.Module): def __init__(self, d_input, d_model, N, heads, dropout): super().__init__() self.N = N", "nn.Linear() self.cnn = customized_CNN() assert not attention_setting.add_seq_cnn or not attention_setting.add_parallel_cnn if attention_setting.add_seq_cnn: d_input", "= nn.Conv2d(32, 64, kernel_size=(3,1), padding=(1,0)) if config.seq_len == 22: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), 
padding=(1,0))", "is None else self.feature_len_map[-1][1] - self.feature_len_map[-1][0] d_input = self.feature_len_map[0][1] - self.feature_len_map[0][0] self.transformer =", "embedded_trg = embedded_pos_trg + embedded_trg embedded_src = self.dropout(embedded_src) embedded_trg = self.dropout(embedded_trg) extra_input_for_FF =", "correct config file config_path = \".\".join([\"models\", sys.argv[1]]) + \".\" if len(sys.argv) >= 2", "dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self, trg, e_outputs, src_mask=None, trg_mask=None): x =", "d_output.size(-2)*d_output.size(-1)) if attention_setting.add_parallel_cnn: src = torch.unsqueeze(src, 1) inter_output = self.cnn(src).view(src.size(0), -1) flat_d_output =", "nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3,1), padding=(1,0)) if config.seq_len == 22: self.maxpool_2", "= 0 if attention_setting.add_parallel_cnn: d_input_1 = d_input * d_model d_input_2 = ((64 *", "d_model, N, heads, dropout) self.decoder = Decoder(n_feature_dim, d_model, N, heads, dropout) #self.linear =", "+ 2) // 2 + 2) // 2)) * config.embedding_vec_dim) d_input = d_input_1", "F.relu(x) x = self.maxpool_2(self.cnn_2(x)) x = F.relu(x) x = x.contiguous().view(x.size(0), -1, x.size(-1) *", "n_feature_dim, N, heads, dropout, extra_length): super().__init__() self.encoder = Encoder(n_feature_dim, d_model, N, heads, dropout)", "padding=(1,0)) else: self.maxpool_2 = nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.dropout = nn.Dropout(p = attention_setting.cnn_dropout) def forward(self,", "padding=(1,0)) self.dropout = nn.Dropout(p = attention_setting.cnn_dropout) def forward(self, input): x = self.maxpool_1(self.cnn_1(input)) x", "self.trg_embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.dropout = nn.Dropout(p = config.dropout)", "forward(self, trg, e_outputs, src_mask=None, trg_mask=None): x = trg for i in range(self.N): x", "config.sep_len:] embedded_src = self.embedding(src_1) embedded_src_2 = self.embedding_2(src_2) embedded_src = cat(tuple([embedded_src, embedded_src_2]), dim=1) else:", "embedded_src_pos = self.embedding_pos(pos) embedded_src_1 = embedded_src + embedded_src_pos embedded_src_2 = self.dropout(embedded_src_1) if trg", "0 if self.feature_len_map[-1] is None else self.feature_len_map[-1][1] - self.feature_len_map[-1][0] d_input = self.feature_len_map[0][1] -", "= nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.trg_embedding_pos =", "embedded_src_1 = embedded_src + embedded_src_pos embedded_src_2 = self.dropout(embedded_src_1) if trg is not None:", "self.cnn(d_output) flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1)) if attention_setting.add_parallel_cnn: src = torch.unsqueeze(src, 1) inter_output =", "self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len - config.word_len + 1, embedding_vec_dim) self.trg_embedding_pos", "trg_mask=trg_mask) if self.classifier: # output = F.log_softmax(output, dim = -1) #output = F.softmax(output,", "== 'deepCrispr': d_model += 4 extra_length = 0 if attention_setting.add_parallel_cnn: d_input_1 = d_input", "model = EmbeddingTransformer(attention_setting.n_feature_dim, d_input, attention_setting.d_model, attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, extra_feature_length) for p 
in model.parameters():", "2) d_output = torch.unsqueeze(d_output, 1) d_output = self.cnn(d_output) flat_d_output = d_output.view(-1, d_output.size(-2)*d_output.size(-1)) if", "x = trg for i in range(self.N): x = self.layers[i](x, e_outputs, src_mask, trg_mask)", "input): x = self.maxpool_1(self.cnn_1(input)) x = F.relu(x) x = self.maxpool_2(self.cnn_2(x)) x = F.relu(x)", "N, heads, dropout, feature_len_map, classifier=False): super().__init__() self.feature_len_map = feature_len_map extra_length = 0 if", "super().__init__() self.feature_len_map = feature_len_map extra_length = 0 if self.feature_len_map[-1] is None else self.feature_len_map[-1][1]", "None: trg = input[:, self.feature_len_map[1][0]:self.feature_len_map[1][1]].long() else: trg = src embedded_trg = self.trg_embedding(trg) embedded_pos_trg", "heads, dropout, extra_length) self.embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_2 = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding =", "+ \".\" if len(sys.argv) >= 2 else \"\" config = importlib.import_module(config_path + \"config\")", "= nn.MaxPool2d(kernel_size=(2,1), padding=(1,0)) self.cnn_2 = nn.Conv2d(32, 64, kernel_size=(3,1), padding=(1,0)) if config.seq_len == 22:", "embedded_src = self.dropout(embedded_src) embedded_trg = self.dropout(embedded_trg) extra_input_for_FF = None if self.feature_len_map[2] is not", "self.N = N self.layers = get_clones(EncoderLayer(d_input, d_model, heads, dropout), N) self.norm = nn.LayerNorm(d_model)", "attention_setting.d_model % attention_setting.attention_heads == 0 assert attention_setting.attention_dropout < 1 if not classifier: model", "self.embedding_2 = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.trg_embedding = nn.Embedding(config.embedding_voca_size, embedding_vec_dim) self.embedding_pos = nn.Embedding(config.seq_len - config.word_len", "attention_setting.n_layers, attention_setting.attention_heads, attention_setting.attention_dropout, feature_len_map, classifier = True) for p in model.parameters(): if p.dim()", "torch.nn as nn from torch import cat, transpose import torch import torch.nn.functional as", "d_input_1 = d_input * d_model d_input_2 = ((64 * (((d_input + 2) //", "N self.layers = get_clones(EncoderLayer(d_input, d_model, heads, dropout), N) self.norm = nn.LayerNorm(d_model) def forward(self,", "= nn.LayerNorm(d_model) def forward(self, src, mask=None): x = src for i in range(self.N):", "d_input, d_model, N, heads, dropout, extra_length): super().__init__(d_input, d_model, embedding_vec_dim, N, heads, dropout, extra_length)", "torch.from_numpy(np.array([[i for i in range(pos_length)] for _ in range(bs)])) pos = pos.to(crispr_attn.device2) embedded_src_pos", "embedded_trg_2, extra_input_for_FF) return output def get_OT_model(feature_len_map, classifier = False): assert attention_setting.d_model % attention_setting.attention_heads" ]
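Both embedding wrappers above follow the same pattern: look up token embeddings, build a position-index grid with numpy, look up learned positional embeddings for that grid, and sum the two before dropout. Below is a minimal self-contained sketch of that pattern; the batch size, sequence length, vocabulary size, and embedding width are illustrative values, not taken from the project's config.

# Self-contained sketch of the token + learned-positional embedding pattern.
# All dimensions here are illustrative, not from the source configuration.
import numpy as np
import torch
import torch.nn as nn

bs, seq_len, dim = 4, 20, 32
tok_emb = nn.Embedding(100, dim)      # vocabulary of 100 "words"
pos_emb = nn.Embedding(seq_len, dim)  # one learned vector per position

tokens = torch.randint(0, 100, (bs, seq_len))
# same numpy-built index grid the model uses for its pos tensor
pos = torch.from_numpy(np.array([[i for i in range(seq_len)] for _ in range(bs)]))
x = tok_emb(tokens) + pos_emb(pos)    # the add used for embedded_src/embedded_trg
print(x.shape)                        # torch.Size([4, 20, 32])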
[ "size # load data \"\"\" A data set includes three files: [1]. A", "print numpy.unique(classes) classes_unique,classes=cl.change_class_labels(classes) print numpy.unique(classes) # set random state #numpy.random.seed(1000) rng=numpy.random.RandomState(2000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print", "columns allowd in the txt file. If an original sample is a matrix", "perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf) print perf print conf_mat perfs=numpy.asarray(perfs) save_dir=result_dir + \"_\".join(classes_unique) try: os.makedirs(save_dir) except", "the parameter learning_rate=0.1 alpha=0.1 alpha1=0.01 alpha2=0.0 n_hidden=[128,16]#[256,64,16] n_epochs=1000 batch_size=200 activation_func='relu' if cell==\"GM12878\": lambda21s=numpy.arange(0.020,-0.0001,-0.0001)", "L2,1-norm. <NAME> NRC, Ottawa Aug. 06, 2015 Contact: <EMAIL> \"\"\" #qsub -l procs=1,pmem=2000mb,walltime=36:00:00", "#group=[[\"A-E\",\"I-E\"],[\"A-P\",\"I-P\"],[\"A-X\",\"I-X\"]] #group=[[\"A-E\",\"A-P\",\"A-X\"],[\"I-E\",\"I-P\",\"I-X\"]] #group=[[\"I-E\"],[\"I-P\"]] classes=cl.merge_class_labels(classes,group) print numpy.unique(classes) classes_unique,classes=cl.change_class_labels(classes) print numpy.unique(classes) # set random state", "in range(len(lambda21s)): lambda21=lambda21s[i] classifier,training_time=deep_feat_select_mlp_l21norm.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org, valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org, learning_rate=learning_rate, alpha=alpha, lambda21=lambda21, alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden,", "#os.environ['THEANO_FLAGS']='device=cpu,base_compile=/var/tmp' import sys import time import numpy import deep_feat_select_mlp_l21norm import classification as cl", "max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8) param0=classifier.params[0].get_value() param1=classifier.params[1].get_value() row_sum=numpy.sqrt((param0**2).sum(axis=1)) max_param=numpy.max(row_sum) selected=row_sum>(max_param*0.001) features_selected.append(features[selected]) weights_selected.append(row_sum[selected]) print row_sum print", "DFS for MLP based on L2,1-norm. <NAME> NRC, Ottawa Aug. 
06, 2015 Contact:", "data=numpy.loadtxt(filename,delimiter='\\t',dtype='float32') filename=data_dir + cell + \"_\" + str(wid) + \"bp_Classes.txt\"; classes=numpy.loadtxt(filename,delimiter='\\t',dtype=object) filename=data_dir+ cell", "cell==\"K562\": lambda1s=numpy.arange(0.025,-0.0001,-0.0001) lambda21s=[0.02]#numpy.arange(0.04,-0.001,-0.001) features_selected=[] weights_selected=[] weights=[] perfs=[] for i in range(len(lambda21s)): lambda21=lambda21s[i] classifier,training_time=deep_feat_select_mlp_l21norm.train_model(train_set_x_org=train_set_x_org,", "valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org, learning_rate=learning_rate, alpha=alpha, lambda21=lambda21, alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden, n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng, max_num_epoch_change_learning_rate=50,", "as cl from gc import collect as gc_collect numpy.warnings.filterwarnings('ignore') # Theano causes some", "causes some warnings numpy.set_printoptions(threshold=numpy.nan) numpy.set_printoptions(precision=4) # taking the input parameters #cell=sys.argv[1] # cell", "collect as gc_collect numpy.warnings.filterwarnings('ignore') # Theano causes some warnings numpy.set_printoptions(threshold=numpy.nan) numpy.set_printoptions(precision=4) # taking", "train_set_x_org,data_min,data_max=cl.normalize_col_scale01(train_set_x_org,tol=1e-10) valid_set_x_org,_,_=cl.normalize_col_scale01(valid_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) test_set_x_org,_,_=cl.normalize_col_scale01(test_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) # train # setting the parameter learning_rate=0.1 alpha=0.1 alpha1=0.01 alpha2=0.0", "cell + \"_\" + str(wid) + \"bp_unique_no.txt\" #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename) #save_dir=result_dir + \"_\".join(classes_unique) #filename=cell +", "batch_size=200 activation_func='relu' if cell==\"GM12878\": lambda21s=numpy.arange(0.020,-0.0001,-0.0001) if cell==\"HepG2\": lambda21s=numpy.arange(0.028,-0.0001,-0.0001) if cell==\"HelaS3\": lambda21s=numpy.arange(0.028,-0.0001,-0.0001) if cell==\"K562\":", "#given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\"] #given=[\"I-E\",\"I-P\"] data,classes,_=cl.take_some_classes(data,classes,given=given,others=None) # balance the sample sizes of the classes rng=numpy.random.RandomState(1000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng)", "this file is actually a vectorized sample, by concatnating the rows of the", "for MLP based on L2,1-norm. <NAME> NRC, Ottawa Aug. 
06, 2015 Contact: <EMAIL>", "OSError: pass # save the weights filename=save_dir + '/' + cell + \"_\"", "+ '/' + cell + \"_\" + str(wid) + \"bp_unique_no.txt\" #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename) #save_dir=result_dir +", "import classification as cl from gc import collect as gc_collect numpy.warnings.filterwarnings('ignore') # Theano", "features[selected] #print param0[selected] #print param0 #numpy.savetxt('param0.txt',param0,delimiter='\\t',fmt='%.4e') print abs(param0).sum(axis=1) weights.append(row_sum) # test test_set_y_pred,test_set_y_pred_prob,test_time=deep_feat_select_mlp_l21norm.test_model(classifier, test_set_x_org,", "+ str(wid) + \"bp_Classes.txt\"; classes=numpy.loadtxt(filename,delimiter='\\t',dtype=object) filename=data_dir+ cell + \"_Features.txt\" features=numpy.loadtxt(filename,delimiter='\\t',dtype=object) given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\",\"UK\"] #given=[\"A-E\",\"I-E\"] #given=[\"A-P\",\"I-P\"]", "test_set_y_pred[0:20] print test_set_y_pred_prob[0:20] print test_time perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf) print perf print conf_mat perfs=numpy.asarray(perfs) save_dir=result_dir", "path=\"/home/yifengli/prog/my/DECRES/\" os.chdir(path) data_dir=\"/home/yifengli/prog/my/DECRES/data/\" result_dir=\"/home/yifengli/prog/my/DECRES/result/\" #cells=[\"GM12878\",\"HepG2\",\"K562\",\"HelaS3\",\"HUVEC\",\"A549\",\"MCF7\",\"HMEC\"] #wids=[200,500,1000,2000,4000] cells=[\"HelaS3\"] wids=[200] for cell in cells: for", "filename=save_dir + '/' + cell + \"_\" + str(wid) + \"bp_unique_yes.txt\" # save", "print test_set_y_pred_prob[0:20] print test_time perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf) print perf print conf_mat perfs=numpy.asarray(perfs) save_dir=result_dir +", "alpha=0.1 alpha1=0.01 alpha2=0.0 n_hidden=[128,16]#[256,64,16] n_epochs=1000 batch_size=200 activation_func='relu' if cell==\"GM12878\": lambda21s=numpy.arange(0.020,-0.0001,-0.0001) if cell==\"HepG2\": lambda21s=numpy.arange(0.028,-0.0001,-0.0001)", "#group=[[\"A-E\"],[\"A-X\"]] #group=[[\"A-P\"],[\"A-X\"]] #group=[[\"A-E\"],[\"A-P\"],[\"A-X\"]] #group=[[\"A-E\",\"I-E\"],[\"A-P\",\"I-P\"]] #group=[[\"A-E\",\"A-P\"],[\"I-E\",\"I-P\"]] #group=[[\"A-E\",\"I-E\"],[\"A-P\",\"I-P\"],[\"A-X\",\"I-X\"]] #group=[[\"A-E\",\"A-P\",\"A-X\"],[\"I-E\",\"I-P\",\"I-X\"]] #group=[[\"I-E\"],[\"I-P\"]] classes=cl.merge_class_labels(classes,group) print numpy.unique(classes) classes_unique,classes=cl.change_class_labels(classes) print", "n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng, max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8) param0=classifier.params[0].get_value() param1=classifier.params[1].get_value() row_sum=numpy.sqrt((param0**2).sum(axis=1)) max_param=numpy.max(row_sum) selected=row_sum>(max_param*0.001) features_selected.append(features[selected])", "\"_\" + str(wid) + \"bp.txt\" #cl.write_feature_weight(weights,features,lambda21s,filename) filename=save_dir + '/' + cell + \"_\"", "-e main_DFS_feature_selection_l21norm.err -M <EMAIL> -m bea main_deep_feat_select_mlp_l21norm.py import os #os.environ['THEANO_FLAGS']='device=cpu,base_compile=/var/tmp' import sys import", "name of features. 
Each row is a string (white space not allowed) as", "classes rng=numpy.random.RandomState(1000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes) #group=[[\"A-E\"],[\"I-E\"],[\"A-P\"],[\"I-P\"],[\"A-X\"],[\"I-X\"],[\"UK\"]] #group=[[\"A-E\",\"A-P\"],[\"I-E\",\"I-P\",\"A-X\",\"I-X\",\"UK\"]] #group=[[\"A-E\",\"A-P\",\"A-X\"],[\"I-E\",\"I-P\",\"I-X\",\"UK\"]] group=[[\"A-E\"],[\"A-P\"],[\"I-E\",\"I-P\",\"A-X\",\"I-X\",\"UK\"]] #group=[[\"A-E\"],[\"A-P\"],[\"A-X\"],[\"I-E\",\"I-P\",\"I-X\",\"UK\"]] #group=[[\"A-E\"],[\"I-E\"]]", "batch_size=200) print test_set_y_pred[0:20] print test_set_y_pred_prob[0:20] print test_time perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf) print perf print conf_mat", "including the name of features. Each row is a string (white space not", "given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\",\"UK\"] #given=[\"A-E\",\"I-E\"] #given=[\"A-P\",\"I-P\"] #given=[\"A-E\",\"A-P\"] #given=[\"A-E\",\"A-X\"] #given=[\"A-P\",\"A-X\"] #given=[\"A-E\",\"A-P\",\"A-X\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\"] #given=[\"I-E\",\"I-P\"] data,classes,_=cl.take_some_classes(data,classes,given=given,others=None) # balance", "+ \"_\" + str(wid) + \"bp_unique_no.txt\" #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename) #save_dir=result_dir + \"_\".join(classes_unique) #filename=cell + \"_\"", "+ str(wid) + \"bp_Data.txt\"; data=numpy.loadtxt(filename,delimiter='\\t',dtype='float32') filename=data_dir + cell + \"_\" + str(wid) +", "original sample. [2]. A txt file including the class labels. Each row is", "is a string (white space not allowed) as the feature name of the", "the original sample. [2]. A txt file including the class labels. Each row", "file, each row is a sample, each column is a feature. No row", "str(wid) + \"bp_unique_yes.txt\" # save the features, lambdas, accuracies #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=True,tol=1e-3,filename=filename) filename=save_dir + '/'", "partition the data train_set_x_org,train_set_y_org,valid_set_x_org,valid_set_y_org,test_set_x_org,test_set_y_org=cl.partition_train_valid_test(data,classes,ratio=(2,1,1),rng=rng) # normalization train_set_x_org,data_min,data_max=cl.normalize_col_scale01(train_set_x_org,tol=1e-10) valid_set_x_org,_,_=cl.normalize_col_scale01(valid_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) test_set_x_org,_,_=cl.normalize_col_scale01(test_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) # train # setting", "Ottawa Aug. 
06, 2015 Contact: <EMAIL> \"\"\" #qsub -l procs=1,pmem=2000mb,walltime=36:00:00 -r n -N", "variables:', sum(selected) print features[selected] #print param0[selected] #print param0 #numpy.savetxt('param0.txt',param0,delimiter='\\t',fmt='%.4e') print abs(param0).sum(axis=1) weights.append(row_sum) #", "a string (white space not allowed) as the class label of the corresponding", "alpha2=alpha2, n_hidden=n_hidden, n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng, max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8) param0=classifier.params[0].get_value() param1=classifier.params[1].get_value() row_sum=numpy.sqrt((param0**2).sum(axis=1)) max_param=numpy.max(row_sum)", "\"_\" + str(wid) + \"bp_Data.txt\"; data=numpy.loadtxt(filename,delimiter='\\t',dtype='float32') filename=data_dir + cell + \"_\" + str(wid)", "txt file, each row is a sample, each column is a feature. No", "parameters #cell=sys.argv[1] # cell type #wid=sys.argv[2] # window size # load data \"\"\"", "+ \"bp_unique_yes.txt\" # save the features, lambdas, accuracies #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=True,tol=1e-3,filename=filename) filename=save_dir + '/' +", "#cl.write_feature_weight(weights,features,lambda21s,filename) filename=save_dir + '/' + cell + \"_\" + str(wid) + \"bp_unique_yes.txt\" #", "[1]. [3]. A txt file including the name of features. Each row is", "cell + \"_\" + str(wid) + \"bp_Data.txt\"; data=numpy.loadtxt(filename,delimiter='\\t',dtype='float32') filename=data_dir + cell + \"_\"", "space not allowed) as the class label of the corresponding row in [1].", "print 'Number of select variables:', sum(selected) print features[selected] #print param0[selected] #print param0 #numpy.savetxt('param0.txt',param0,delimiter='\\t',fmt='%.4e')", "main_deep_feat_select_mlp_l21norm.py import os #os.environ['THEANO_FLAGS']='device=cpu,base_compile=/var/tmp' import sys import time import numpy import deep_feat_select_mlp_l21norm import", "param0=classifier.params[0].get_value() param1=classifier.params[1].get_value() row_sum=numpy.sqrt((param0**2).sum(axis=1)) max_param=numpy.max(row_sum) selected=row_sum>(max_param*0.001) features_selected.append(features[selected]) weights_selected.append(row_sum[selected]) print row_sum print 'Number of select", "window size # load data \"\"\" A data set includes three files: [1].", "in the txt file. If an original sample is a matrix (3-way array),", "NRC, Ottawa Aug. 06, 2015 Contact: <EMAIL> \"\"\" #qsub -l procs=1,pmem=2000mb,walltime=36:00:00 -r n", "# partition the data train_set_x_org,train_set_y_org,valid_set_x_org,valid_set_y_org,test_set_x_org,test_set_y_org=cl.partition_train_valid_test(data,classes,ratio=(2,1,1),rng=rng) # normalization train_set_x_org,data_min,data_max=cl.normalize_col_scale01(train_set_x_org,tol=1e-10) valid_set_x_org,_,_=cl.normalize_col_scale01(valid_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) test_set_x_org,_,_=cl.normalize_col_scale01(test_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) # train #", "perfs.append(perf) print perf print conf_mat perfs=numpy.asarray(perfs) save_dir=result_dir + \"_\".join(classes_unique) try: os.makedirs(save_dir) except OSError:", "txt file including the class labels. 
Each row is a string (white space", "numpy.unique(classes) # partition the data train_set_x_org,train_set_y_org,valid_set_x_org,valid_set_y_org,test_set_x_org,test_set_y_org=cl.partition_train_valid_test(data,classes,ratio=(2,1,1),rng=rng) # normalization train_set_x_org,data_min,data_max=cl.normalize_col_scale01(train_set_x_org,tol=1e-10) valid_set_x_org,_,_=cl.normalize_col_scale01(valid_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) test_set_x_org,_,_=cl.normalize_col_scale01(test_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) # train", "Each row is a string (white space not allowed) as the feature name", "the features, lambdas, accuracies #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=True,tol=1e-3,filename=filename) filename=save_dir + '/' + cell + \"_\" +", "and columns allowd in the txt file. If an original sample is a", "numpy.set_printoptions(precision=4) # taking the input parameters #cell=sys.argv[1] # cell type #wid=sys.argv[2] # window", "features_selected=[] weights_selected=[] weights=[] perfs=[] for i in range(len(lambda21s)): lambda21=lambda21s[i] classifier,training_time=deep_feat_select_mlp_l21norm.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org, valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org,", "gc_collect numpy.warnings.filterwarnings('ignore') # Theano causes some warnings numpy.set_printoptions(threshold=numpy.nan) numpy.set_printoptions(precision=4) # taking the input", "# save the features, lambdas, accuracies #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=True,tol=1e-3,filename=filename) filename=save_dir + '/' + cell +", "as the feature name of the corresponding column in [1]. \"\"\" path=\"/home/yifengli/prog/my/DECRES/\" os.chdir(path)", "import numpy import deep_feat_select_mlp_l21norm import classification as cl from gc import collect as", "# taking the input parameters #cell=sys.argv[1] # cell type #wid=sys.argv[2] # window size", "str(wid) + \"bp_Classes.txt\"; classes=numpy.loadtxt(filename,delimiter='\\t',dtype=object) filename=data_dir+ cell + \"_Features.txt\" features=numpy.loadtxt(filename,delimiter='\\t',dtype=object) given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\",\"UK\"] #given=[\"A-E\",\"I-E\"] #given=[\"A-P\",\"I-P\"] #given=[\"A-E\",\"A-P\"]", "normalization train_set_x_org,data_min,data_max=cl.normalize_col_scale01(train_set_x_org,tol=1e-10) valid_set_x_org,_,_=cl.normalize_col_scale01(valid_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) test_set_x_org,_,_=cl.normalize_col_scale01(test_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) # train # setting the parameter learning_rate=0.1 alpha=0.1 alpha1=0.01", "+ \"_Features.txt\" features=numpy.loadtxt(filename,delimiter='\\t',dtype=object) given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\",\"UK\"] #given=[\"A-E\",\"I-E\"] #given=[\"A-P\",\"I-P\"] #given=[\"A-E\",\"A-P\"] #given=[\"A-E\",\"A-X\"] #given=[\"A-P\",\"A-X\"] #given=[\"A-E\",\"A-P\",\"A-X\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\"] #given=[\"I-E\",\"I-P\"]", "#cells=[\"GM12878\",\"HepG2\",\"K562\",\"HelaS3\",\"HUVEC\",\"A549\",\"MCF7\",\"HMEC\"] #wids=[200,500,1000,2000,4000] cells=[\"HelaS3\"] wids=[200] for cell in cells: for wid in wids: filename=data_dir", "#numpy.savetxt('param0.txt',param0,delimiter='\\t',fmt='%.4e') print abs(param0).sum(axis=1) weights.append(row_sum) # test 
test_set_y_pred,test_set_y_pred_prob,test_time=deep_feat_select_mlp_l21norm.test_model(classifier, test_set_x_org, batch_size=200) print test_set_y_pred[0:20] print test_set_y_pred_prob[0:20]", "as gc_collect numpy.warnings.filterwarnings('ignore') # Theano causes some warnings numpy.set_printoptions(threshold=numpy.nan) numpy.set_printoptions(precision=4) # taking the", "[1]. \"\"\" path=\"/home/yifengli/prog/my/DECRES/\" os.chdir(path) data_dir=\"/home/yifengli/prog/my/DECRES/data/\" result_dir=\"/home/yifengli/prog/my/DECRES/result/\" #cells=[\"GM12878\",\"HepG2\",\"K562\",\"HelaS3\",\"HUVEC\",\"A549\",\"MCF7\",\"HMEC\"] #wids=[200,500,1000,2000,4000] cells=[\"HelaS3\"] wids=[200] for cell in", "data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes) # partition the data train_set_x_org,train_set_y_org,valid_set_x_org,valid_set_y_org,test_set_x_org,test_set_y_org=cl.partition_train_valid_test(data,classes,ratio=(2,1,1),rng=rng) # normalization train_set_x_org,data_min,data_max=cl.normalize_col_scale01(train_set_x_org,tol=1e-10)", "range(len(lambda21s)): lambda21=lambda21s[i] classifier,training_time=deep_feat_select_mlp_l21norm.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org, valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org, learning_rate=learning_rate, alpha=alpha, lambda21=lambda21, alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden, n_epochs=n_epochs,", "filename=data_dir + cell + \"_\" + str(wid) + \"bp_Classes.txt\"; classes=numpy.loadtxt(filename,delimiter='\\t',dtype=object) filename=data_dir+ cell +", "# train # setting the parameter learning_rate=0.1 alpha=0.1 alpha1=0.01 alpha2=0.0 n_hidden=[128,16]#[256,64,16] n_epochs=1000 batch_size=200", "the sample sizes of the classes rng=numpy.random.RandomState(1000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes) #group=[[\"A-E\"],[\"I-E\"],[\"A-P\"],[\"I-P\"],[\"A-X\"],[\"I-X\"],[\"UK\"]]", "alpha=alpha, lambda21=lambda21, alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden, n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng, max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8) param0=classifier.params[0].get_value()", "param1=classifier.params[1].get_value() row_sum=numpy.sqrt((param0**2).sum(axis=1)) max_param=numpy.max(row_sum) selected=row_sum>(max_param*0.001) features_selected.append(features[selected]) weights_selected.append(row_sum[selected]) print row_sum print 'Number of select variables:',", "cell + \"_Features.txt\" features=numpy.loadtxt(filename,delimiter='\\t',dtype=object) given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\",\"UK\"] #given=[\"A-E\",\"I-E\"] #given=[\"A-P\",\"I-P\"] #given=[\"A-E\",\"A-P\"] #given=[\"A-E\",\"A-X\"] #given=[\"A-P\",\"A-X\"] #given=[\"A-E\",\"A-P\",\"A-X\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\"]", "test_set_x_org, batch_size=200) print test_set_y_pred[0:20] print test_set_y_pred_prob[0:20] print test_time perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf) print perf print", "includes three files: [1]. 
A TAB seperated txt file, each row is a", "save_dir=result_dir + \"_\".join(classes_unique) try: os.makedirs(save_dir) except OSError: pass # save the weights filename=save_dir", "the class label of the corresponding row in [1]. [3]. A txt file", "#numpy.random.seed(1000) rng=numpy.random.RandomState(2000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes) # partition the data train_set_x_org,train_set_y_org,valid_set_x_org,valid_set_y_org,test_set_x_org,test_set_y_org=cl.partition_train_valid_test(data,classes,ratio=(2,1,1),rng=rng) #", "+ \"bp_unique_no.txt\" #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename) #save_dir=result_dir + \"_\".join(classes_unique) #filename=cell + \"_\" + str(wid) + \"bp.txt\"", "param0 #numpy.savetxt('param0.txt',param0,delimiter='\\t',fmt='%.4e') print abs(param0).sum(axis=1) weights.append(row_sum) # test test_set_y_pred,test_set_y_pred_prob,test_time=deep_feat_select_mlp_l21norm.test_model(classifier, test_set_x_org, batch_size=200) print test_set_y_pred[0:20] print", "#group=[[\"A-P\"],[\"I-P\"]] #group=[[\"A-E\"],[\"A-P\"]] #group=[[\"A-E\"],[\"A-X\"]] #group=[[\"A-P\"],[\"A-X\"]] #group=[[\"A-E\"],[\"A-P\"],[\"A-X\"]] #group=[[\"A-E\",\"I-E\"],[\"A-P\",\"I-P\"]] #group=[[\"A-E\",\"A-P\"],[\"I-E\",\"I-P\"]] #group=[[\"A-E\",\"I-E\"],[\"A-P\",\"I-P\"],[\"A-X\",\"I-X\"]] #group=[[\"A-E\",\"A-P\",\"A-X\"],[\"I-E\",\"I-P\",\"I-X\"]] #group=[[\"I-E\"],[\"I-P\"]] classes=cl.merge_class_labels(classes,group) print numpy.unique(classes)", "print numpy.unique(classes) # set random state #numpy.random.seed(1000) rng=numpy.random.RandomState(2000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes)", "\"_\" + str(wid) + \"bp_unique_yes.txt\" # save the features, lambdas, accuracies #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=True,tol=1e-3,filename=filename) filename=save_dir", "\"\"\" An example of running DFS for MLP based on L2,1-norm. <NAME> NRC,", "#given=[\"I-E\",\"I-P\"] data,classes,_=cl.take_some_classes(data,classes,given=given,others=None) # balance the sample sizes of the classes rng=numpy.random.RandomState(1000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print", "[3]. A txt file including the name of features. Each row is a", "# cell type #wid=sys.argv[2] # window size # load data \"\"\" A data", "#given=[\"A-E\",\"A-X\"] #given=[\"A-P\",\"A-X\"] #given=[\"A-E\",\"A-P\",\"A-X\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\"] #given=[\"A-E\",\"I-E\",\"A-P\",\"I-P\",\"A-X\",\"I-X\"] #given=[\"I-E\",\"I-P\"] data,classes,_=cl.take_some_classes(data,classes,given=given,others=None) # balance the sample sizes of", "perfs=numpy.asarray(perfs) save_dir=result_dir + \"_\".join(classes_unique) try: os.makedirs(save_dir) except OSError: pass # save the weights", "not allowed) as the class label of the corresponding row in [1]. [3].", "the rows of the original sample. [2]. A txt file including the class", "-N main_DFS_feature_selection_l21norm -o main_DFS_feature_selection_l21norm.out -e main_DFS_feature_selection_l21norm.err -M <EMAIL> -m bea main_deep_feat_select_mlp_l21norm.py import os", "is a feature. No row and columns allowd in the txt file. 
If", "perfs=[] for i in range(len(lambda21s)): lambda21=lambda21s[i] classifier,training_time=deep_feat_select_mlp_l21norm.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org, valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org, learning_rate=learning_rate, alpha=alpha, lambda21=lambda21,", "n_hidden=n_hidden, n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng, max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8) param0=classifier.params[0].get_value() param1=classifier.params[1].get_value() row_sum=numpy.sqrt((param0**2).sum(axis=1)) max_param=numpy.max(row_sum) selected=row_sum>(max_param*0.001)", "\"bp_unique_no.txt\" #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename) #save_dir=result_dir + \"_\".join(classes_unique) #filename=cell + \"_\" + str(wid) + \"bp.txt\" #cl.save_perform(save_dir,filename,perf=perf,std=None,conf_mat=conf_mat,classes_unique=classes_unique,training_time=training_time,test_time=test_time)", "an original sample is a matrix (3-way array), a row of this file", "test_time perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf) print perf print conf_mat perfs=numpy.asarray(perfs) save_dir=result_dir + \"_\".join(classes_unique) try: os.makedirs(save_dir)", "n -N main_DFS_feature_selection_l21norm -o main_DFS_feature_selection_l21norm.out -e main_DFS_feature_selection_l21norm.err -M <EMAIL> -m bea main_deep_feat_select_mlp_l21norm.py import", "the input parameters #cell=sys.argv[1] # cell type #wid=sys.argv[2] # window size # load", "\"_\" + str(wid) + \"bp_unique_no.txt\" #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename) #save_dir=result_dir + \"_\".join(classes_unique) #filename=cell + \"_\" +", "test_set_y_pred_prob[0:20] print test_time perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf) print perf print conf_mat perfs=numpy.asarray(perfs) save_dir=result_dir + \"_\".join(classes_unique)", "in cells: for wid in wids: filename=data_dir + cell + \"_\" + str(wid)", "balance the sample sizes of the classes rng=numpy.random.RandomState(1000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes)", "learning_rate=learning_rate, alpha=alpha, lambda21=lambda21, alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden, n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng, max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8)", "from gc import collect as gc_collect numpy.warnings.filterwarnings('ignore') # Theano causes some warnings numpy.set_printoptions(threshold=numpy.nan)", "taking the input parameters #cell=sys.argv[1] # cell type #wid=sys.argv[2] # window size #", "pass # save the weights filename=save_dir + '/' + cell + \"_\" +", "str(wid) + \"bp_unique_no.txt\" #cl.write_feature_weight2(weights,features,lambda21s,perfs[:,-3],uniqueness=False,tol=1e-3,filename=filename) #save_dir=result_dir + \"_\".join(classes_unique) #filename=cell + \"_\" + str(wid) +", "sizes of the classes rng=numpy.random.RandomState(1000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes) 
#group=[[\"A-E\"],[\"I-E\"],[\"A-P\"],[\"I-P\"],[\"A-X\"],[\"I-X\"],[\"UK\"]] #group=[[\"A-E\",\"A-P\"],[\"I-E\",\"I-P\",\"A-X\",\"I-X\",\"UK\"]] #group=[[\"A-E\",\"A-P\",\"A-X\"],[\"I-E\",\"I-P\",\"I-X\",\"UK\"]]", "each column is a feature. No row and columns allowd in the txt", "lambda21s=numpy.arange(0.028,-0.0001,-0.0001) if cell==\"HelaS3\": lambda21s=numpy.arange(0.028,-0.0001,-0.0001) if cell==\"K562\": lambda1s=numpy.arange(0.025,-0.0001,-0.0001) lambda21s=[0.02]#numpy.arange(0.04,-0.001,-0.001) features_selected=[] weights_selected=[] weights=[] perfs=[] for", "-l procs=1,pmem=2000mb,walltime=36:00:00 -r n -N main_DFS_feature_selection_l21norm -o main_DFS_feature_selection_l21norm.out -e main_DFS_feature_selection_l21norm.err -M <EMAIL> -m", "lambda21=lambda21, alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden, n_epochs=n_epochs, batch_size=batch_size, activation_func=activation_func, rng=rng, max_num_epoch_change_learning_rate=50, max_num_epoch_change_rate=0.8, learning_rate_decay_rate=0.8) param0=classifier.params[0].get_value() param1=classifier.params[1].get_value()", "txt file. If an original sample is a matrix (3-way array), a row", "time import numpy import deep_feat_select_mlp_l21norm import classification as cl from gc import collect", "row and columns allowd in the txt file. If an original sample is", "cells: for wid in wids: filename=data_dir + cell + \"_\" + str(wid) +", "lambda21=lambda21s[i] classifier,training_time=deep_feat_select_mlp_l21norm.train_model(train_set_x_org=train_set_x_org, train_set_y_org=train_set_y_org, valid_set_x_org=valid_set_x_org, valid_set_y_org=valid_set_y_org, learning_rate=learning_rate, alpha=alpha, lambda21=lambda21, alpha1=alpha1, alpha2=alpha2, n_hidden=n_hidden, n_epochs=n_epochs, batch_size=batch_size,", "of the corresponding row in [1]. [3]. A txt file including the name", "-r n -N main_DFS_feature_selection_l21norm -o main_DFS_feature_selection_l21norm.out -e main_DFS_feature_selection_l21norm.err -M <EMAIL> -m bea main_deep_feat_select_mlp_l21norm.py", "wid in wids: filename=data_dir + cell + \"_\" + str(wid) + \"bp_Data.txt\"; data=numpy.loadtxt(filename,delimiter='\\t',dtype='float32')", "group=[[\"A-E\"],[\"A-P\"],[\"I-E\",\"I-P\",\"A-X\",\"I-X\",\"UK\"]] #group=[[\"A-E\"],[\"A-P\"],[\"A-X\"],[\"I-E\",\"I-P\",\"I-X\",\"UK\"]] #group=[[\"A-E\"],[\"I-E\"]] #group=[[\"A-P\"],[\"I-P\"]] #group=[[\"A-E\"],[\"A-P\"]] #group=[[\"A-E\"],[\"A-X\"]] #group=[[\"A-P\"],[\"A-X\"]] #group=[[\"A-E\"],[\"A-P\"],[\"A-X\"]] #group=[[\"A-E\",\"I-E\"],[\"A-P\",\"I-P\"]] #group=[[\"A-E\",\"A-P\"],[\"I-E\",\"I-P\"]] #group=[[\"A-E\",\"I-E\"],[\"A-P\",\"I-P\"],[\"A-X\",\"I-X\"]] #group=[[\"A-E\",\"A-P\",\"A-X\"],[\"I-E\",\"I-P\",\"I-X\"]] #group=[[\"I-E\"],[\"I-P\"]]", "including the class labels. Each row is a string (white space not allowed)", "MLP based on L2,1-norm. <NAME> NRC, Ottawa Aug. 06, 2015 Contact: <EMAIL> \"\"\"", "a sample, each column is a feature. 
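# --- Illustrative sketch, a hedged stand-in for cl.merge_class_labels (its real
# implementation is not shown here). The script collapses the seven region
# labels into coarser groups; a plausible implementation maps every member of a
# group to one canonical name, here the group's members joined by '_', purely
# as an illustrative convention.
import numpy
def merge_labels_demo(labels,group):
    lookup={}
    for members in group:
        name="_".join(members)
        for m in members:
            lookup[m]=name
    return numpy.asarray([lookup[l] for l in labels],dtype=object)
print(merge_labels_demo(numpy.asarray(["A-E","I-P","UK"],dtype=object),
                        [["A-E"],["A-P"],["I-E","I-P","A-X","I-X","UK"]]))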
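# --- Illustrative sketch (not cl's actual code): cl.perform above is assumed to
# return performance measures together with a confusion matrix. The counting
# below shows the core of such a computation on made-up predictions.
import numpy
y_true=numpy.asarray([0,0,1,1,2,2])
y_pred=numpy.asarray([0,1,1,1,2,0])
K=numpy.unique(y_true).size
conf=numpy.zeros((K,K),dtype=int)
for t,p in zip(y_true,y_pred):
    conf[t,p]+=1
print(conf)                                              # rows: true, cols: predicted
print(numpy.diag(conf)/conf.sum(axis=1).astype(float))   # per-class accuracy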
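# --- Illustrative sketch, a hedged stand-in for cl.change_class_labels. After
# merging, the string labels are converted to consecutive integer codes;
# numpy.unique with return_inverse performs exactly this mapping and also
# yields the unique label list that the script later joins into save_dir.
import numpy
labels_demo=numpy.asarray(["A-E","A-P","A-E","A-P"],dtype=object)
classes_unique_demo,codes_demo=numpy.unique(labels_demo,return_inverse=True)
print(classes_unique_demo)   # ['A-E' 'A-P']
print(codes_demo)            # [0 1 0 1]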
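# --- Illustrative sketch, a hedged stand-in for cl.partition_train_valid_test
# with ratio=(2,1,1), i.e. a 50%/25%/25% split. The real function may stratify
# by class; this version just permutes the sample indices with the supplied
# RandomState and cuts the permutation proportionally.
import numpy
def partition_demo(X,y,ratio=(2,1,1),rng=None):
    if rng is None:
        rng=numpy.random.RandomState(2000)
    idx=rng.permutation(X.shape[0])
    r=numpy.asarray(ratio,dtype=float)
    cut1=int(round(idx.size*r[0]/r.sum()))
    cut2=int(round(idx.size*(r[0]+r[1])/r.sum()))
    tr,va,te=idx[:cut1],idx[cut1:cut2],idx[cut2:]
    return X[tr],y[tr],X[va],y[va],X[te],y[te]
Xd=numpy.arange(16,dtype='float32').reshape(8,2)
yd=numpy.asarray([0,1,0,1,0,1,0,1])
splits=partition_demo(Xd,yd)
print([s.shape[0] for s in splits[::2]])   # [4, 2, 2] for ratio (2,1,1)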
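# --- Illustrative sketch of the L2,1-norm feature-scoring rule used in the
# training loop, shown on a made-up weight matrix. Each input feature owns one
# row of the first-layer weights param0; the L2 norm of that row measures how
# much the feature is used, and rows whose norm falls below 0.1% of the largest
# row norm are treated as driven to zero by the L2,1 penalty.
import numpy
rng_demo=numpy.random.RandomState(0)
W_demo=rng_demo.randn(10,4)      # 10 input features, 4 hidden units (made up)
W_demo[6:,:]*=1e-6               # pretend the penalty zeroed the last 4 rows
row_norm=numpy.sqrt((W_demo**2).sum(axis=1))
keep=row_norm>(numpy.max(row_norm)*0.001)
print(row_norm)
print(numpy.where(keep)[0])      # indices of the selected features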
<gh_stars>10-100
#!/usr/bin/env python
"""
An example of running DFS for MLP based on L2,1-norm.
<NAME>
NRC, Ottawa
Aug. 06, 2015
Contact: <EMAIL>
"""
#qsub -l procs=1,pmem=2000mb,walltime=36:00:00 -r n -N main_DFS_feature_selection_l21norm -o main_DFS_feature_selection_l21norm.out -e main_DFS_feature_selection_l21norm.err -M <EMAIL> -m bea main_deep_feat_select_mlp_l21norm.py

import os
#os.environ['THEANO_FLAGS']='device=cpu,base_compile=/var/tmp'
import sys
import time
import numpy
import deep_feat_select_mlp_l21norm
import classification as cl
from gc import collect as gc_collect
numpy.warnings.filterwarnings('ignore') # Theano causes some warnings
numpy.set_printoptions(threshold=numpy.nan)
numpy.set_printoptions(precision=4)

# taking the input parameters
#cell=sys.argv[1] # cell type
#wid=sys.argv[2] # window size

# load data
"""
A data set includes three files:
[1].
A TAB seperated txt file, each row is a sample, each", "gc import collect as gc_collect numpy.warnings.filterwarnings('ignore') # Theano causes some warnings numpy.set_printoptions(threshold=numpy.nan) numpy.set_printoptions(precision=4)", "# test test_set_y_pred,test_set_y_pred_prob,test_time=deep_feat_select_mlp_l21norm.test_model(classifier, test_set_x_org, batch_size=200) print test_set_y_pred[0:20] print test_set_y_pred_prob[0:20] print test_time perf,conf_mat=cl.perform(test_set_y_org,test_set_y_pred,numpy.unique(train_set_y_org)) perfs.append(perf)", "set random state #numpy.random.seed(1000) rng=numpy.random.RandomState(2000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape print numpy.unique(classes) # partition the", "numpy.set_printoptions(threshold=numpy.nan) numpy.set_printoptions(precision=4) # taking the input parameters #cell=sys.argv[1] # cell type #wid=sys.argv[2] #", "numpy.unique(classes) classes_unique,classes=cl.change_class_labels(classes) print numpy.unique(classes) # set random state #numpy.random.seed(1000) rng=numpy.random.RandomState(2000) data,classes,others=cl.balance_sample_size(data,classes,others=None,min_size_given=None,rng=rng) print data.shape", "running DFS for MLP based on L2,1-norm. <NAME> NRC, Ottawa Aug. 06, 2015", "lambda21s=numpy.arange(0.020,-0.0001,-0.0001) if cell==\"HepG2\": lambda21s=numpy.arange(0.028,-0.0001,-0.0001) if cell==\"HelaS3\": lambda21s=numpy.arange(0.028,-0.0001,-0.0001) if cell==\"K562\": lambda1s=numpy.arange(0.025,-0.0001,-0.0001) lambda21s=[0.02]#numpy.arange(0.04,-0.001,-0.001) features_selected=[] weights_selected=[]", "\"\"\" A data set includes three files: [1]. A TAB seperated txt file,", "test_set_x_org,_,_=cl.normalize_col_scale01(test_set_x_org,tol=1e-10,data_min=data_min,data_max=data_max) # train # setting the parameter learning_rate=0.1 alpha=0.1 alpha1=0.01 alpha2=0.0 n_hidden=[128,16]#[256,64,16] n_epochs=1000" ]
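The selection rule above keeps an input feature whenever the L2 norm of its row in the first-layer weight matrix exceeds 0.1% of the largest row norm. A minimal self-contained sketch of just that rule, with a random matrix standing in for the trained classifier.params[0]:

import numpy

rng = numpy.random.RandomState(0)
param0 = rng.randn(16, 8)                      # stand-in for trained first-layer weights
row_sum = numpy.sqrt((param0**2).sum(axis=1))  # one L2 row norm per input feature
selected = row_sum > (numpy.max(row_sum) * 0.001)
print('Number of selected variables:', selected.sum())

With an untrained random matrix every row passes the threshold; it is the L2,1 penalty applied during training that drives whole rows toward zero and makes the cutoff selective.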
[ "<filename>client/hotbox.py if __name__ == \"__main__\": from gui import App app = App() app.mainloop()" ]
[ "Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot quit async def second(self, tools: objects.tools, package:", "async def second2(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id", "= package.peer_id, message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\") # -> framework will reload", "async def second(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id", "raise exceptions.Quit(\"test\") # -> to finish your framework (closing all projects that was", "from testcanarybot import exceptions # Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit'])", "import exceptions # Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot", "second2(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id,", "['quit']) # @testcanarybot quit async def second(self, tools: objects.tools, package: objects.package): await tools.api.messages.send(", "launched by tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload async def second2(self, tools:", "message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\") # -> framework will reload your library", "import random from testcanarybot import objects from testcanarybot import exceptions # Copyright 2021", "package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'выхожу", "# -> to finish your framework (closing all projects that was launched by", "package.peer_id, message = 'выхожу из фреймворка...' ) raise exceptions.Quit(\"test\") # -> to finish", "tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload async def second2(self, tools: objects.tools, package:", "package.peer_id, message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\") # -> framework will reload your", "framework (closing all projects that was launched by tppm) @objects.priority(commands = ['lib_reload']) #", "= ['quit']) # @testcanarybot quit async def second(self, tools: objects.tools, package: objects.package): await", "objects from testcanarybot import exceptions # Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands =", "your framework (closing all projects that was launched by tppm) @objects.priority(commands = ['lib_reload'])", "that was launched by tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload async def", "await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'перезагружаю...' ) raise", "objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message =", "to finish your framework (closing all projects that was launched by tppm) @objects.priority(commands", "tools.gen_random(), peer_id = package.peer_id, message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\") # -> framework", "из фреймворка...' 
) raise exceptions.Quit(\"test\") # -> to finish your framework (closing all", "kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot quit async def second(self, tools:", "class Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot quit async def second(self, tools: objects.tools,", "objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'выхожу из", "import objects from testcanarybot import exceptions # Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands", "from testcanarybot import objects from testcanarybot import exceptions # Copyright 2021 kensoi class", "was launched by tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload async def second2(self,", "lib_reload async def second2(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(),", "(closing all projects that was launched by tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot", "2021 kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot quit async def second(self,", "@objects.priority(commands = ['quit']) # @testcanarybot quit async def second(self, tools: objects.tools, package: objects.package):", "random_id = tools.gen_random(), peer_id = package.peer_id, message = 'выхожу из фреймворка...' ) raise", "random_id = tools.gen_random(), peer_id = package.peer_id, message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\") #", "# @testcanarybot lib_reload async def second2(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id", "= tools.gen_random(), peer_id = package.peer_id, message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\") # ->", "testcanarybot import objects from testcanarybot import exceptions # Copyright 2021 kensoi class Main(objects.libraryModule):", "def second(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id =", "await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'выхожу из фреймворка...'", ") raise exceptions.Quit(\"test\") # -> to finish your framework (closing all projects that", "= ['lib_reload']) # @testcanarybot lib_reload async def second2(self, tools: objects.tools, package: objects.package): await", "peer_id = package.peer_id, message = 'выхожу из фреймворка...' ) raise exceptions.Quit(\"test\") # ->", "random from testcanarybot import objects from testcanarybot import exceptions # Copyright 2021 kensoi", "exceptions # Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot quit", "exceptions.Quit(\"test\") # -> to finish your framework (closing all projects that was launched", "= package.peer_id, message = 'выхожу из фреймворка...' 
) raise exceptions.Quit(\"test\") # -> to", "all projects that was launched by tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload", "Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot quit async def", "@objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload async def second2(self, tools: objects.tools, package: objects.package):", "['lib_reload']) # @testcanarybot lib_reload async def second2(self, tools: objects.tools, package: objects.package): await tools.api.messages.send(", "tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\")", "tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message", "def second2(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id =", "second(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id,", "'выхожу из фреймворка...' ) raise exceptions.Quit(\"test\") # -> to finish your framework (closing", "package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'перезагружаю...'", "= 'выхожу из фреймворка...' ) raise exceptions.Quit(\"test\") # -> to finish your framework", "= tools.gen_random(), peer_id = package.peer_id, message = 'выхожу из фреймворка...' ) raise exceptions.Quit(\"test\")", "message = 'выхожу из фреймворка...' ) raise exceptions.Quit(\"test\") # -> to finish your", "-> to finish your framework (closing all projects that was launched by tppm)", "# Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit']) # @testcanarybot quit async", "quit async def second(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id = tools.gen_random(),", "peer_id = package.peer_id, message = 'перезагружаю...' ) raise exceptions.LibraryReload(\"Reload\") # -> framework will", "tools.gen_random(), peer_id = package.peer_id, message = 'выхожу из фреймворка...' ) raise exceptions.Quit(\"test\") #", "objects.package): await tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'перезагружаю...' )", "# @testcanarybot quit async def second(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id", "@testcanarybot lib_reload async def second2(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id =", "tools.api.messages.send( random_id = tools.gen_random(), peer_id = package.peer_id, message = 'выхожу из фреймворка...' )", "finish your framework (closing all projects that was launched by tppm) @objects.priority(commands =", "projects that was launched by tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload async", "testcanarybot import exceptions # Copyright 2021 kensoi class Main(objects.libraryModule): @objects.priority(commands = ['quit']) #", "@testcanarybot quit async def second(self, tools: objects.tools, package: objects.package): await tools.api.messages.send( random_id =", "by tppm) @objects.priority(commands = ['lib_reload']) # @testcanarybot lib_reload async def second2(self, tools: objects.tools,", "фреймворка...' 
) raise exceptions.Quit(\"test\") # -> to finish your framework (closing all projects" ]
[ "# Function reads in system state and returns its derivative def dXdt(X,t): VV,", "(uA/cm^2) # Define amplitude, init time and end time amp = 0 #0.003", "= (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area) / Cm dmmdt", "f.close() # Save current stimulation data f = open('output/stimulation.csv', 'w') for i in", "non-inactivating and is controlld by a single activation gating variable (n). The full", "return gK * nn**4 * (VV - EK) def I_NaT(VV,mm,hh): return gNaT *", "X soma_area = soma_len*soma_diam*PI idx = int(t/dt) dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) +", "scale (default = 0.02), sample every 'n'th point def load_stim(name, scale, n): stim", "= soma_len*soma_diam*PI idx = int(t/dt) dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) +", "activation gating variable (m) and an inactivation gating variable (h). The potassium channel", "(ms) T = 7400 dt = 0.025 # Generate array of time points,", "activation gating variable (n). The full model state x comprises four state variables", "'n'th point def load_stim(name, scale, n): stim = [] with open(name, \"r\") as", "by the python script. ''' import scipy as sp import numpy as np", "(VV - EK) def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh * (VV", "= odeint(dXdt, init, t) # Define variables to simplify analysis VV = X[:,0]", "in Huguenard J, McCormick DA, Shepherd GM (1997) 'Electrophysiology of the Neuron'. The", "duration (ms) T = 7400 dt = 0.025 # Generate array of time", "of the Neuron'. The neuron model consists of three ionic currents: a passive", "state variable that it is possible to measure experimentally is the membrane voltage.", "and returns its derivative def dXdt(X,t): VV, mm, hh, nn, idx = X", "Args: file path, amplitude scale (default = 0.02), sample every 'n'th point def", "np.zeros(int(2*T/dt)) stim = load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20) stim +=", "in range(int(len(VV))): f.write('%f \\n' % VV_obs[i]) f.close() # Save current stimulation data f", "reversal potentials: EX; # thresholds: aXV1; membrane capacitance: Cm; # time constants: tx0,", "return (tn0 + epsn*(1 - sp.tanh((VV - anV1)/ anV3)*sp.tanh((VV - anV1)/ anV3))) /", "count+=1 if count % n == 0: stim.append(scale*(float(line.rstrip('\\n')))) ins.close() return stim # Initialise", "Huguenard J, McCormick DA, Shepherd GM (1997) 'Electrophysiology of the Neuron'. The neuron", "-17.65 ahV3 = 27.22 th0 = 0.701 epsh = 12.90 anV1 = -34.58", "data point # Function reads in system state and returns its derivative def", "amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return 0.5*(1 + sp.tanh((VV - ahV1)/ ahV2)) def", "current injection protocol (uA/cm^2) # Args: file path, amplitude scale (default = 0.02),", "sample every 'n'th point def load_stim(name, scale, n): stim = [] with open(name,", "ahV3 = 27.22 th0 = 0.701 epsh = 12.90 anV1 = -34.58 anV2", "as to replicate the behaviour of the thalamocortical relay neuron presented in Huguenard", "by an activation gating variable (m) and an inactivation gating variable (h). 
The", "Define equations of motion for full neuron state x = [V,m,h,n] # Use", "into the neuron ############################################################################## # Function for injected a current step (uA/cm^2) #", "model: forward-integrating the equations of motion ############################################################################## # Integrate model equations # Arguments:", "stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential (mV)\") plt.subplot(2,1,2) plt.ylabel(\"Current (uA)\") plt.plot(t,stimulation,'b',linewidth=0.8) plt.show()", "a current step (uA/cm^2) # Args: amplitude, init time, final time def i_inj(t):", "Function for loading current injection protocol (uA/cm^2) # Args: file path, amplitude scale", "experimentally is the membrane voltage. This is the state variable output by the", "amp*(t>t_i) - amp*(t>t_f) # Function for loading current injection protocol (uA/cm^2) # Args:", "2020 Conductance model of an RVLM neuron for use with reservoir computing using", "comprises four state variables - the membrane voltage and the three gating varibales", "capacitance: Cm; # time constants: tx0, epsx Cm = 1 gNaT = 69", "# thresholds: aXV1; membrane capacitance: Cm; # time constants: tx0, epsx Cm =", "= 69 ENa = 41 gK = 6.9 EK = -100 EL =", "for loading current injection protocol (uA/cm^2) # Args: file path, amplitude scale (default", "VV_obs[i]) f.close() # Save current stimulation data f = open('output/stimulation.csv', 'w') for i", "as: x = [V,m,h,n] The only state variable that it is possible to", "- anV1)/ anV2)) def nn_tau(VV): return (tn0 + epsn*(1 - sp.tanh((VV - anV1)/", "ahV1)/ ahV2)) def hh_tau(VV): return (th0 + epsh*(1 - sp.tanh((VV - ahV1)/ ahV3)*sp.tanh((VV", "anV1)/ anV3))) / 3.0**((TEMP_C-23.5)/10) # Define functions for ionic currents (in uA/cm^2) #", "the equations of motion ############################################################################## # Integrate model equations # Arguments: state derivative,", "for i in range(int(len(VV))): f.write('%f \\n' % VV[i]) f.close() # Save voltage data", "= (mm_inf(VV) - mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV) dnndt = (nn_inf(VV) -", "sodium current (NaT), and a potassium current (K). The sodium current is controlled", "x = [V,m,h,n] The only state variable that it is possible to measure", "state variable output by the python script. ''' import scipy as sp import", "############################################################################## # Define total current stimulation = stim[0:len(VV)] + i_inj(t) # Plotting membrane", "neuron for use with reservoir computing using a modified Hodgkin-Huxley framework of ion", "open('output/voltage.csv', 'w') for i in range(int(len(VV))): f.write('%f \\n' % VV_obs[i]) f.close() # Save", "a single activation gating variable (n). The full model state x comprises four", "- amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return 0.5*(1 +", "as np import matplotlib.pyplot as plt from scipy.integrate import odeint # Define constants", "The only state variable that it is possible to measure experimentally is the", "currents: a passive leak current, a transient sodium current (NaT), and a potassium", "tn0 = 1.291 epsn = 4.314 ############################################################################## # Preparing current stimulation to be", "leak current, a transient sodium current (NaT), and a potassium current (K). 
The", "epsh = 12.90 anV1 = -34.58 anV2 = 22.17 anV3 = 23.58 tn0", "thresholds: aXV1; membrane capacitance: Cm; # time constants: tx0, epsx Cm = 1", "300 ############################################################################## # Initializing the neuron model ############################################################################## # Initialize state variable values", "EL) def I_K(VV,nn): return gK * nn**4 * (VV - EK) def I_NaT(VV,mm,hh):", "Initialize state variable values for t=0: x(0) = [V(0),m(0),h(0),n(0)] # Default vals correspond", "sigma_obs = 0.1 obs_error = np.random.normal(0, sigma_obs, len(VV)) VV_obs = VV + obs_error", "Preparing current stimulation to be injected into the neuron ############################################################################## # Function for", "(mm_inf(VV) - mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV) dnndt = (nn_inf(VV) - nn)/nn_tau(VV)", "Initialise stim or load external stimulation files # If not loading in external", "potential # Final value in the init array is idx (starts at 0)", "mm = X[:,1] hh = X[:,2] nn = X[:,3] # Adding Gaussian error", "amV2 = 10 amV3 = 23.39 tm0 = 0.143 epsm = 1.099 ahV1", "dVVdt, dmmdt, dhhdt, dnndt, idx ############################################################################## # Model Parameters ############################################################################## # Soma dimensions", "protocol (uA/cm^2) # Args: file path, amplitude scale (default = 0.02), sample every", "modified Hodgkin-Huxley framework of ion channel gating. Model parameters are chosen so as", "for full neuron state x = [V,m,h,n] # Use idx to read in", "= -17.65 ahV3 = 27.22 th0 = 0.701 epsh = 12.90 anV1 =", "gating variable (h). The potassium channel is non-inactivating and is controlld by a", "plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential (mV)\") plt.subplot(2,1,2) plt.ylabel(\"Current (uA)\") plt.plot(t,stimulation,'b',linewidth=0.8) plt.show() # Save voltage", "current stimulation = stim[0:len(VV)] + i_inj(t) # Plotting membrane voltage and stimulation time", "voltage trace (mV) sigma_obs = 0.1 obs_error = np.random.normal(0, sigma_obs, len(VV)) VV_obs =", "(n). The full model state x comprises four state variables - the membrane", "plt.ylabel(\"Current (uA)\") plt.plot(t,stimulation,'b',linewidth=0.8) plt.show() # Save voltage data (without gaussian noise) f =", "0.143 epsm = 1.099 ahV1 = -65.37 ahV2 = -17.65 ahV3 = 27.22", "of ion channel gating. Model parameters are chosen so as to replicate the", "J, McCormick DA, Shepherd GM (1997) 'Electrophysiology of the Neuron'. 
The neuron model", "Define functions for gating kinetics of ion channels # Effect of temperature is", "current step (uA/cm^2) # Args: amplitude, init time, final time def i_inj(t): return", "amplitude scale (default = 0.02), sample every 'n'th point def load_stim(name, scale, n):", "neuron model consists of three ionic currents: a passive leak current, a transient", "-34.58 anV2 = 22.17 anV3 = 23.58 tn0 = 1.291 epsn = 4.314", "amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return 0.5*(1 + sp.tanh((VV - ahV1)/ ahV2))", "(cm) soma_len = 0.01 soma_diam = 0.029/PI # Define model parameters # conductances:", "stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20) # Current step", "[V,m,h,n] The only state variable that it is possible to measure experimentally is", "# Use idx to read in correct current stimulation data point # Function", "= X[:,2] nn = X[:,3] # Adding Gaussian error to voltage trace (mV)", "data (without gaussian noise) f = open('output/voltage_clean.csv', 'w') for i in range(int(len(VV))): f.write('%f", "nn_tau(VV): return (tn0 + epsn*(1 - sp.tanh((VV - anV1)/ anV3)*sp.tanh((VV - anV1)/ anV3)))", "Department of Physics University of Bath, UK May 1st, 2020 Conductance model of", "(tn0 + epsn*(1 - sp.tanh((VV - anV1)/ anV3)*sp.tanh((VV - anV1)/ anV3))) / 3.0**((TEMP_C-23.5)/10)", "current is controlled by an activation gating variable (m) and an inactivation gating", "framework of ion channel gating. Model parameters are chosen so as to replicate", "init array is idx (starts at 0) init = [-65,0.00742,0.47258,0.06356,0] ############################################################################## # Running", "currents (in uA/cm^2) # Currents correspond to passive leak, delayed-rectifier potassium, # and", "sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return 0.5*(1", "= -65 gLeak = 0.465 amV1 = -39.92 amV2 = 10 amV3 =", "relay neuron presented in Huguenard J, McCormick DA, Shepherd GM (1997) 'Electrophysiology of", "stim[idx])/soma_area) / Cm dmmdt = (mm_inf(VV) - mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV)", "for ionic currents (in uA/cm^2) # Currents correspond to passive leak, delayed-rectifier potassium,", "dmmdt = (mm_inf(VV) - mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV) dnndt = (nn_inf(VV)", "from zero to T t = np.arange(0,T,dt) ############################################################################## # Model Equations of Motion", "mm_inf(VV): return 0.5*(1 + sp.tanh((VV - amV1)/ amV2)) def mm_tau(VV): return (tm0 +", "27.22 th0 = 0.701 epsh = 12.90 anV1 = -34.58 anV2 = 22.17", "I_NaT(VV,mm,hh): return gNaT * mm**3 * hh * (VV - ENa) # Define", "0.02), sample every 'n'th point def load_stim(name, scale, n): stim = [] with", "/ Cm dmmdt = (mm_inf(VV) - mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV) dnndt", "3.14159265359 # Model duration (ms) T = 7400 dt = 0.025 # Generate", "return 0.5*(1 + sp.tanh((VV - anV1)/ anV2)) def nn_tau(VV): return (tn0 + epsn*(1", "presented in Huguenard J, McCormick DA, Shepherd GM (1997) 'Electrophysiology of the Neuron'.", "correspond to passive leak, delayed-rectifier potassium, # and transient sodium currents def I_Leak(VV):", "gating kinetics of ion channels # Effect of temperature is accounted for by", "I_K(VV,nn): return gK * nn**4 * (VV - EK) def I_NaT(VV,mm,hh): return gNaT", "= X soma_area = soma_len*soma_diam*PI idx = int(t/dt) dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn)", "7400 
dt = 0.025 # Generate array of time points, from zero to", "############################################################################## # Model Equations of Motion ############################################################################## # Define functions for gating kinetics", "############################################################################## # Soma dimensions (cm) soma_len = 0.01 soma_diam = 0.029/PI # Define", "leak, delayed-rectifier potassium, # and transient sodium currents def I_Leak(VV): return gLeak *", "+ stim[idx])/soma_area) / Cm dmmdt = (mm_inf(VV) - mm)/mm_tau(VV) dhhdt = (hh_inf(VV) -", "Define variables to simplify analysis VV = X[:,0] mm = X[:,1] hh =", "def hh_inf(VV): return 0.5*(1 + sp.tanh((VV - ahV1)/ ahV2)) def hh_tau(VV): return (th0", "Integrate model equations # Arguments: state derivative, initial neuron state x(0), time point", "* mm**3 * hh * (VV - ENa) # Define equations of motion", "current stimulation data point # Function reads in system state and returns its", "voltage and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential (mV)\") plt.subplot(2,1,2) plt.ylabel(\"Current (uA)\")", "plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential (mV)\") plt.subplot(2,1,2) plt.ylabel(\"Current (uA)\") plt.plot(t,stimulation,'b',linewidth=0.8) plt.show() # Save voltage data", "delayed-rectifier potassium, # and transient sodium currents def I_Leak(VV): return gLeak * (VV", "- the membrane voltage and the three gating varibales m, h, and n,", "of Physics University of Bath, UK May 1st, 2020 Conductance model of an", "count % n == 0: stim.append(scale*(float(line.rstrip('\\n')))) ins.close() return stim # Initialise stim or", "(mV) sigma_obs = 0.1 obs_error = np.random.normal(0, sigma_obs, len(VV)) VV_obs = VV +", "mm_tau(VV): return (tm0 + epsm*(1 - sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3)))", "def mm_inf(VV): return 0.5*(1 + sp.tanh((VV - amV1)/ amV2)) def mm_tau(VV): return (tm0", "stim[0:len(VV)] + i_inj(t) # Plotting membrane voltage and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8)", "0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20) # Current step (uA/cm^2) # Define", "model consists of three ionic currents: a passive leak current, a transient sodium", "- sp.tanh((VV - ahV1)/ ahV3)*sp.tanh((VV - ahV1)/ ahV3))) / 3.0**((TEMP_C-23.5)/10) def nn_inf(VV): return", "<NAME> (<EMAIL>) Department of Physics University of Bath, UK May 1st, 2020 Conductance", "nn**4 * (VV - EK) def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh", "to voltage trace (mV) sigma_obs = 0.1 obs_error = np.random.normal(0, sigma_obs, len(VV)) VV_obs", "gating. Model parameters are chosen so as to replicate the behaviour of the", "return gNaT * mm**3 * hh * (VV - ENa) # Define equations", "= 1.291 epsn = 4.314 ############################################################################## # Preparing current stimulation to be injected", "of motion for full neuron state x = [V,m,h,n] # Use idx to", "X[:,3] # Adding Gaussian error to voltage trace (mV) sigma_obs = 0.1 obs_error", "i_inj(t) # Plotting membrane voltage and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential", "membrane voltage and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential (mV)\") plt.subplot(2,1,2) plt.ylabel(\"Current", "channel gating. 
Model parameters are chosen so as to replicate the behaviour of", "voltage data (with gaussian noise) f = open('output/voltage.csv', 'w') for i in range(int(len(VV))):", "VV + obs_error ############################################################################## # Plotting and saving model output ############################################################################## # Define", "# and transient sodium currents def I_Leak(VV): return gLeak * (VV - EL)", "noise) f = open('output/voltage.csv', 'w') for i in range(int(len(VV))): f.write('%f \\n' % VV_obs[i])", "PI = 3.14159265359 # Model duration (ms) T = 7400 dt = 0.025", "'Electrophysiology of the Neuron'. The neuron model consists of three ionic currents: a", "potentials: EX; # thresholds: aXV1; membrane capacitance: Cm; # time constants: tx0, epsx", "= -100 EL = -65 gLeak = 0.465 amV1 = -39.92 amV2 =", "+= load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20) # Current step (uA/cm^2)", "(-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area) / Cm dmmdt =", "0.5*(1 + sp.tanh((VV - anV1)/ anV2)) def nn_tau(VV): return (tn0 + epsn*(1 -", "stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_3.dat',", "(uA)\") plt.plot(t,stimulation,'b',linewidth=0.8) plt.show() # Save voltage data (without gaussian noise) f = open('output/voltage_clean.csv',", "functions for ionic currents (in uA/cm^2) # Currents correspond to passive leak, delayed-rectifier", "1st, 2020 Conductance model of an RVLM neuron for use with reservoir computing", "41 gK = 6.9 EK = -100 EL = -65 gLeak = 0.465", "############################################################################## # Model Parameters ############################################################################## # Soma dimensions (cm) soma_len = 0.01 soma_diam", "- amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return 0.5*(1 + sp.tanh((VV - ahV1)/", "potassium current (K). The sodium current is controlled by an activation gating variable", "amV1 = -39.92 amV2 = 10 amV3 = 23.39 tm0 = 0.143 epsm", "simplify analysis VV = X[:,0] mm = X[:,1] hh = X[:,2] nn =", "variable that it is possible to measure experimentally is the membrane voltage. This", "the membrane voltage. This is the state variable output by the python script.", "= 96480 PI = 3.14159265359 # Model duration (ms) T = 7400 dt", "# Save voltage data (with gaussian noise) f = open('output/voltage.csv', 'w') for i", "neuron state x = [V,m,h,n] # Use idx to read in correct current", "= 23.58 tn0 = 1.291 epsn = 4.314 ############################################################################## # Preparing current stimulation", "(uA/cm^2) # Args: amplitude, init time, final time def i_inj(t): return amp*(t>t_i) -", "############################################################################## # Integrate model equations # Arguments: state derivative, initial neuron state x(0),", "- sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return", "I_Leak(VV): return gLeak * (VV - EL) def I_K(VV,nn): return gK * nn**4", "resting potential # Final value in the init array is idx (starts at", "that it is possible to measure experimentally is the membrane voltage. 
This is", "+ obs_error ############################################################################## # Plotting and saving model output ############################################################################## # Define total", "channel is non-inactivating and is controlld by a single activation gating variable (n).", "Define functions for ionic currents (in uA/cm^2) # Currents correspond to passive leak,", "The neuron model consists of three ionic currents: a passive leak current, a", "1.291 epsn = 4.314 ############################################################################## # Preparing current stimulation to be injected into", "############################################################################## # Initialize state variable values for t=0: x(0) = [V(0),m(0),h(0),n(0)] # Default", "loading current injection protocol (uA/cm^2) # Args: file path, amplitude scale (default =", "0 #0.003 t_i = 100 t_f = 300 ############################################################################## # Initializing the neuron", "Save voltage data (with gaussian noise) f = open('output/voltage.csv', 'w') for i in", "(without gaussian noise) f = open('output/voltage_clean.csv', 'w') for i in range(int(len(VV))): f.write('%f \\n'", "Cm; # time constants: tx0, epsx Cm = 1 gNaT = 69 ENa", "for i in range(int(len(VV))): f.write('%f \\n' % VV_obs[i]) f.close() # Save current stimulation", "def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh * (VV - ENa) #", "end time amp = 0 #0.003 t_i = 100 t_f = 300 ##############################################################################", "is the membrane voltage. This is the state variable output by the python", "amV3 = 23.39 tm0 = 0.143 epsm = 1.099 ahV1 = -65.37 ahV2", "EK) def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh * (VV - ENa)", "array is idx (starts at 0) init = [-65,0.00742,0.47258,0.06356,0] ############################################################################## # Running model:", "# Define variables to simplify analysis VV = X[:,0] mm = X[:,1] hh", "def hh_tau(VV): return (th0 + epsh*(1 - sp.tanh((VV - ahV1)/ ahV3)*sp.tanh((VV - ahV1)/", "def nn_inf(VV): return 0.5*(1 + sp.tanh((VV - anV1)/ anV2)) def nn_tau(VV): return (tn0", "it is possible to measure experimentally is the membrane voltage. This is the", "returns its derivative def dXdt(X,t): VV, mm, hh, nn, idx = X soma_area", "= load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02,", "script. ''' import scipy as sp import numpy as np import matplotlib.pyplot as", "at steady-state resting potential # Final value in the init array is idx", "steady-state resting potential # Final value in the init array is idx (starts", "controlld by a single activation gating variable (n). 
The full model state x", "= 300 ############################################################################## # Initializing the neuron model ############################################################################## # Initialize state variable", "- ahV1)/ ahV2)) def hh_tau(VV): return (th0 + epsh*(1 - sp.tanh((VV - ahV1)/", "+ epsm*(1 - sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def", "time and end time amp = 0 #0.003 t_i = 100 t_f =", "X[:,2] nn = X[:,3] # Adding Gaussian error to voltage trace (mV) sigma_obs", "state x = [V,m,h,n] # Use idx to read in correct current stimulation", "= 6.9 EK = -100 EL = -65 gLeak = 0.465 amV1 =", "Plotting and saving model output ############################################################################## # Define total current stimulation = stim[0:len(VV)]", "gating varibales m, h, and n, and is thus described as: x =", "by the Q10 coeff def mm_inf(VV): return 0.5*(1 + sp.tanh((VV - amV1)/ amV2))", "sp.tanh((VV - anV1)/ anV2)) def nn_tau(VV): return (tn0 + epsn*(1 - sp.tanh((VV -", "stim = load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_2.dat',", "load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20) # Current step (uA/cm^2) #", "+ i_inj(t) # Plotting membrane voltage and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane", "x(0), time point array X = odeint(dXdt, init, t) # Define variables to", "as sp import numpy as np import matplotlib.pyplot as plt from scipy.integrate import", "(1997) 'Electrophysiology of the Neuron'. The neuron model consists of three ionic currents:", "for injected a current step (uA/cm^2) # Args: amplitude, init time, final time", "def i_inj(t): return amp*(t>t_i) - amp*(t>t_f) # Function for loading current injection protocol", "load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20)", "ionic currents: a passive leak current, a transient sodium current (NaT), and a", "= [-65,0.00742,0.47258,0.06356,0] ############################################################################## # Running model: forward-integrating the equations of motion ############################################################################## #", "variables to simplify analysis VV = X[:,0] mm = X[:,1] hh = X[:,2]", "voltage data (without gaussian noise) f = open('output/voltage_clean.csv', 'w') for i in range(int(len(VV))):", "Plotting membrane voltage and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential (mV)\") plt.subplot(2,1,2)", "numpy as np import matplotlib.pyplot as plt from scipy.integrate import odeint # Define", "scale, n): stim = [] with open(name, \"r\") as ins: count = 0", "= 23.39 tm0 = 0.143 epsm = 1.099 ahV1 = -65.37 ahV2 =", "trace (mV) sigma_obs = 0.1 obs_error = np.random.normal(0, sigma_obs, len(VV)) VV_obs = VV", "f = open('output/voltage.csv', 'w') for i in range(int(len(VV))): f.write('%f \\n' % VV_obs[i]) f.close()", "is thus described as: x = [V,m,h,n] The only state variable that it", "motion ############################################################################## # Integrate model equations # Arguments: state derivative, initial neuron state", "ins: 
count = 0 for line in ins: count+=1 if count % n", "TEMP_C = 35 FARADAY = 96480 PI = 3.14159265359 # Model duration (ms)", "neuron state x(0), time point array X = odeint(dXdt, init, t) # Define", "Motion ############################################################################## # Define functions for gating kinetics of ion channels # Effect", "tx0, epsx Cm = 1 gNaT = 69 ENa = 41 gK =", "return gLeak * (VV - EL) def I_K(VV,nn): return gK * nn**4 *", "= np.zeros(int(2*T/dt)) stim = load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20) stim", "(default = 0.02), sample every 'n'th point def load_stim(name, scale, n): stim =", "= np.arange(0,T,dt) ############################################################################## # Model Equations of Motion ############################################################################## # Define functions for", "every 'n'th point def load_stim(name, scale, n): stim = [] with open(name, \"r\")", "96480 PI = 3.14159265359 # Model duration (ms) T = 7400 dt =", "ahV2)) def hh_tau(VV): return (th0 + epsh*(1 - sp.tanh((VV - ahV1)/ ahV3)*sp.tanh((VV -", "# Running model: forward-integrating the equations of motion ############################################################################## # Integrate model equations", "0: stim.append(scale*(float(line.rstrip('\\n')))) ins.close() return stim # Initialise stim or load external stimulation files", "use with reservoir computing using a modified Hodgkin-Huxley framework of ion channel gating.", "/ 3.0**((TEMP_C-23.5)/10) def nn_inf(VV): return 0.5*(1 + sp.tanh((VV - anV1)/ anV2)) def nn_tau(VV):", "100 t_f = 300 ############################################################################## # Initializing the neuron model ############################################################################## # Initialize", "% VV[i]) f.close() # Save voltage data (with gaussian noise) f = open('output/voltage.csv',", "# Arguments: state derivative, initial neuron state x(0), time point array X =", "== 0: stim.append(scale*(float(line.rstrip('\\n')))) ins.close() return stim # Initialise stim or load external stimulation", "for by the Q10 coeff def mm_inf(VV): return 0.5*(1 + sp.tanh((VV - amV1)/", "return 0.5*(1 + sp.tanh((VV - amV1)/ amV2)) def mm_tau(VV): return (tm0 + epsm*(1", "\\n' % VV_obs[i]) f.close() # Save current stimulation data f = open('output/stimulation.csv', 'w')", "T = 7400 dt = 0.025 # Generate array of time points, from", "using a modified Hodgkin-Huxley framework of ion channel gating. Model parameters are chosen", "and is controlld by a single activation gating variable (n). The full model", "[V,m,h,n] # Use idx to read in correct current stimulation data point #", "three ionic currents: a passive leak current, a transient sodium current (NaT), and", "init, t) # Define variables to simplify analysis VV = X[:,0] mm =", "the membrane voltage and the three gating varibales m, h, and n, and", "- nn)/nn_tau(VV) return dVVdt, dmmdt, dhhdt, dnndt, idx ############################################################################## # Model Parameters ##############################################################################", "= X[:,3] # Adding Gaussian error to voltage trace (mV) sigma_obs = 0.1", "Shepherd GM (1997) 'Electrophysiology of the Neuron'. 
The neuron model consists of three", "sp import numpy as np import matplotlib.pyplot as plt from scipy.integrate import odeint", "# Model duration (ms) T = 7400 dt = 0.025 # Generate array", "t) # Define variables to simplify analysis VV = X[:,0] mm = X[:,1]", "state variables - the membrane voltage and the three gating varibales m, h,", "Function reads in system state and returns its derivative def dXdt(X,t): VV, mm,", "constants: tx0, epsx Cm = 1 gNaT = 69 ENa = 41 gK", "anV3))) / 3.0**((TEMP_C-23.5)/10) # Define functions for ionic currents (in uA/cm^2) # Currents", "current stimulation to be injected into the neuron ############################################################################## # Function for injected", "python script. ''' import scipy as sp import numpy as np import matplotlib.pyplot", "############################################################################## # Initializing the neuron model ############################################################################## # Initialize state variable values for", "derivative, initial neuron state x(0), time point array X = odeint(dXdt, init, t)", "hh)/hh_tau(VV) dnndt = (nn_inf(VV) - nn)/nn_tau(VV) return dVVdt, dmmdt, dhhdt, dnndt, idx ##############################################################################", "ahV1 = -65.37 ahV2 = -17.65 ahV3 = 27.22 th0 = 0.701 epsh", "12.90 anV1 = -34.58 anV2 = 22.17 anV3 = 23.58 tn0 = 1.291", "f.close() # Save voltage data (with gaussian noise) f = open('output/voltage.csv', 'w') for", "scipy.integrate import odeint # Define constants TEMP_C = 35 FARADAY = 96480 PI", "- mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV) dnndt = (nn_inf(VV) - nn)/nn_tau(VV) return", "amV1)/ amV2)) def mm_tau(VV): return (tm0 + epsm*(1 - sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV", "'w') for i in range(int(len(VV))): f.write('%f \\n' % VV[i]) f.close() # Save voltage", "############################################################################## # Preparing current stimulation to be injected into the neuron ############################################################################## #", "array of time points, from zero to T t = np.arange(0,T,dt) ############################################################################## #", "ionic currents (in uA/cm^2) # Currents correspond to passive leak, delayed-rectifier potassium, #", "in the init array is idx (starts at 0) init = [-65,0.00742,0.47258,0.06356,0] ##############################################################################", "# time constants: tx0, epsx Cm = 1 gNaT = 69 ENa =", "(K). 
The sodium current is controlled by an activation gating variable (m) and", "-65.37 ahV2 = -17.65 ahV3 = 27.22 th0 = 0.701 epsh = 12.90", "ins: count+=1 if count % n == 0: stim.append(scale*(float(line.rstrip('\\n')))) ins.close() return stim #", "# Define model parameters # conductances: gX; reversal potentials: EX; # thresholds: aXV1;", "and stimulation time series plt.subplot(2,1,1) plt.plot(t,VV_obs,'k',linewidth=0.8) plt.ylabel(\"Membrane Potential (mV)\") plt.subplot(2,1,2) plt.ylabel(\"Current (uA)\") plt.plot(t,stimulation,'b',linewidth=0.8)", "Save voltage data (without gaussian noise) f = open('output/voltage_clean.csv', 'w') for i in", "to be injected into the neuron ############################################################################## # Function for injected a current", "= -65.37 ahV2 = -17.65 ahV3 = 27.22 th0 = 0.701 epsh =", "gX; reversal potentials: EX; # thresholds: aXV1; membrane capacitance: Cm; # time constants:", "and an inactivation gating variable (h). The potassium channel is non-inactivating and is", "0.701 epsh = 12.90 anV1 = -34.58 anV2 = 22.17 anV3 = 23.58", "+= load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20) # Current step (uA/cm^2) # Define amplitude, init time", "derivative def dXdt(X,t): VV, mm, hh, nn, idx = X soma_area = soma_len*soma_diam*PI", "time amp = 0 #0.003 t_i = 100 t_f = 300 ############################################################################## #", "full model state x comprises four state variables - the membrane voltage and", "anV3)*sp.tanh((VV - anV1)/ anV3))) / 3.0**((TEMP_C-23.5)/10) # Define functions for ionic currents (in", "of motion ############################################################################## # Integrate model equations # Arguments: state derivative, initial neuron", "0.465 amV1 = -39.92 amV2 = 10 amV3 = 23.39 tm0 = 0.143", "return dVVdt, dmmdt, dhhdt, dnndt, idx ############################################################################## # Model Parameters ############################################################################## # Soma", "(th0 + epsh*(1 - sp.tanh((VV - ahV1)/ ahV3)*sp.tanh((VV - ahV1)/ ahV3))) / 3.0**((TEMP_C-23.5)/10)", "replicate the behaviour of the thalamocortical relay neuron presented in Huguenard J, McCormick", "an activation gating variable (m) and an inactivation gating variable (h). The potassium", "open(name, \"r\") as ins: count = 0 for line in ins: count+=1 if", "files # If not loading in external stim, uncomment line below #stim =", "m, h, and n, and is thus described as: x = [V,m,h,n] The", "mm**3 * hh * (VV - ENa) # Define equations of motion for", "6.9 EK = -100 EL = -65 gLeak = 0.465 amV1 = -39.92", "time def i_inj(t): return amp*(t>t_i) - amp*(t>t_f) # Function for loading current injection", "dXdt(X,t): VV, mm, hh, nn, idx = X soma_area = soma_len*soma_diam*PI idx =", "transient sodium current (NaT), and a potassium current (K). 
The sodium current is", "plt.show() # Save voltage data (without gaussian noise) f = open('output/voltage_clean.csv', 'w') for", "hh, nn, idx = X soma_area = soma_len*soma_diam*PI idx = int(t/dt) dVVdt =", "0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20) #", "(VV - EL) def I_K(VV,nn): return gK * nn**4 * (VV - EK)", "Running model: forward-integrating the equations of motion ############################################################################## # Integrate model equations #", "EK = -100 EL = -65 gLeak = 0.465 amV1 = -39.92 amV2", "the thalamocortical relay neuron presented in Huguenard J, McCormick DA, Shepherd GM (1997)", "is controlld by a single activation gating variable (n). The full model state", "possible to measure experimentally is the membrane voltage. This is the state variable", "ion channels # Effect of temperature is accounted for by the Q10 coeff", "potassium, # and transient sodium currents def I_Leak(VV): return gLeak * (VV -", "load external stimulation files # If not loading in external stim, uncomment line", "stim # Initialise stim or load external stimulation files # If not loading", "mm)/mm_tau(VV) dhhdt = (hh_inf(VV) - hh)/hh_tau(VV) dnndt = (nn_inf(VV) - nn)/nn_tau(VV) return dVVdt,", "state and returns its derivative def dXdt(X,t): VV, mm, hh, nn, idx =", "(VV - ENa) # Define equations of motion for full neuron state x", "and transient sodium currents def I_Leak(VV): return gLeak * (VV - EL) def", "1 gNaT = 69 ENa = 41 gK = 6.9 EK = -100", "total current stimulation = stim[0:len(VV)] + i_inj(t) # Plotting membrane voltage and stimulation", "University of Bath, UK May 1st, 2020 Conductance model of an RVLM neuron", "VV = X[:,0] mm = X[:,1] hh = X[:,2] nn = X[:,3] #", "''' <NAME> (<EMAIL>) Department of Physics University of Bath, UK May 1st, 2020", "############################################################################## # Running model: forward-integrating the equations of motion ############################################################################## # Integrate model", "gK = 6.9 EK = -100 EL = -65 gLeak = 0.465 amV1", "sp.tanh((VV - amV1)/ amV2)) def mm_tau(VV): return (tm0 + epsm*(1 - sp.tanh((VV -", "t_f = 300 ############################################################################## # Initializing the neuron model ############################################################################## # Initialize state", "x(0) = [V(0),m(0),h(0),n(0)] # Default vals correspond to neuron at steady-state resting potential", "+ I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area) / Cm dmmdt = (mm_inf(VV) - mm)/mm_tau(VV)", "idx = int(t/dt) dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) +", "The sodium current is controlled by an activation gating variable (m) and an", "as plt from scipy.integrate import odeint # Define constants TEMP_C = 35 FARADAY", "Arguments: state derivative, initial neuron state x(0), time point array X = odeint(dXdt,", "gLeak * (VV - EL) def I_K(VV,nn): return gK * nn**4 * (VV", "anV3 = 23.58 tn0 = 1.291 epsn = 4.314 ############################################################################## # Preparing current", "import odeint # Define constants TEMP_C = 35 FARADAY = 96480 PI =", "point array X = odeint(dXdt, init, t) # Define variables to simplify analysis", "def I_K(VV,nn): return gK * nn**4 * (VV - EK) def I_NaT(VV,mm,hh): return", "stimulation = stim[0:len(VV)] + i_inj(t) # Plotting membrane voltage and stimulation 
time series", "= open('output/voltage.csv', 'w') for i in range(int(len(VV))): f.write('%f \\n' % VV_obs[i]) f.close() #", "dimensions (cm) soma_len = 0.01 soma_diam = 0.029/PI # Define model parameters #", "amV1)/ amV3)*sp.tanh((VV - amV1)/ amV3))) / 3.0**((TEMP_C-23.5)/10) def hh_inf(VV): return 0.5*(1 + sp.tanh((VV", "# Function for loading current injection protocol (uA/cm^2) # Args: file path, amplitude", "idx ############################################################################## # Model Parameters ############################################################################## # Soma dimensions (cm) soma_len = 0.01", "four state variables - the membrane voltage and the three gating varibales m,", "variable output by the python script. ''' import scipy as sp import numpy", "model state x comprises four state variables - the membrane voltage and the", "sodium currents def I_Leak(VV): return gLeak * (VV - EL) def I_K(VV,nn): return", "- EL) def I_K(VV,nn): return gK * nn**4 * (VV - EK) def", "membrane capacitance: Cm; # time constants: tx0, epsx Cm = 1 gNaT =", "This is the state variable output by the python script. ''' import scipy", "-65 gLeak = 0.465 amV1 = -39.92 amV2 = 10 amV3 = 23.39", "Model Equations of Motion ############################################################################## # Define functions for gating kinetics of ion", "controlled by an activation gating variable (m) and an inactivation gating variable (h).", "transient sodium currents def I_Leak(VV): return gLeak * (VV - EL) def I_K(VV,nn):", "aXV1; membrane capacitance: Cm; # time constants: tx0, epsx Cm = 1 gNaT", "0.5*(1 + sp.tanh((VV - amV1)/ amV2)) def mm_tau(VV): return (tm0 + epsm*(1 -", "stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat',", "0.025 # Generate array of time points, from zero to T t =", "file path, amplitude scale (default = 0.02), sample every 'n'th point def load_stim(name,", "the init array is idx (starts at 0) init = [-65,0.00742,0.47258,0.06356,0] ############################################################################## #", "- amV1)/ amV2)) def mm_tau(VV): return (tm0 + epsm*(1 - sp.tanh((VV - amV1)/", "to passive leak, delayed-rectifier potassium, # and transient sodium currents def I_Leak(VV): return", "forward-integrating the equations of motion ############################################################################## # Integrate model equations # Arguments: state", "############################################################################## # Function for injected a current step (uA/cm^2) # Args: amplitude, init", "are chosen so as to replicate the behaviour of the thalamocortical relay neuron", "len(VV)) VV_obs = VV + obs_error ############################################################################## # Plotting and saving model output", "and end time amp = 0 #0.003 t_i = 100 t_f = 300", "n == 0: stim.append(scale*(float(line.rstrip('\\n')))) ins.close() return stim # Initialise stim or load external", "behaviour of the thalamocortical relay neuron presented in Huguenard J, McCormick DA, Shepherd", "is accounted for by the Q10 coeff def mm_inf(VV): return 0.5*(1 + sp.tanh((VV", "amplitude, init time, final time def i_inj(t): return amp*(t>t_i) - amp*(t>t_f) # Function", "amp = 0 #0.003 t_i = 100 t_f = 300 ############################################################################## # Initializing", "(<EMAIL>) Department of 
Physics University of Bath, UK May 1st, 2020 Conductance model", "int(t/dt) dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area) /", "= int(t/dt) dVVdt = (-(I_NaT(VV,mm,hh) + I_K(VV,nn) + I_Leak(VV)) + (i_inj(t) + stim[idx])/soma_area)", "+= load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02,", "'w') for i in range(int(len(VV))): f.write('%f \\n' % VV_obs[i]) f.close() # Save current", "+ sp.tanh((VV - ahV1)/ ahV2)) def hh_tau(VV): return (th0 + epsh*(1 - sp.tanh((VV", "injected a current step (uA/cm^2) # Args: amplitude, init time, final time def", "h, and n, and is thus described as: x = [V,m,h,n] The only", "Model parameters are chosen so as to replicate the behaviour of the thalamocortical", "neuron at steady-state resting potential # Final value in the init array is", "t = np.arange(0,T,dt) ############################################################################## # Model Equations of Motion ############################################################################## # Define functions", "- anV1)/ anV3)*sp.tanh((VV - anV1)/ anV3))) / 3.0**((TEMP_C-23.5)/10) # Define functions for ionic", "model parameters # conductances: gX; reversal potentials: EX; # thresholds: aXV1; membrane capacitance:", "X[:,0] mm = X[:,1] hh = X[:,2] nn = X[:,3] # Adding Gaussian", "# Current step (uA/cm^2) # Define amplitude, init time and end time amp", "inactivation gating variable (h). The potassium channel is non-inactivating and is controlld by", "value in the init array is idx (starts at 0) init = [-65,0.00742,0.47258,0.06356,0]", "-100 EL = -65 gLeak = 0.465 amV1 = -39.92 amV2 = 10", "- EK) def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh * (VV -", "= np.random.normal(0, sigma_obs, len(VV)) VV_obs = VV + obs_error ############################################################################## # Plotting and", "/ 3.0**((TEMP_C-23.5)/10) # Define functions for ionic currents (in uA/cm^2) # Currents correspond", "for t=0: x(0) = [V(0),m(0),h(0),n(0)] # Default vals correspond to neuron at steady-state", "ins.close() return stim # Initialise stim or load external stimulation files # If", "current (K). The sodium current is controlled by an activation gating variable (m)", "= 35 FARADAY = 96480 PI = 3.14159265359 # Model duration (ms) T", "np.arange(0,T,dt) ############################################################################## # Model Equations of Motion ############################################################################## # Define functions for gating", "time points, from zero to T t = np.arange(0,T,dt) ############################################################################## # Model Equations", "0.5*(1 + sp.tanh((VV - ahV1)/ ahV2)) def hh_tau(VV): return (th0 + epsh*(1 -", "for line in ins: count+=1 if count % n == 0: stim.append(scale*(float(line.rstrip('\\n')))) ins.close()", "its derivative def dXdt(X,t): VV, mm, hh, nn, idx = X soma_area =", "Final value in the init array is idx (starts at 0) init =", "the three gating varibales m, h, and n, and is thus described as:", "* (VV - EL) def I_K(VV,nn): return gK * nn**4 * (VV -", "20) # Current step (uA/cm^2) # Define amplitude, init time and end time", "anV2)) def nn_tau(VV): return (tn0 + epsn*(1 - sp.tanh((VV - anV1)/ anV3)*sp.tanh((VV -", "the python script. 
''' import scipy as sp import numpy as np import", "VV_obs = VV + obs_error ############################################################################## # Plotting and saving model output ##############################################################################", "reads in system state and returns its derivative def dXdt(X,t): VV, mm, hh,", "stimulation to be injected into the neuron ############################################################################## # Function for injected a", "final time def i_inj(t): return amp*(t>t_i) - amp*(t>t_f) # Function for loading current", "from scipy.integrate import odeint # Define constants TEMP_C = 35 FARADAY = 96480", "Model Parameters ############################################################################## # Soma dimensions (cm) soma_len = 0.01 soma_diam = 0.029/PI", "# Args: amplitude, init time, final time def i_inj(t): return amp*(t>t_i) - amp*(t>t_f)", "- anV1)/ anV3))) / 3.0**((TEMP_C-23.5)/10) # Define functions for ionic currents (in uA/cm^2)", "0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20) stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20) stim", "# Initialize state variable values for t=0: x(0) = [V(0),m(0),h(0),n(0)] # Default vals", "idx (starts at 0) init = [-65,0.00742,0.47258,0.06356,0] ############################################################################## # Running model: forward-integrating the", "to simplify analysis VV = X[:,0] mm = X[:,1] hh = X[:,2] nn", "the Neuron'. The neuron model consists of three ionic currents: a passive leak", "Physics University of Bath, UK May 1st, 2020 Conductance model of an RVLM", "* (VV - EK) def I_NaT(VV,mm,hh): return gNaT * mm**3 * hh *", "def mm_tau(VV): return (tm0 + epsm*(1 - sp.tanh((VV - amV1)/ amV3)*sp.tanh((VV - amV1)/", "saving model output ############################################################################## # Define total current stimulation = stim[0:len(VV)] + i_inj(t)", "% VV_obs[i]) f.close() # Save current stimulation data f = open('output/stimulation.csv', 'w') for", "an inactivation gating variable (h). The potassium channel is non-inactivating and is controlld", "= 41 gK = 6.9 EK = -100 EL = -65 gLeak =", "at 0) init = [-65,0.00742,0.47258,0.06356,0] ############################################################################## # Running model: forward-integrating the equations of", "= 0.02), sample every 'n'th point def load_stim(name, scale, n): stim = []", "hh * (VV - ENa) # Define equations of motion for full neuron", "state derivative, initial neuron state x(0), time point array X = odeint(dXdt, init,", "t=0: x(0) = [V(0),m(0),h(0),n(0)] # Default vals correspond to neuron at steady-state resting", "a transient sodium current (NaT), and a potassium current (K). 
##############################################################################
# Model Equations of Motion
##############################################################################

# Define functions for the gating kinetics of the ion channels.
# The effect of temperature is accounted for by the Q10 coefficient.
# np.tanh replaces the original sp.tanh: scipy's top-level tanh was a
# deprecated alias of the NumPy function and has since been removed.
def mm_inf(VV): return 0.5*(1 + np.tanh((VV - amV1)/amV2))
def mm_tau(VV): return (tm0 + epsm*(1 - np.tanh((VV - amV1)/amV3)*np.tanh((VV - amV1)/amV3))) / 3.0**((TEMP_C - 23.5)/10)
def hh_inf(VV): return 0.5*(1 + np.tanh((VV - ahV1)/ahV2))
def hh_tau(VV): return (th0 + epsh*(1 - np.tanh((VV - ahV1)/ahV3)*np.tanh((VV - ahV1)/ahV3))) / 3.0**((TEMP_C - 23.5)/10)
def nn_inf(VV): return 0.5*(1 + np.tanh((VV - anV1)/anV2))
def nn_tau(VV): return (tn0 + epsn*(1 - np.tanh((VV - anV1)/anV3)*np.tanh((VV - anV1)/anV3))) / 3.0**((TEMP_C - 23.5)/10)

# Define functions for the ionic currents (in uA/cm^2)
# Currents correspond to passive leak, delayed-rectifier potassium,
# and transient sodium currents
def I_Leak(VV):        return gLeak * (VV - EL)
def I_K(VV, nn):       return gK * nn**4 * (VV - EK)
def I_NaT(VV, mm, hh): return gNaT * mm**3 * hh * (VV - ENa)
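# The division by 3.0**((TEMP_C-23.5)/10) in the tau functions above is a
# Q10 temperature correction with Q10 = 3: time constants characterised at
# 23.5 C are shortened at the simulated 35 C. A one-line check of the
# scaling factor (uncomment to print; the value is approximately 3.54):
#print(3.0**((TEMP_C - 23.5)/10))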
# Define equations of motion for the full neuron state x = [V,m,h,n]
# Use idx to read in the correct current stimulation data point; the copy of
# idx carried in the state vector starts at 0 and, since its own value is
# returned as its derivative, stays at 0 - the working index is recomputed
# from t on every call.
# The function reads in the system state and returns its derivative.
def dXdt(X, t):
    VV, mm, hh, nn, idx = X
    soma_area = soma_len*soma_diam*PI
    # clamp so odeint's trial steps slightly beyond T cannot index past
    # the end of the stimulation array
    idx = min(int(t/dt), len(stim) - 1)
    dVVdt = (-(I_NaT(VV, mm, hh) + I_K(VV, nn) + I_Leak(VV))
             + (i_inj(t) + stim[idx])/soma_area) / Cm
    dmmdt = (mm_inf(VV) - mm)/mm_tau(VV)
    dhhdt = (hh_inf(VV) - hh)/hh_tau(VV)
    dnndt = (nn_inf(VV) - nn)/nn_tau(VV)
    return dVVdt, dmmdt, dhhdt, dnndt, idx

##############################################################################
# Model Parameters
##############################################################################

# Soma dimensions (cm)
soma_len = 0.01
soma_diam = 0.029/PI

# Define model parameters
# conductances: gX; reversal potentials: EX; thresholds: aXV1;
# membrane capacitance: Cm; time constants: tx0, epsx
Cm = 1
gNaT = 69
ENa = 41
gK = 6.9
EK = -100
EL = -65
gLeak = 0.465
amV1 = -39.92
amV2 = 10
amV3 = 23.39
tm0 = 0.143
epsm = 1.099
ahV1 = -65.37
ahV2 = -17.65
ahV3 = 27.22
th0 = 0.701
epsh = 12.90
anV1 = -34.58
anV2 = 22.17
anV3 = 23.58
tn0 = 1.291
epsn = 4.314
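# Optional sanity check (an editor's sketch, not part of the original
# protocol): the initial gating values used below in init (m=0.00742,
# h=0.47258, n=0.06356) should lie close to the steady-state curves
# evaluated near the -65 mV resting potential; printing the comparison is a
# cheap way to catch a mistyped parameter. Uncomment to run:
#print('m_inf(-65) =', mm_inf(-65.0))   # roughly 0.007
#print('h_inf(-65) =', hh_inf(-65.0))   # roughly 0.49
#print('n_inf(-65) =', nn_inf(-65.0))   # roughly 0.06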
##############################################################################
# Preparing current stimulation to be injected into the neuron
##############################################################################

# Function for injecting a current step (uA/cm^2)
# Args: amplitude, init time, final time
def i_inj(t): return amp*(t > t_i) - amp*(t > t_f)

# Function for loading a current injection protocol (uA/cm^2)
# Args: file path, amplitude scale (default = 0.02), sample every n'th point
def load_stim(name, scale, n):
    stim = []
    with open(name, "r") as ins:   # the with-statement closes the file itself
        count = 0
        for line in ins:
            count += 1
            if count % n == 0:
                stim.append(scale*float(line.rstrip('\n')))
    return stim

# Initialise stim or load external stimulation files
# If not loading in external stim, uncomment the line below
#stim = np.zeros(int(2*T/dt))
stim = load_stim('stim_files/Pstandard_100khz_0.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_1.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_2.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_3.dat', 0.02, 20)
stim += load_stim('stim_files/Pstandard_100khz_4.dat', 0.02, 20)

# Current step (uA/cm^2)
# Define amplitude, init time and end time
amp = 0  #0.003
t_i = 100
t_f = 300
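# The .dat files above are assumed to hold one current sample per line at
# 100 kHz, which load_stim then decimates by a factor of 20. If the files
# are unavailable, a synthetic protocol in the same format can be written
# first (the filename 'stim_files/synthetic.dat' is illustrative only);
# uncomment to use:
#rng = np.random.default_rng(0)
#samples = rng.normal(0.0, 1.0, size=int(2*T/dt)*20)   # 20x oversampled noise
#np.savetxt('stim_files/synthetic.dat', samples, fmt='%f')
#stim = load_stim('stim_files/synthetic.dat', 0.02, 20)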
##############################################################################
# Initializing the neuron model
##############################################################################

# Initialize state variable values for t=0: x(0) = [V(0),m(0),h(0),n(0)]
# Default vals correspond to the neuron at its steady-state resting potential
# The final value in the init array is idx (starts at 0)
init = [-65, 0.00742, 0.47258, 0.06356, 0]

##############################################################################
# Running model: forward-integrating the equations of motion
##############################################################################

# Integrate the model equations
# Arguments: state derivative, initial neuron state x(0), time point array
X = odeint(dXdt, init, t)

# Define variables to simplify analysis
VV = X[:,0]
mm = X[:,1]
hh = X[:,2]
nn = X[:,3]

# Adding Gaussian error to the voltage trace (mV) to mimic measurement noise
sigma_obs = 0.1
obs_error = np.random.normal(0, sigma_obs, len(VV))
VV_obs = VV + obs_error

##############################################################################
# Plotting and saving model output
##############################################################################

# Define the total current (cast the stim list to an array so it adds
# elementwise with the injected step)
stimulation = np.array(stim[0:len(VV)]) + i_inj(t)

# Plotting membrane voltage and stimulation time series
plt.subplot(2,1,1)
plt.plot(t, VV_obs, 'k', linewidth=0.8)
plt.ylabel("Membrane Potential (mV)")
plt.subplot(2,1,2)
plt.plot(t, stimulation, 'b', linewidth=0.8)
plt.ylabel("Current (uA)")
plt.show()

# Save voltage data (without gaussian noise)
f = open('output/voltage_clean.csv', 'w')
for i in range(len(VV)):
    f.write('%f\n' % VV[i])
f.close()

# Save voltage data (with gaussian noise)
f = open('output/voltage.csv', 'w')
for i in range(len(VV)):
    f.write('%f\n' % VV_obs[i])
f.close()

# Save current stimulation data
f = open('output/stimulation.csv', 'w')
for i in range(len(VV)):
    f.write('%f\n' % stimulation[i])
f.close()
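# The three write loops above can be replaced by single calls to np.savetxt,
# which produce the same one-value-per-line files (an equivalent alternative,
# not a change to the output format):
#np.savetxt('output/voltage_clean.csv', VV, fmt='%f')
#np.savetxt('output/voltage.csv', VV_obs, fmt='%f')
#np.savetxt('output/stimulation.csv', stimulation, fmt='%f')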
[ "gates. import sys import os #defining input def Input(): upper_bit_int = int(input(\"Enter \\'first", "carry) result.append(carry) return list(reversed(result)) # Final processing and printing the sum def Output():", "carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result)) # Final processing", "= ',final_sum_bin) print() # loop for adding two integers again and again unless", "def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry def calculateCarry(a,", "return ~bitValue # defining xor gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne),", "carry = 0 for index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry))", "in list('{:08b}'.format(lower_bit_int))] # output of bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin =", "a digital circuit performing integer addition. # It adds two 8 bit binary", "the input range min=0 and max=255 print('calculating sum...') else : print('Error: Input is", "= input(\"Do you want to add again Y/N : \") if quit.lower() ==", "else : print('Error: Input is not in range') print('Restarting...') print() del upper_bit_int del", "print('Restarting...') print() del upper_bit_int del lower_bit_int Output() upper_bit = [int(x) for x in", "first input from the user lower_bit_int= int(input(\"Enter \\'second integer\\' from 0 to 255", "bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e in result) final_sum_int = int(''.join(str(e) for", "== 'n': exit() elif quit.lower() == 'y': print() Output() print('This is a python", "again Y/N : \") if quit.lower() == 'n': exit() elif quit.lower() == 'y':", "the user return(upper_bit_int, lower_bit_int) # defining and gate def andGate(bitOne, bitTwo): return bitOne", "bitTwo): return bitOne & bitTwo # defining or gate def orGate(bitOne, bitTwo): return", "python program which simulates the behaviour of a digital circuit performing integer addition.')", "x in list('{:08b}'.format(lower_bit_int))] # output of bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin", "or gate def orGate(bitOne, bitTwo): return bitOne | bitTwo # defining not gate", "and printing the sum def Output(): upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0 and", "carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result)) # Final processing and", "# Final processing and printing the sum def Output(): upper_bit_int, lower_bit_int = Input()", "= bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e in result) final_sum_int = int(''.join(str(e)", "final_sum_int = int(''.join(str(e) for e in result),2) print('Integer sum using bit operation =", "of a digital circuit performing integer addition. # It adds two 8 bit", "bitTwo # defining not gate def compliment(bitValue): return ~bitValue # defining xor gate", "to 255 : \")) # first input from the user lower_bit_int= int(input(\"Enter \\'second", "integer addition. 
# It adds two 8 bit binary numbers using different logical", "defining or gate def orGate(bitOne, bitTwo): return bitOne | bitTwo # defining not", "orGate(andGate(a,b), andGate(c,d)) # performing bit operation def bitOperation(upper_bit, lower_bit): result = [] carry", "from 0 to 255 : \")) # first input from the user lower_bit_int=", "defining xor gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating", "sum...') else : print('Error: Input is not in range') print('Restarting...') print() del upper_bit_int", "',final_sum_bin) print() # loop for adding two integers again and again unless user", "# loop for adding two integers again and again unless user exits quit", "upper_bit_int del lower_bit_int Output() upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit =", "result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e in result) final_sum_int =", "', final_sum_int ,' In Binary = ',final_sum_bin) print() # loop for adding two", "to add again Y/N : \") if quit.lower() == 'n': exit() elif quit.lower()", "performing integer addition. # It adds two 8 bit binary numbers using different", "b, c, d): return orGate(andGate(a,b), andGate(c,d)) # performing bit operation def bitOperation(upper_bit, lower_bit):", "the sum def Output(): upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking", "from the user return(upper_bit_int, lower_bit_int) # defining and gate def andGate(bitOne, bitTwo): return", "lower_bit_int) # defining and gate def andGate(bitOne, bitTwo): return bitOne & bitTwo #", "if quit.lower() == 'n': exit() elif quit.lower() == 'y': print() Output() print('This is", "defining not gate def compliment(bitValue): return ~bitValue # defining xor gate def xorGate(bitOne,", "list('{:08b}'.format(lower_bit_int))] # output of bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e)", "loop for adding two integers again and again unless user exits quit =", "program which simulates the behaviour of a digital circuit performing integer addition. 
#", "which simulates the behaviour of a digital circuit performing integer addition.') print() Output()", "Binary = ',final_sum_bin) print() # loop for adding two integers again and again", "Y/N : \") if quit.lower() == 'n': exit() elif quit.lower() == 'y': print()", "not in range') print('Restarting...') print() del upper_bit_int del lower_bit_int Output() upper_bit = [int(x)", "want to add again Y/N : \") if quit.lower() == 'n': exit() elif", "Output() print('This is a python program which simulates the behaviour of a digital", "if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range min=0 and max=255 print('calculating sum...')", "bitOne | bitTwo # defining not gate def compliment(bitValue): return ~bitValue # defining", "bit operation = ', final_sum_int ,' In Binary = ',final_sum_bin) print() # loop", "integers again and again unless user exits quit = input(\"Do you want to", "result) final_sum_int = int(''.join(str(e) for e in result),2) print('Integer sum using bit operation", "in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))] # output of bit", "255>=lower_bit_int>=0: #checking the input range min=0 and max=255 print('calculating sum...') else : print('Error:", "255 : \")) # first input from the user lower_bit_int= int(input(\"Enter \\'second integer\\'", "using different logical gates. import sys import os #defining input def Input(): upper_bit_int", "def orGate(bitOne, bitTwo): return bitOne | bitTwo # defining not gate def compliment(bitValue):", "second input from the user return(upper_bit_int, lower_bit_int) # defining and gate def andGate(bitOne,", "print() del upper_bit_int del lower_bit_int Output() upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))]", "bitOne & bitTwo # defining or gate def orGate(bitOne, bitTwo): return bitOne |", "Output() upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x", "Output(): upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range", "elif quit.lower() == 'y': print() Output() print('This is a python program which simulates", "print('This is a python program which simulates the behaviour of a digital circuit", "\\'second integer\\' from 0 to 255 : \")) # second input from the", "\") if quit.lower() == 'n': exit() elif quit.lower() == 'y': print() Output() print('This", "lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result)) #", "= Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range min=0 and max=255", "in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal,", "input from the user return(upper_bit_int, lower_bit_int) # defining and gate def andGate(bitOne, bitTwo):", "calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result)) # Final processing and printing the", "del lower_bit_int Output() upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x)", "output of bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e", "operation result = bitOperation(list(reversed(upper_bit)), 
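# Quick sanity check (an editor-added sketch, not part of the original
# script): the composed xorGate should reproduce Python's native ^ over all
# four single-bit input pairs before the adder is trusted.
for a in (0, 1):
    for b in (0, 1):
        assert xorGate(a, b) == (a ^ b)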
list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e in result) final_sum_int", "result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result)) # Final", "| bitTwo # defining not gate def compliment(bitValue): return ~bitValue # defining xor", "255 : \")) # second input from the user return(upper_bit_int, lower_bit_int) # defining", "Input(): upper_bit_int = int(input(\"Enter \\'first integer\\' from 0 to 255 : \")) #", "and again unless user exits quit = input(\"Do you want to add again", "return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry def calculateCarry(a, b, c, d):", "exits quit = input(\"Do you want to add again Y/N : \") if", "in result) final_sum_int = int(''.join(str(e) for e in result),2) print('Integer sum using bit", "final_sum_int ,' In Binary = ',final_sum_bin) print() # loop for adding two integers", "c, d): return orGate(andGate(a,b), andGate(c,d)) # performing bit operation def bitOperation(upper_bit, lower_bit): result", "list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e in result) final_sum_int = int(''.join(str(e) for e", "result.append(carry) return list(reversed(result)) # Final processing and printing the sum def Output(): upper_bit_int,", "[int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))] #", "# defining or gate def orGate(bitOne, bitTwo): return bitOne | bitTwo # defining", "andGate(c,d)) # performing bit operation def bitOperation(upper_bit, lower_bit): result = [] carry =", "unless user exits quit = input(\"Do you want to add again Y/N :", "operation def bitOperation(upper_bit, lower_bit): result = [] carry = 0 for index in", "for e in result),2) print('Integer sum using bit operation = ', final_sum_int ,'", "calculating carry def calculateCarry(a, b, c, d): return orGate(andGate(a,b), andGate(c,d)) # performing bit", "return list(reversed(result)) # Final processing and printing the sum def Output(): upper_bit_int, lower_bit_int", "lower_bit): result = [] carry = 0 for index in range(len(upper_bit)): after_xor_cal =", "list(reversed(result)) # Final processing and printing the sum def Output(): upper_bit_int, lower_bit_int =", "int(input(\"Enter \\'first integer\\' from 0 to 255 : \")) # first input from", "is not in range') print('Restarting...') print() del upper_bit_int del lower_bit_int Output() upper_bit =", "int(''.join(str(e) for e in result),2) print('Integer sum using bit operation = ', final_sum_int", "again and again unless user exits quit = input(\"Do you want to add", "byte-adder.py # A simple python program which simulates the behaviour of a digital", "os #defining input def Input(): upper_bit_int = int(input(\"Enter \\'first integer\\' from 0 to", "# performing bit operation def bitOperation(upper_bit, lower_bit): result = [] carry = 0", "and 255>=lower_bit_int>=0: #checking the input range min=0 and max=255 print('calculating sum...') else :", "again unless user exits quit = input(\"Do you want to add again Y/N", "defining and gate def andGate(bitOne, bitTwo): return bitOne & bitTwo # defining or", "# first input from the user lower_bit_int= int(input(\"Enter \\'second integer\\' from 0 to", "add again Y/N : \") if quit.lower() == 'n': exit() elif quit.lower() ==", "\")) # first input from the user lower_bit_int= int(input(\"Enter \\'second integer\\' from 0", "and max=255 
print('calculating sum...') else : print('Error: Input is not in range') print('Restarting...')", "def compliment(bitValue): return ~bitValue # defining xor gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne,", "list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))] # output of bit operation", "quit.lower() == 'n': exit() elif quit.lower() == 'y': print() Output() print('This is a", "0 to 255 : \")) # first input from the user lower_bit_int= int(input(\"Enter", "gate def andGate(bitOne, bitTwo): return bitOne & bitTwo # defining or gate def", "andGate(bitOne, bitTwo): return bitOne & bitTwo # defining or gate def orGate(bitOne, bitTwo):", "not gate def compliment(bitValue): return ~bitValue # defining xor gate def xorGate(bitOne, bitTwo):", "bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry def calculateCarry(a, b, c,", "upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x in", "for x in list('{:08b}'.format(lower_bit_int))] # output of bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit)))", "= [int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))]", "print() Output() print('This is a python program which simulates the behaviour of a", "input(\"Do you want to add again Y/N : \") if quit.lower() == 'n':", "from the user lower_bit_int= int(input(\"Enter \\'second integer\\' from 0 to 255 : \"))", "# defining and gate def andGate(bitOne, bitTwo): return bitOne & bitTwo # defining", "for x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))] # output", "quit = input(\"Do you want to add again Y/N : \") if quit.lower()", "the user lower_bit_int= int(input(\"Enter \\'second integer\\' from 0 to 255 : \")) #", "gate def compliment(bitValue): return ~bitValue # defining xor gate def xorGate(bitOne, bitTwo): return", "import os #defining input def Input(): upper_bit_int = int(input(\"Enter \\'first integer\\' from 0", "return bitOne & bitTwo # defining or gate def orGate(bitOne, bitTwo): return bitOne", "to 255 : \")) # second input from the user return(upper_bit_int, lower_bit_int) #", "range') print('Restarting...') print() del upper_bit_int del lower_bit_int Output() upper_bit = [int(x) for x", "def andGate(bitOne, bitTwo): return bitOne & bitTwo # defining or gate def orGate(bitOne,", "xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry def calculateCarry(a, b,", "def Output(): upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input", "= int(input(\"Enter \\'first integer\\' from 0 to 255 : \")) # first input", "x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))] # output of", "[] carry = 0 for index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal,", "# second input from the user return(upper_bit_int, lower_bit_int) # defining and gate def", "for index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index],", "after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) 
result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry)", "numbers using different logical gates. import sys import os #defining input def Input():", "printing the sum def Output(): upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0:", "in range') print('Restarting...') print() del upper_bit_int del lower_bit_int Output() upper_bit = [int(x) for", "'n': exit() elif quit.lower() == 'y': print() Output() print('This is a python program", "A simple python program which simulates the behaviour of a digital circuit performing", "upper_bit_int = int(input(\"Enter \\'first integer\\' from 0 to 255 : \")) # first", "compliment(bitValue): return ~bitValue # defining xor gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)),", "= calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result)) # Final processing and printing", "& bitTwo # defining or gate def orGate(bitOne, bitTwo): return bitOne | bitTwo", "carry def calculateCarry(a, b, c, d): return orGate(andGate(a,b), andGate(c,d)) # performing bit operation", "print('Error: Input is not in range') print('Restarting...') print() del upper_bit_int del lower_bit_int Output()", "program which simulates the behaviour of a digital circuit performing integer addition.') print()", "binary numbers using different logical gates. import sys import os #defining input def", "the behaviour of a digital circuit performing integer addition. # It adds two", "simulates the behaviour of a digital circuit performing integer addition. # It adds", "is a python program which simulates the behaviour of a digital circuit performing", "user lower_bit_int= int(input(\"Enter \\'second integer\\' from 0 to 255 : \")) # second", "# byte-adder.py # A simple python program which simulates the behaviour of a", "bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e in result)", "[int(x) for x in list('{:08b}'.format(lower_bit_int))] # output of bit operation result = bitOperation(list(reversed(upper_bit)),", "operation = ', final_sum_int ,' In Binary = ',final_sum_bin) print() # loop for", "def Input(): upper_bit_int = int(input(\"Enter \\'first integer\\' from 0 to 255 : \"))", "= ', final_sum_int ,' In Binary = ',final_sum_bin) print() # loop for adding", ": \") if quit.lower() == 'n': exit() elif quit.lower() == 'y': print() Output()", "logical gates. import sys import os #defining input def Input(): upper_bit_int = int(input(\"Enter", "# calculating carry def calculateCarry(a, b, c, d): return orGate(andGate(a,b), andGate(c,d)) # performing", "two 8 bit binary numbers using different logical gates. import sys import os", "adds two 8 bit binary numbers using different logical gates. 
import sys import", "python program which simulates the behaviour of a digital circuit performing integer addition.", "simple python program which simulates the behaviour of a digital circuit performing integer", "'y': print() Output() print('This is a python program which simulates the behaviour of", "calculateCarry(a, b, c, d): return orGate(andGate(a,b), andGate(c,d)) # performing bit operation def bitOperation(upper_bit,", "gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry def", "in result),2) print('Integer sum using bit operation = ', final_sum_int ,' In Binary", "behaviour of a digital circuit performing integer addition. # It adds two 8", "= xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return", "= [] carry = 0 for index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index])", "\")) # second input from the user return(upper_bit_int, lower_bit_int) # defining and gate", "= int(''.join(str(e) for e in result),2) print('Integer sum using bit operation = ',", "#defining input def Input(): upper_bit_int = int(input(\"Enter \\'first integer\\' from 0 to 255", "bit operation def bitOperation(upper_bit, lower_bit): result = [] carry = 0 for index", ": \")) # second input from the user return(upper_bit_int, lower_bit_int) # defining and", "adding two integers again and again unless user exits quit = input(\"Do you", "lower_bit_int = Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range min=0 and", "user exits quit = input(\"Do you want to add again Y/N : \")", "~bitValue # defining xor gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo))", "you want to add again Y/N : \") if quit.lower() == 'n': exit()", "''.join(str(e) for e in result) final_sum_int = int(''.join(str(e) for e in result),2) print('Integer", "int(input(\"Enter \\'second integer\\' from 0 to 255 : \")) # second input from", "import sys import os #defining input def Input(): upper_bit_int = int(input(\"Enter \\'first integer\\'", "return bitOne | bitTwo # defining not gate def compliment(bitValue): return ~bitValue #", "a python program which simulates the behaviour of a digital circuit performing integer", "index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index],", "and gate def andGate(bitOne, bitTwo): return bitOne & bitTwo # defining or gate", "e in result),2) print('Integer sum using bit operation = ', final_sum_int ,' In", "different logical gates. import sys import os #defining input def Input(): upper_bit_int =", "0 to 255 : \")) # second input from the user return(upper_bit_int, lower_bit_int)", "# defining not gate def compliment(bitValue): return ~bitValue # defining xor gate def", "bit binary numbers using different logical gates. import sys import os #defining input", "return(upper_bit_int, lower_bit_int) # defining and gate def andGate(bitOne, bitTwo): return bitOne & bitTwo", "8 bit binary numbers using different logical gates. 
import sys import os #defining", "sum using bit operation = ', final_sum_int ,' In Binary = ',final_sum_bin) print()", "# A simple python program which simulates the behaviour of a digital circuit", "digital circuit performing integer addition. # It adds two 8 bit binary numbers", "exit() elif quit.lower() == 'y': print() Output() print('This is a python program which", "quit.lower() == 'y': print() Output() print('This is a python program which simulates the", "\\'first integer\\' from 0 to 255 : \")) # first input from the", "print('calculating sum...') else : print('Error: Input is not in range') print('Restarting...') print() del", "min=0 and max=255 print('calculating sum...') else : print('Error: Input is not in range')", "sys import os #defining input def Input(): upper_bit_int = int(input(\"Enter \\'first integer\\' from", "xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result))", ",' In Binary = ',final_sum_bin) print() # loop for adding two integers again", "after_xor_cal, carry) result.append(carry) return list(reversed(result)) # Final processing and printing the sum def", "upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range min=0", "for adding two integers again and again unless user exits quit = input(\"Do", "255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range min=0 and max=255 print('calculating sum...') else", "using bit operation = ', final_sum_int ,' In Binary = ',final_sum_bin) print() #", "input from the user lower_bit_int= int(input(\"Enter \\'second integer\\' from 0 to 255 :", "def calculateCarry(a, b, c, d): return orGate(andGate(a,b), andGate(c,d)) # performing bit operation def", "# It adds two 8 bit binary numbers using different logical gates. import", "integer\\' from 0 to 255 : \")) # second input from the user", "performing bit operation def bitOperation(upper_bit, lower_bit): result = [] carry = 0 for", "user return(upper_bit_int, lower_bit_int) # defining and gate def andGate(bitOne, bitTwo): return bitOne &", "compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry def calculateCarry(a, b, c, d): return orGate(andGate(a,b),", "range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry = calculateCarry(upper_bit[index], lower_bit[index], after_xor_cal, carry)", "max=255 print('calculating sum...') else : print('Error: Input is not in range') print('Restarting...') print()", "lower_bit_int Output() upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit = [int(x) for", "orGate(bitOne, bitTwo): return bitOne | bitTwo # defining not gate def compliment(bitValue): return", "addition. # It adds two 8 bit binary numbers using different logical gates.", "from 0 to 255 : \")) # second input from the user return(upper_bit_int,", "gate def orGate(bitOne, bitTwo): return bitOne | bitTwo # defining not gate def", "= [int(x) for x in list('{:08b}'.format(lower_bit_int))] # output of bit operation result =", "for e in result) final_sum_int = int(''.join(str(e) for e in result),2) print('Integer sum", "It adds two 8 bit binary numbers using different logical gates. 
import sys", "bitTwo): return bitOne | bitTwo # defining not gate def compliment(bitValue): return ~bitValue", "0 for index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry =", "#checking the input range min=0 and max=255 print('calculating sum...') else : print('Error: Input", "del upper_bit_int del lower_bit_int Output() upper_bit = [int(x) for x in list('{:08b}'.format(upper_bit_int))] lower_bit", "lower_bit_int= int(input(\"Enter \\'second integer\\' from 0 to 255 : \")) # second input", "xor gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry", "= ''.join(str(e) for e in result) final_sum_int = int(''.join(str(e) for e in result),2)", "result),2) print('Integer sum using bit operation = ', final_sum_int ,' In Binary =", "circuit performing integer addition. # It adds two 8 bit binary numbers using", "input def Input(): upper_bit_int = int(input(\"Enter \\'first integer\\' from 0 to 255 :", "e in result) final_sum_int = int(''.join(str(e) for e in result),2) print('Integer sum using", "final_sum_bin = ''.join(str(e) for e in result) final_sum_int = int(''.join(str(e) for e in", "print() # loop for adding two integers again and again unless user exits", "# defining xor gate def xorGate(bitOne, bitTwo): return orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) #", "== 'y': print() Output() print('This is a python program which simulates the behaviour", "processing and printing the sum def Output(): upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0", "integer\\' from 0 to 255 : \")) # first input from the user", "In Binary = ',final_sum_bin) print() # loop for adding two integers again and", "return orGate(andGate(a,b), andGate(c,d)) # performing bit operation def bitOperation(upper_bit, lower_bit): result = []", "Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the input range min=0 and max=255 print('calculating", "Input is not in range') print('Restarting...') print() del upper_bit_int del lower_bit_int Output() upper_bit", "sum def Output(): upper_bit_int, lower_bit_int = Input() if 255>=upper_bit_int>=0 and 255>=lower_bit_int>=0: #checking the", ": \")) # first input from the user lower_bit_int= int(input(\"Enter \\'second integer\\' from", "def bitOperation(upper_bit, lower_bit): result = [] carry = 0 for index in range(len(upper_bit)):", "bitTwo # defining or gate def orGate(bitOne, bitTwo): return bitOne | bitTwo #", "= 0 for index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index], lower_bit[index]) result.append(xorGate(after_xor_cal, carry)) carry", "d): return orGate(andGate(a,b), andGate(c,d)) # performing bit operation def bitOperation(upper_bit, lower_bit): result =", "lower_bit = [int(x) for x in list('{:08b}'.format(lower_bit_int))] # output of bit operation result", "orGate(andGate(bitOne, compliment(bitTwo)), andGate(compliment(bitOne), bitTwo)) # calculating carry def calculateCarry(a, b, c, d): return", "two integers again and again unless user exits quit = input(\"Do you want", "andGate(compliment(bitOne), bitTwo)) # calculating carry def calculateCarry(a, b, c, d): return orGate(andGate(a,b), andGate(c,d))", "result = [] carry = 0 for index in range(len(upper_bit)): after_xor_cal = xorGate(upper_bit[index],", "which simulates the behaviour of a digital circuit performing integer addition. 
# It", "lower_bit[index], after_xor_cal, carry) result.append(carry) return list(reversed(result)) # Final processing and printing the sum", "input range min=0 and max=255 print('calculating sum...') else : print('Error: Input is not", "range min=0 and max=255 print('calculating sum...') else : print('Error: Input is not in", "of bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for e in", "# output of bit operation result = bitOperation(list(reversed(upper_bit)), list(reversed(lower_bit))) final_sum_bin = ''.join(str(e) for", "bitTwo)) # calculating carry def calculateCarry(a, b, c, d): return orGate(andGate(a,b), andGate(c,d)) #", "bitOperation(upper_bit, lower_bit): result = [] carry = 0 for index in range(len(upper_bit)): after_xor_cal", "Final processing and printing the sum def Output(): upper_bit_int, lower_bit_int = Input() if", ": print('Error: Input is not in range') print('Restarting...') print() del upper_bit_int del lower_bit_int", "print('Integer sum using bit operation = ', final_sum_int ,' In Binary = ',final_sum_bin)" ]
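Since the adder above never produces more than nine output bits, it can be checked exhaustively against Python's built-in addition. The harness below is a sketch that is not part of the original file; it assumes the gate functions and bitOperation defined above are in scope, and int_to_bits / add_via_gates are hypothetical helpers introduced here only for the test:

# brute-force self-check for the gate-level adder (hypothetical, not in the original file)
def int_to_bits(n, width=8):
    # MSB-first bit list, same layout as '{:08b}'.format(n)
    return [int(x) for x in '{:0{w}b}'.format(n, w=width)]

def add_via_gates(a, b):
    # bitOperation expects LSB-first input lists and returns an MSB-first 9-bit result
    bits = bitOperation(list(reversed(int_to_bits(a))), list(reversed(int_to_bits(b))))
    return int(''.join(str(e) for e in bits), 2)

assert all(add_via_gates(a, b) == a + b for a in range(256) for b in range(256))
print('gate-level adder agrees with + for all 8-bit pairs')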
#!/usr/bin/env python
import sys
import logging

from pymccelib import *

logging.basicConfig(level=logging.DEBUG, format='%(levelname)-s: %(message)s')

if __name__ == "__main__":
    env.init()
    prot = Protein()
    prot.load_nativepdb(env.prm["INPDB"])

    # identify N and C terminal
    if env.prm["TERMINALS"].upper() == "T":
        prot.identify_nc()

    # remove exposed water

    # Disulfide bridge

    lines = prot.pdblines()
    with open(env.fn_step1_out, "w") as fh:
        fh.writelines(lines)
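One detail of the final write-out is easy to trip over: writelines() does not append newline characters, so prot.pdblines() must already return strings that end in '\n'. A standalone illustration with dummy records (no pymccelib required; the file name and record contents are made up):

lines = ["HEADER    step 1 output\n",
         "END\n"]
with open("step1_out.pdb", "w") as fh:   # the with-block guarantees flush and close
    fh.writelines(lines)                 # writes the strings verbatim, no '\n' added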
# -*- coding: utf-8 -*-

#
# Author: <NAME>, Finland 2015
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#


class OrderlistManager():

    def __init__(self):
        self._controller = None
        self._session = None

    def set_controller(self, controller):
        self._controller = controller
        self._session = controller.get_session()

    def set_orderlist_selection(self, selection):
        self._session.set_orderlist_selection(selection)

    def get_orderlist_selection(self):
        return self._session.get_orderlist_selection()
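OrderlistManager is a thin facade: it keeps no state of its own beyond the controller and session references, and forwards selection access to the session. A minimal usage sketch with stand-in objects (the stub classes below are hypothetical, not Kunquat's real Controller or Session):

class _StubSession():

    def __init__(self):
        self._selection = None

    def set_orderlist_selection(self, selection):
        self._selection = selection

    def get_orderlist_selection(self):
        return self._selection


class _StubController():

    def __init__(self):
        self._session = _StubSession()

    def get_session(self):
        return self._session


manager = OrderlistManager()
manager.set_controller(_StubController())
manager.set_orderlist_selection(3)
assert manager.get_orderlist_selection() == 3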
#!/usr/bin/env python
import lasagne
from lasagne.layers.conv import Conv2DLayer as Conv2DLayer
from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer
from lasagne.layers import batch_norm
from lasagne.nonlinearities import elu, sigmoid, rectify

from lasagne_wrapper.network import SegmentationNetwork
from lasagne_wrapper.training_strategy import get_binary_segmentation_TrainingStrategy, get_categorical_segmentation_TrainingStrategy
from lasagne_wrapper.batch_iterators import get_batch_iterator
from lasagne_wrapper.learn_rate_shedules import get_stepwise
from lasagne_wrapper.parameter_updates import get_update_momentum

Network = SegmentationNetwork

INPUT_SHAPE = [1, 256, 256]

nonlin = elu


def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify, pad='same', name='conv'):
    """ convolution block with batch normalization """
    in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size,
                           nonlinearity=nonlinearity, pad=pad, name=name)
    in_layer = batch_norm(in_layer)
    return in_layer


def build_model():
    """ Compile net architecture """
    l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input')
    net1 = batch_norm(l_in)

    # --- preprocessing ---
    net1 = conv_bn(net1, num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc')

    # number of filters in the first layer,
    # doubled in each encoder block and halved again in the decoder
    nf0 = 16

    # --- encoder ---
    net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    p1 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool1')

    net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    p2 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool2')

    net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    p3 = net1
    net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3')

    net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')

    # --- decoder ---
    net1 = TransposedConv2DLayer(net1, num_filters=4 * nf0, filter_size=2, stride=2, name='upconv')
    net1 = ConcatLayer((p3, net1), name='concat')
    net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')

    net1 = TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2, name='upconv')
    net1 = ConcatLayer((p2, net1), name='concat')
    net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')

    net1 = TransposedConv2DLayer(net1, num_filters=nf0, filter_size=2, stride=2, name='upconv')
    net1 = ConcatLayer((p1, net1), name='concat')
    net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')
    net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same')

    net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation')

    return net1


# prepare training strategy
train_strategy = get_binary_segmentation_TrainingStrategy(batch_size=2, max_epochs=1000, samples_per_epoch=250,
                                                          patience=300, ini_learning_rate=0.2, L2=None,
                                                          use_weights=False,
                                                          adapt_learn_rate=get_stepwise(k=1000, factor=0.5),
                                                          update_function=get_update_momentum(0.9),
                                                          valid_batch_iter=get_batch_iterator(),
                                                          train_batch_iter=get_batch_iterator())
net1), name='concat') net1 = conv_bn(net1, num_filters=2 * nf0,", "num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') p1 =", "pad='same') net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc') # number of filters", "256] nonlin = elu def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify, pad='same', name='conv'): \"\"\" convolution", "nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = TransposedConv2DLayer(net1, num_filters=nf0, filter_size=2, stride=2, name='upconv') net1 =", "num_filters=4 * nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p3, net1), name='concat') net1 =", "nonlinearity=nonlin, pad='same') # --- decoder --- net1 = TransposedConv2DLayer(net1, num_filters=4 * nf0, filter_size=2,", "net1 = ConcatLayer((p3, net1), name='concat') net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin,", "num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation') return net1 # prepare training strategy train_strategy =", "with batch normalization \"\"\" in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size, nonlinearity=nonlinearity, pad=pad, name=name) in_layer", "net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc') # number of filters in", "pad='same') p3 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3') net1 = conv_bn(net1,", "= ConcatLayer((p1, net1), name='concat') net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 =", "# prepare training strategy train_strategy = get_binary_segmentation_TrainingStrategy(batch_size=2, max_epochs=1000, samples_per_epoch=250, patience=300, ini_learning_rate=0.2, L2=None, use_weights=False,", "nf0, filter_size=3, nonlinearity=nonlin, pad='same') p2 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool2')", "INPUT_SHAPE = [1, 256, 256] nonlin = elu def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify,", "with with batch normalization \"\"\" in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size, nonlinearity=nonlinearity, pad=pad, name=name)", "import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer from lasagne.nonlinearities import elu, sigmoid, rectify from lasagne.layers import", "in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size, nonlinearity=nonlinearity, pad=pad, name=name) in_layer = batch_norm(in_layer) return in_layer", "stride=2, name='pool3') net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 =", "filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') #", "256, 256] nonlin = elu def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify, pad='same', name='conv'): \"\"\"", "pad='same') p2 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool2') net1 = conv_bn(net1,", "= conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=4 *", "= conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') p2 = net1 net1 =", "nonlinearity=nonlin, pad='same') net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', 
name='segmentation') return net1 #", "rectify from lasagne.layers import batch_norm from lasagne_wrapper.network import SegmentationNetwork from lasagne_wrapper.training_strategy import get_binary_segmentation_TrainingStrategy,get_categorical_segmentation_TrainingStrategy", "def conv_bn(in_layer, num_filters, filter_size, nonlinearity=rectify, pad='same', name='conv'): \"\"\" convolution block with with batch", "strategy train_strategy = get_binary_segmentation_TrainingStrategy(batch_size=2, max_epochs=1000, samples_per_epoch=250, patience=300, ini_learning_rate=0.2, L2=None, use_weights=False, adapt_learn_rate=get_stepwise(k=1000, factor=0.5), update_function=get_update_momentum(0.9),", "= MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool2') net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin,", "= Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation') return net1 # prepare training strategy", "conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = TransposedConv2DLayer(net1, num_filters=nf0, filter_size=2, stride=2,", "num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation') return", "net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid,", "filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') p2", "stride=2, name='pool2') net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 =", "filter_size=3, nonlinearity=nonlin, pad='same') # --- decoder --- net1 = TransposedConv2DLayer(net1, num_filters=4 * nf0,", "nonlinearity=nonlin, pad='same', name='color_deconv_preproc') # number of filters in first layer # decreased by", "train_strategy = get_binary_segmentation_TrainingStrategy(batch_size=2, max_epochs=1000, samples_per_epoch=250, patience=300, ini_learning_rate=0.2, L2=None, use_weights=False, adapt_learn_rate=get_stepwise(k=1000, factor=0.5), update_function=get_update_momentum(0.9), valid_batch_iter=get_batch_iterator(),", "TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p2, net1), name='concat') net1", "decreased by factor 2 in each block nf0 = 16 # --- encoder", "net1 = ConcatLayer((p2, net1), name='concat') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin,", "pad='same') net1 = TransposedConv2DLayer(net1, num_filters=nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p1, net1), name='concat')", "nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 =", "nf0, filter_size=3, nonlinearity=nonlin, pad='same') # --- decoder --- net1 = TransposedConv2DLayer(net1, num_filters=4 *", "nonlinearity=nonlin, pad='same') p3 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3') net1 =", "return in_layer def build_model(): \"\"\" Compile net architecture \"\"\" l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0],", "stride=2, name='upconv') net1 = ConcatLayer((p1, net1), name='concat') net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin,", "Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size, 
nonlinearity=nonlinearity, pad=pad, name=name) in_layer = batch_norm(in_layer) return in_layer def build_model():", "nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p3, net1), name='concat') net1 = conv_bn(net1, num_filters=4", "pool_size=2, stride=2, name='pool3') net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1", "pad='same') net1 = TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p2,", "conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') # --- decoder --- net1 =", "num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc') #", "# --- encoder --- net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 =", "net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3') net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3,", "net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3') net1 = conv_bn(net1, num_filters=8 * nf0,", "import get_binary_segmentation_TrainingStrategy,get_categorical_segmentation_TrainingStrategy from lasagne_wrapper.batch_iterators import get_batch_iterator from lasagne_wrapper.learn_rate_shedules import get_stepwise from lasagne_wrapper.parameter_updates import", "filter_size=1, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc') # number", "in_layer = batch_norm(in_layer) return in_layer def build_model(): \"\"\" Compile net architecture \"\"\" l_in", "num_filters=nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p1, net1), name='concat') net1 = conv_bn(net1, num_filters=nf0,", "python import lasagne from lasagne.layers.conv import Conv2DLayer as Conv2DLayer from lasagne.layers import MaxPool2DLayer,", "nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 =", "pad='same') net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') p3 = net1", "net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=2", "filter_size=3, nonlinearity=nonlin, pad='same') net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation') return net1", "conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') p1 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2,", "num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') # --- decoder --- net1 = TransposedConv2DLayer(net1,", "SegmentationNetwork INPUT_SHAPE = [1, 256, 256] nonlin = elu def conv_bn(in_layer, num_filters, filter_size,", "lasagne_wrapper.network import SegmentationNetwork from lasagne_wrapper.training_strategy import get_binary_segmentation_TrainingStrategy,get_categorical_segmentation_TrainingStrategy from lasagne_wrapper.batch_iterators import get_batch_iterator from lasagne_wrapper.learn_rate_shedules", "\"\"\" l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input') net1 = batch_norm(l_in) # ---", "build_model(): \"\"\" Compile net architecture \"\"\" l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input')", "num_filters=2 * nf0, 
filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p2, net1), name='concat') net1 =", "import Conv2DLayer as Conv2DLayer from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer from lasagne.nonlinearities import", "pad='same') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') p2 = net1", "pad='same') net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = TransposedConv2DLayer(net1,", "MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer from lasagne.nonlinearities import elu, sigmoid, rectify from lasagne.layers import batch_norm", "\"\"\" convolution block with with batch normalization \"\"\" in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size,", "conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') p1", "# --- decoder --- net1 = TransposedConv2DLayer(net1, num_filters=4 * nf0, filter_size=2, stride=2, name='upconv')", "--- decoder --- net1 = TransposedConv2DLayer(net1, num_filters=4 * nf0, filter_size=2, stride=2, name='upconv') net1", "filter_size, nonlinearity=rectify, pad='same', name='conv'): \"\"\" convolution block with with batch normalization \"\"\" in_layer", "INPUT_SHAPE[2]), name='Input') net1 = batch_norm(l_in) # --- preprocessing --- net1 = conv_bn(net1, num_filters=10,", "pad='same') net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') p1 = net1 net1 =", "= TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p2, net1), name='concat')", "num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') p3 = net1 net1 = MaxPool2DLayer(net1, pool_size=2,", "architecture \"\"\" l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input') net1 = batch_norm(l_in) #", "of filters in first layer # decreased by factor 2 in each block", "ConcatLayer, TransposedConv2DLayer from lasagne.nonlinearities import elu, sigmoid, rectify from lasagne.layers import batch_norm from", "net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool1') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3,", "\"\"\" Compile net architecture \"\"\" l_in = lasagne.layers.InputLayer(shape=(None, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]), name='Input') net1", "name='conv'): \"\"\" convolution block with with batch normalization \"\"\" in_layer = Conv2DLayer(in_layer, num_filters=num_filters,", "conv_bn(net1, num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin, pad='same', name='color_deconv_preproc')", "nf0 = 16 # --- encoder --- net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin,", "lasagne.layers.conv import Conv2DLayer as Conv2DLayer from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer from lasagne.nonlinearities", "Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation') return net1 # prepare training strategy train_strategy", "net1 = TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p2, net1),", "conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') p2 = net1 net1 = MaxPool2DLayer(net1,", "p2 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, 
stride=2, name='pool2') net1 = conv_bn(net1, num_filters=4", "ConcatLayer((p2, net1), name='concat') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1", "conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation')", "each block nf0 = 16 # --- encoder --- net1 = conv_bn(net1, num_filters=nf0,", "stride=2, name='pool1') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 =", "name='Input') net1 = batch_norm(l_in) # --- preprocessing --- net1 = conv_bn(net1, num_filters=10, filter_size=1,", "net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation') return net1 # prepare training", "filter_size=3, nonlinearity=nonlin, pad='same') p3 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool3') net1", "filter_size=3, nonlinearity=nonlin, pad='same') net1 = TransposedConv2DLayer(net1, num_filters=nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p1,", "* nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = TransposedConv2DLayer(net1, num_filters=nf0, filter_size=2, stride=2, name='upconv') net1", "get_binary_segmentation_TrainingStrategy,get_categorical_segmentation_TrainingStrategy from lasagne_wrapper.batch_iterators import get_batch_iterator from lasagne_wrapper.learn_rate_shedules import get_stepwise from lasagne_wrapper.parameter_updates import get_update_momentum", "net1 = batch_norm(l_in) # --- preprocessing --- net1 = conv_bn(net1, num_filters=10, filter_size=1, nonlinearity=nonlin,", "net1 = conv_bn(net1, num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=1, filter_size=1, nonlinearity=nonlin,", "* nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2,", "= TransposedConv2DLayer(net1, num_filters=4 * nf0, filter_size=2, stride=2, name='upconv') net1 = ConcatLayer((p3, net1), name='concat')", "nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=8 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')", "MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool1') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')", "ConcatLayer((p3, net1), name='concat') net1 = conv_bn(net1, num_filters=4 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1", "first layer # decreased by factor 2 in each block nf0 = 16", "from lasagne_wrapper.batch_iterators import get_batch_iterator from lasagne_wrapper.learn_rate_shedules import get_stepwise from lasagne_wrapper.parameter_updates import get_update_momentum Network", "lasagne.layers import batch_norm from lasagne_wrapper.network import SegmentationNetwork from lasagne_wrapper.training_strategy import get_binary_segmentation_TrainingStrategy,get_categorical_segmentation_TrainingStrategy from lasagne_wrapper.batch_iterators", "filter_size=3, nonlinearity=nonlin, pad='same') net1 = TransposedConv2DLayer(net1, num_filters=2 * nf0, filter_size=2, stride=2, name='upconv') net1", "# decreased by factor 2 in each block nf0 = 16 # ---", "nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same')", "--- net1 = conv_bn(net1, num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same') 
net1 = conv_bn(net1, num_filters=1, filter_size=1,", "TransposedConv2DLayer from lasagne.nonlinearities import elu, sigmoid, rectify from lasagne.layers import batch_norm from lasagne_wrapper.network", "net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2, name='pool2') net1 = conv_bn(net1, num_filters=4 * nf0,", "--- net1 = conv_bn(net1, num_filters=nf0, filter_size=3, nonlinearity=nonlin, pad='same') net1 = conv_bn(net1, num_filters=nf0, filter_size=3,", "lasagne_wrapper.batch_iterators import get_batch_iterator from lasagne_wrapper.learn_rate_shedules import get_stepwise from lasagne_wrapper.parameter_updates import get_update_momentum Network =", "num_filters=num_filters, filter_size=filter_size, nonlinearity=nonlinearity, pad=pad, name=name) in_layer = batch_norm(in_layer) return in_layer def build_model(): \"\"\"", "normalization \"\"\" in_layer = Conv2DLayer(in_layer, num_filters=num_filters, filter_size=filter_size, nonlinearity=nonlinearity, pad=pad, name=name) in_layer = batch_norm(in_layer)", "lasagne from lasagne.layers.conv import Conv2DLayer as Conv2DLayer from lasagne.layers import MaxPool2DLayer, ConcatLayer, TransposedConv2DLayer", "net1 = conv_bn(net1, num_filters=2 * nf0, filter_size=3, nonlinearity=nonlin, pad='same') p2 = net1 net1", "= batch_norm(l_in) # --- preprocessing --- net1 = conv_bn(net1, num_filters=10, filter_size=1, nonlinearity=nonlin, pad='same')", "* nf0, filter_size=3, nonlinearity=nonlin, pad='same') p2 = net1 net1 = MaxPool2DLayer(net1, pool_size=2, stride=2,", "pad='same') net1 = Conv2DLayer(net1, num_filters=1, filter_size=1, nonlinearity=sigmoid, pad='same', name='segmentation') return net1 # prepare" ]
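Because every convolution uses `pad='same'` and each pooling is undone by a matching up-convolution, the output map has the same spatial size as the input. A minimal sanity check, assuming Lasagne and Theano are installed and the module above is importable (the `SegmentationNetwork`/`train_strategy` wiring lives inside `lasagne_wrapper` and is not shown here):

```python
# Illustrative shape/parameter check for build_model().
import lasagne

net = build_model()
# For a 1x256x256 input, three 2x2 poolings mirrored by three 2x2
# up-convolutions should give back a (None, 1, 256, 256) output.
print(lasagne.layers.get_output_shape(net))  # e.g. (None, 1, 256, 256)
print(lasagne.layers.count_params(net))      # total number of parameters
```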
The remaining listing is `late.py`, a TensorFlow 1.x sparse-input autoencoder for imputing single-cell RNA-seq expression matrices (cells in rows, genes in columns), built on a helper module `scimpute`. First the imports, a small RAM-usage helper, and the data reader:

```python
import sys
import os
import math
import time
import gc
import argparse
from importlib.machinery import SourceFileLoader

import numpy as np
import pandas as pd
import psutil
import tensorflow as tf
from scipy.sparse import csr_matrix
import matplotlib
matplotlib.use('Agg')

import scimpute


def usage():
    """Current RAM usage of this process in MB."""
    process = psutil.Process(os.getpid())
    ram = process.memory_info()[0] / float(2 ** 20)
    ram = round(ram, 1)
    return ram

# sys.path.append('./bin')
# print('sys.path', sys.path)
# print('python version:', sys.version)
# print('tf.__version__', tf.__version__)


def read_data(p):
    """Read the input expression matrix into a cell-row CSR matrix."""
    if p.fname_input.endswith('.h5'):
        # for 10x genomics large h5 files
        input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input)
        input_matrix = input_obj.matrix
        gene_ids = input_obj.gene_ids
        cell_ids = input_obj.barcodes
        print('RAM usage after reading sparse matrix: {} M'.format(usage()))
        # Data transformation
        print('> DATA TRANSFORMATION..')
        input_matrix = scimpute.sparse_matrix_transformation(input_matrix, p.transformation_input)
        del(input_obj)
        gc.collect()
        print('RAM usage after {} transformation: {} M'.format(p.transformation_input, usage()))
        # Test or not: m*n subset (1000 * 300)
        if p.test_flag:
            print('in test mode')
            input_matrix = input_matrix[:p.m, :p.n]
            gene_ids = gene_ids[:p.n]
            cell_ids = cell_ids[:p.m]
            gc.collect()
    else:
        # For smaller files (hd5, csv, csv.gz)
        input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
        print('RAM usage after reading input_df: {} M'.format(usage()))
        # Data transformation
        print('> DATA TRANSFORMATION..')
        input_df = scimpute.df_transformation(
            input_df.transpose(),
            transformation=p.transformation_input
        ).transpose()  # [genes, cells] in df_trans()
        print('pandas input_df mem usage: ')
        input_df.info(memory_usage='deep')
        # Test or not
        if p.test_flag:
            print('in test mode')
            input_df = input_df.ix[:p.m, :p.n]
            gc.collect()
        # To sparse: read into csr, then get rid of input_df
        input_matrix = csr_matrix(input_df)
        gene_ids = input_df.columns
        cell_ids = input_df.index
        print('RAM usage before deleting input_df: {} M'.format(usage()))
        del(input_df)
        gc.collect()  # working on mac
        print('RAM usage after deleting input_df: {} M'.format(usage()))

    # summary of data
    print("name_input:", p.name_input)
    _ = pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20], columns=gene_ids[:4])
    print("input_df:\n", _, "\n")
    m, n = input_matrix.shape  # m: cells, n: genes
    print('input matrix: {} cells, {} genes\n'.format(m, n))
    return input_matrix, gene_ids, cell_ids
```
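The training driver below splits cells into train/validation/test sets with `scimpute.split__csr_matrix`, whose internals are not shown in this listing. A hypothetical stand-in, to make the row-wise split concrete (not the library's code):

```python
# Hypothetical stand-in for scimpute.split__csr_matrix: partition the rows
# (cells) of a CSR matrix into train/valid/test subsets with fractions a/b/c.
import numpy as np

def split_csr_matrix_sketch(mat, a=0.7, b=0.15, c=0.15, seed=3):
    m = mat.shape[0]
    idx = np.random.RandomState(seed).permutation(m)
    n_train, n_valid = int(m * a), int(m * b)
    train_idx = idx[:n_train]
    valid_idx = idx[n_train:n_train + n_valid]
    test_idx = idx[n_train + n_valid:]
    return (mat[train_idx, :], mat[valid_idx, :], mat[test_idx, :],
            train_idx, valid_idx, test_idx)
```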
The training driver. `late_main` reads the data, splits the cells, builds the autoencoder and its losses, then runs mini-batch training with periodic evaluation and snapshots:

```python
def late_main(p, log_dir, rand_state=3):
    ##0. read data and extract gene IDs and cell IDs
    input_matrix, gene_ids, cell_ids = read_data(p)

    ##1. split data and save indexes
    # input: input_matrix, cell_ids
    # return: cell_ids_train, cell_ids_valid, cell_ids_test
    m, n = input_matrix.shape
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
    print('RAM usage after splitting input data is: {} M'.format(usage()))

    # for backward support for older parameter files only:
    # sample_size is 1000 by default; if sample_size is less than the number of
    # cells (m), evaluate on random samples instead of the full matrices
    sample_size = p.sample_size
    if sample_size < m:
        #?? the following sample_input is a matrix sampled randomly; should it
        # be a matrix containing sample_train and sample_valid?
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        rand_idx = np.random.choice(
            range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(
            range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
    else:
        sample_input = input_matrix.todense()
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input_cell_ids = cell_ids
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
    print('sample_train: {}, sample_valid: {}, sample_input {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)))

    ##2. model training and validation
    #2.1 init --> keep this in the main
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n], name='X_input')      # input
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn')          # keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')  # keep_prob for dropout

    #2.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n; return: a_bottleneck, h (= d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state)

    #2.3 define loss
    # input: X, h, p; return: mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)

    #2.4 construct the trainer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)  # (optimizer choice assumed: Adam)
    if p.mse_mode in ('mse_nz', 'mse_omega'):  # ('mse_omega' kept as an older alias; assumed)
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')

    #2.5 init a session according to the run_flag
    sess = tf.Session()
    saver = tf.train.Saver()  # restore variables
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        print('*** In Impute Mode')
        saver.restore(sess, "./step2/step2.ckpt")  # (checkpoint path assumed)
        ## save_whole_imputation
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                              input_matrix, gene_ids, cell_ids, p, m)
        print('imputation finished')
        # toc_stop = time.time()
        # print("reading took {:.1f} seconds".format(toc_stop - tic_start))
        exit()
    else:
        raise Exception('run_flag spelled wrong')

    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)

    # prep mini-batch, and evaluate model
    print('RAM usage after building the model is: {} M'.format(usage()))
    epoch = 0
    num_batch = int(math.floor(len(train_idx) // p.batch_size))  # floor
    epoch_log = []
    mse_nz_batch_vec, mse_nz_valid_vec = [], []
    mse_batch_vec, mse_valid_vec = [], []

    #2.6 pre-training epoch (0): save evaluation results before any training step
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run(
        [mse, mse_nz], feed_dict={X: sample_train, pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run(
        [mse, mse_nz], feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=", round(mse_nz_valid, 3))
    print("mse_train=", round(mse_train, 3), "mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)

    #2.7 training epochs (1-)
    for epoch in range(1, p.max_training_epochs + 1):
        tic_cpu, tic_wall = time.clock(), time.time()
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)

        #2.7.1 training model on mini-batches
        for i in range(num_batch):
            # x_batch
            indices = np.arange(p.batch_size * i, p.batch_size * (i + 1))
            ridx_batch = ridx_full[indices]
            # x_batch = df1_train.ix[ridx_batch, :]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch, pIn_holder: p.pIn, pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.clock(), time.time()

        #2.7.2 display step
        if epoch % p.display_step == 0:
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i + 1, epoch * (i + 1)))
            print('RAM usage: {:0.1f} M'.format(usage()))
            # debug: print('d_w1', sess.run(d_w1[1, 0:4]))  # verified when GradDescent used
            # training mse and mse_nz of the last batch
            mse_batch, mse_nz_batch, h_batch = sess.run(
                [mse, mse_nz, h], feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0})
            # validation mse and mse_nz of the sample validation set (1000)
            mse_valid, mse_nz_valid, Y_valid = sess.run(
                [mse, mse_nz, h], feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
            toc_log = time.time()
            print('mse_nz_batch:{}; mse_nz_valid: {}'.format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
            print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))
            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)
            epoch_log.append(epoch)

        #2.7.3 save snapshot step
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()
            #1. save imputation results
            print("> Impute and save..")
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/sample_imputation.{}.hd5".format(p.stage, p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
            #2. save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)
            #3. save the training and test curve
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,
                                   p.stage, skip=math.floor(epoch / 5 / p.display_step))
            #4. save bottleneck_representation
            print("> save bottleneck_representation")
            code_bottleneck_input = sess.run(a_bottleneck, feed_dict={
                X: sample_input, pIn_holder: 1, pHidden_holder: 1})
            np.save('{}/code_bottleneck.{}.npy'.format(p.stage, p.stage),
                    code_bottleneck_input)  # (output path assumed)
            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            # os.system(
            #     '''for file in {0}/*npy
            #     do python -u weight_clustmap.py $file {0}
            #     done'''.format(p.stage)
            # )
            print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
            print('snapshot_step: {}s'.format(log2_time))

    batch_writer.close()
    valid_writer.close()
    sess.close()
```
Model construction and metrics. `build_late` assembles a symmetric encoder/decoder of 3, 5, or 7 dense layers (selected by `p.L`) from `scimpute` primitives; `build_metrics` defines the plain and the non-zero-masked reconstruction losses:

```python
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3):
    # input: p, X, pIn_holder, pHidden_holder, n
    # return: a_bottleneck, h (= d_a1)
    tf.set_random_seed(rand_state)
    if p.L == 7:  # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # # with tf.name_scope('Encoder_L4'):
        # #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        # # with tf.name_scope('Decoder_L4'):
        # #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:  # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a2
    elif p.L == 3:  # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))
    h = d_a1
    return a_bottleneck, h


def build_metrics(X, h, coef):
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if 0, 1 if > 0; not possibly < 0 in our data
        mse_nz = tf.reduce_mean(
            tf.multiply(
                tf.pow(X - h, 2),
                omega
            )
        )
        mse = tf.reduce_mean(tf.pow(X - h, 2))  # for report
        reg_term = coef * tf.reduce_mean(tf.pow(h, 2))  # (exact penalty form assumed)
        tf.summary.scalar('mse__Y_vs_X', mse)
    return mse_nz, mse, reg_term
```
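The `mse` vs `mse_nz` distinction is the heart of the method: in scRNA-seq, zeros mix true zeros with dropout events, so the omega-masked loss only penalizes error on observed (non-zero) entries. A standalone NumPy illustration with hypothetical values (not part of the script):

```python
import numpy as np

# Toy cell-by-gene matrices; values are hypothetical.
X = np.array([[0.0, 2.0, 0.0],
              [1.0, 0.0, 3.0]])   # observed expression; zeros may be dropouts
h = np.array([[0.5, 1.5, 0.2],
              [1.0, 0.7, 2.0]])   # autoencoder reconstruction

omega = np.sign(X)                        # 1 where X > 0, else 0
mse = np.mean((X - h) ** 2)               # penalizes reconstructions at zeros too
mse_nz = np.mean(((X - h) ** 2) * omega)  # zero entries contribute no error
# Note: like the TF formula above, mse_nz still divides by the total number
# of entries, not by the number of non-zero entries.
print(round(mse, 3), round(mse_nz, 3))    # 0.338 0.208
```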
Imputation output. `fast_imputation` runs the whole (dense) matrix through the network in one pass; `save_whole_imputation` is used in `impute` mode and falls back to block-wise processing when the matrix is too large to hold densely in memory:

```python
def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
    """Impute a dense matrix in one pass and wrap the result in a DataFrame."""
    Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
    # save sample imputation
    Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
    return Y_input_df


def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                          input_matrix, gene_ids, cell_ids, p, m):
    '''calculate and save imputation results for an input matrix at the 'impute' mode.
    If the input matrix is large (m > p.large_size), impute on m//p.sample_size 'folds'
    of small data blocks to avoid high memory cost.
    '''
    if m > p.large_size:
        # impute on small data blocks to avoid high memory cost
        n_out_batches = m // p.sample_size
        print('num_out_batches:', n_out_batches)
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
            for i_ in range(n_out_batches + 1):
                start_idx = i_ * p.sample_size
                end_idx = min((i_ + 1) * p.sample_size, m)
                if start_idx >= end_idx:
                    break  # guard against an empty final block
                print('saving:', start_idx, end_idx)
                x_out_batch = input_matrix[start_idx:end_idx, :].todense()
                y_out_batch = sess.run(
                    h, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                df_out_batch = pd.DataFrame(
                    data=y_out_batch, columns=gene_ids, index=cell_ids[range(start_idx, end_idx)])
                latent_code = sess.run(
                    a_bottleneck, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                latent_code_df = pd.DataFrame(
                    data=latent_code, index=cell_ids[range(start_idx, end_idx)])
                if i_ == 0:
                    df_out_batch.to_csv(handle, float_format='%.6f')
                    latent_code_df.to_csv(handle2, float_format='%.6f')
                    print('RAM usage during mini-batch imputation and saving output: ',
                          '{} M'.format(usage()))
                else:
                    df_out_batch.to_csv(handle, header=None)
                    latent_code_df.to_csv(handle2, header=None)
        handle2.close()
    else:
        # if m (the # of cells) is less than large_size (1e5)
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                                        pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids)
        print('RAM usage during whole data imputation and saving output: ',
              '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
        scimpute.save_hd5(latent_code_df,
                          "{}/latent_code.{}.hd5".format(p.stage, p.stage))  # (path assumed)
```
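Because the blocked writer emits the CSV header only for the first block, the output reads back as a single cell-by-gene frame. For example, with standard pandas (the path assumes `p.stage == 'impute'`, as set by `load_params` below):

```python
import pandas as pd

# Load the block-wise imputation output written by save_whole_imputation().
imputed = pd.read_csv('./impute/imputation.impute.csv', index_col=0)
print(imputed.shape)  # (n_cells, n_genes)
```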
If", "= [], [] #, mse_nz_train_vec = [], [], [] mse_batch_vec, mse_valid_vec = [],", "plotting learning curves') scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, title=\"Learning Curve MSE_NZ.{}\".format(stage), ylabel='MSE_NZ (X vs Y,", "== 0) or (epoch == p.max_training_epochs): tic_log2 = time.time() #1.save imputation results #if", "large_size (1e5)) Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(), pIn_holder: 1, pHidden_holder: 1}) # save", "mse1, mse2_nz, mse2 def analyze_variation_in_genes(X, Y, G, p): '''calculate and visualize standard deviation", "= calculate_MSEs(X, Y, G) # calculate and visualize variation in genes analyze_variation_in_genes(X, Y,", "G.shape, 'imputation dimension: ', Y.shape) print('generating histogram for correlations of genes between ground", "return a_bottleneck, h def build_metrics(X, h, coef): with tf.name_scope(\"Metrics\"): omega = tf.sign(X) #", "range=(std_min, std_max), dir=p.tag) scimpute.hist_df( x_std_df, xlab='Standard Deviation', title='Input({})'.format(p.name_input), range=(std_min, std_max), dir=p.tag) scimpute.hist_df( g_std_df,", "G_j = G.ix[:, j] X_j = X.ix[:, j] except KeyError: print('KeyError: gene ID", "step2/load_saved/learning_rate=0, just impute and output p.stage = 'impute' p.run_flag = 'impute' p.learning_rate =", "title='Imputation ({})'.format(p.name_imputation), xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag) scimpute.heatmap_vis(X.values, title='Input ({})'.format(p.name_input), xlab='Genes', ylab='Cells', vmax=range_max,", "scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info, title=p.name_input, dir=p.tag) scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info, title=p.name_ground_truth, dir=p.tag) def visualize_selected_genes(X, Y, G, p):", "transformation: {} M'.format(p.transformation_input, usage())) # Test or not: m*n subset (1000 * 300).", "+ '/batch', sess.graph) valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph) # prep mini-batch, and", "\"{}/sample_imputation.{}.hd5\".format(p.stage, p.stage)) else: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(), gene_ids, cell_ids)", "to display based on mode # def display_params(p): # PRINT PARAMETERS print('\\nmode:', p.mode)", "used # training mse and mse_nz of the last batch mse_batch, mse_nz_batch, h_batch", "scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder) # todo:", "print('init:', p.run_flag) print('test_mode:', p.test_flag) print('total number of layers: {}'.format(p.L)) for l_tmp in range(1,", "p.stage)) #2.save model print('> Saving model..') save_path = saver.save(sess, log_dir + \"/{}.ckpt\".format(p.stage)) print(\"Model", "load data input_matrix, gene_ids, cell_ids = read_data(p) #4. call late late_main(input_matrix, gene_ids, cell_ids,", "p.reg_coef) print('batch_size:', p.batch_size) print('sample_zie: ', p.sample_size) print('pIn:', p.pIn) print('pHidden:', p.pHidden) print('max_training_epochs:', p.max_training_epochs) print('display_step',", "d_b1, pHidden_holder) # todo: change input activations if model changed # define input/output", "p.display_step)) elif p.mse_mode == 'mse': #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step)) learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,", "#print('tf.__version__', tf.__version__) def late_main(p, log_dir, rand_state=3): ##0. 
read data and extract gene IDs", "p.ori_input) print('transformation_input:', p.transformation_input) if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode", ") latent_code = sess.run( a_bottleneck, feed_dict={ X: x_out_batch, pIn_holder: 1, pHidden_holder: 1 }", "columns=['sd_ratio']) std_ratio_yg_data = [(y/x if x!=0 else None) for y, x in zip(y_std_df.values,", "std_max), dir=p.tag) scimpute.hist_df( std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD', title='',", "Parameters: ----------- skip: epoch_log: mse_batch_vec: mse_valid_vec: stage: step1 or step2 ''' print('> plotting", "print('mse_batch:', mse_batch, '; mse_valid:', mse_valid) print('log time for each epoch: {}\\n'.format(round(toc_log - tic_log,", "fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(), gene_ids, cell_ids) scimpute.save_hd5(Y_input_df, \"{}/imputation.{}.hd5\".format(p.stage, p.stage)) #2.save model", "= scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with", "pHidden_holder) # # with tf.name_scope('Encoder_L4'): # # e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4,", "-u weight_clustmap.py $file {0} # done'''.format(p.stage) # ) print('min_mse_nz_valid till now: {}'.format(min_mse_valid)) print('snapshot_step:", "p.n_hidden_3, p.sd) e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder) # # with tf.name_scope('Encoder_L4'):", "mse_nz_valid_vec, p.stage, skip=math.floor(epoch / 5 / p.display_step)) elif p.mse_mode == 'mse': #learning_curve_mse(skip=math.floor(epoch /", "scimpute.heatmap_vis(Y.values, title='Imputation ({})'.format(p.name_imputation), xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag) scimpute.heatmap_vis(X.values, title='Input ({})'.format(p.name_input), xlab='Genes', ylab='Cells',", "stage), dir=stage) def visualize_weights(sess, stage, en_de_layers): for l1 in range(1, en_de_layers+1): encoder_weight =", "of cells between ground truth and imputation') scimpute.hist_2matrix_corr( G.values, Y.values, title=\"Correlation for each", "e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd) e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3,", "import argparse import tensorflow as tf from importlib.machinery import SourceFileLoader import math import", "log_dir, rand_state=3): ##0. read data and extract gene IDs and cell IDs input_matrix,", "{} genes\\n'.format(m, n)) return input_matrix, gene_ids, cell_ids def load_results(p): '''READ DATA Parameters ------------", "session, imputation p.m = 1000 p.n = 300 p.sample_size = int(240) print('in test", "gc.collect() else: # For smaller files (hd5, csv, csv.gz) input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)", "cells is less than large_size (1e5)) Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(), pIn_holder: 1,", "IDs input_matrix, gene_ids, cell_ids = read_data(p) ##1. 
Analysis-mode utilities: loading the imputation next to its input and ground truth, and computing the four MSE summaries:

```python
def load_results(p):
    '''READ DATA

    Parameters
    ------------
    p: parameters from global_params.py and example.py

    Return
    -----------
    X (input), Y (imputation), G (ground truth) as cell-row DataFrames
    '''
    Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
    X = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)

    # print('> DATA TRANSFORMATION..')
    Y = scimpute.df_transformation(Y.transpose(),
                                   transformation=p.transformation_imputation).transpose()
    # X = scimpute.df_transformation(X.transpose(), transformation=p.transformation_input).transpose()
    if p.fname_input != p.fname_ground_truth:
        G = scimpute.df_transformation(G.transpose(),
                                       transformation=p.transformation_ground_truth).transpose()

    # subset/sort X, G to match Y
    # todo: support sparse matrix
    X = X.loc[Y.index, Y.columns]
    G = G.loc[Y.index, Y.columns]

    # TEST MODE OR NOT
    if p.test_flag:
        print('in test mode')
        Y = Y.ix[0:p.m, 0:p.n]
        G = G.ix[0:p.m, 0:p.n]
        X = X.ix[0:p.m, 0:p.n]

    # INPUT SUMMARY
    print('\nIn this code, matrices should have already been transformed into cell_row')
    print('Y:', p.fname_imputation, p.ori_imputation, p.transformation_imputation,
          '\n', Y.ix[0:20, 0:3])
    print('X:', p.fname_input, p.ori_input, p.transformation_input,
          '\n', X.ix[0:20, 0:3])
    print('G:', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,
          '\n', G.ix[0:20, 0:3])
    print('Y.shape', Y.shape)
    print('X.shape', X.shape)
    print('G.shape', G.shape)
    return X, Y, G


def calculate_MSEs(X, Y, G):
    '''calculate MSEs:
    MSE between imputation and input,
    MSE between imputation and ground truth

    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    '''
    mse1_nz = scimpute.mse_omega(Y, X)  # omega-masked MSE (helper name assumed)
    mse1_nz = round(mse1_nz, 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)
    mse1 = scimpute.mse(Y, X)
    mse1 = round(mse1, 7)
    print('MSE1 between Imputation and Input: ', mse1)
    mse2_nz = scimpute.mse_omega(Y, G)
    mse2_nz = round(mse2_nz, 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)
    mse2 = scimpute.mse(Y, G)
    mse2 = round(mse2, 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)
    return mse1_nz, mse1, mse2_nz, mse2


def analyze_variation_in_genes(X, Y, G, p):
    '''calculate and visualize standard deviation in each gene; write SD ratios to csv files'''
    print('\n> Standard deviation in each gene for input and imputed matrix')
    x_std_df, y_std_df = scimpute.nz_std(X, Y)
    x_std_df, g_std_df = scimpute.nz_std(X, G)
    std_ratio_yx_data = [(y / x if x != 0 else None)
                         for y, x in zip(y_std_df.values, x_std_df.values)]
    std_ratio_yx_df = pd.DataFrame(data=std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])
    std_ratio_yg_data = [(y / x if x != 0 else None)
                         for y, x in zip(y_std_df.values, g_std_df.values)]
    std_ratio_yg_df = pd.DataFrame(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])
    # shared histogram range (definition reconstructed from the plotting calls)
    std_max = max(y_std_df.values.max(), x_std_df.values.max(), g_std_df.values.max())
    std_min = min(y_std_df.values.min(), x_std_df.values.min(), g_std_df.values.min())
    scimpute.hist_df(y_std_df, xlab='Standard Deviation',
                     title='Imputation({})'.format(p.name_imputation),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(x_std_df, xlab='Standard Deviation',
                     title='Input({})'.format(p.name_input),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(g_std_df, xlab='Standard Deviation',
                     title='Ground Truth({})'.format(p.name_ground_truth),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv')
    std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')
```
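The SD ratios above quantify how much the autoencoder smooths each gene: a ratio well below 1 means the imputation compressed that gene's variation. A toy pandas illustration with hypothetical frames (not the script's data):

```python
import pandas as pd

X = pd.DataFrame({'g1': [0.0, 1.0, 2.0], 'g2': [3.0, 0.0, 1.0]})  # input
Y = pd.DataFrame({'g1': [0.5, 1.1, 1.8], 'g2': [2.4, 0.3, 1.2]})  # imputation

# per-gene SD ratio, imputation vs input
sd_ratio = Y.std() / X.std()
print(sd_ratio)
```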
Global and per-gene visualizations, and the analysis-mode driver:

```python
def visualize_all_genes(X, Y, G, p):
    '''generate plots using all genes

    Return
    -----------
    None
    '''
    # histograms of gene expression
    max_expression = max(G.values.max(), X.values.max(), Y.values.max())
    min_expression = min(G.values.min(), X.values.min(), Y.values.min())
    print('\n max expression:', max_expression)
    print('\n min expression:', min_expression)
    scimpute.hist_df(Y, xlab='Expression', title='Imputation({})'.format(p.name_imputation),
                     dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(X, xlab='Expression', title='Input({})'.format(p.name_input),
                     dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth),
                     dir=p.tag, range=[min_expression, max_expression])

    # histograms of correlations between ground truth and imputation
    print('\n> Correlations between ground truth and imputation')
    print('ground truth dimension: ', G.shape, 'imputation dimension: ', Y.shape)
    print('generating histogram for correlations of genes between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each gene\n(Ground_truth vs Imputation)\n{}\n{}".format(
            p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='column-wise', nz_mode='first'  # or ignore
    )
    print('generating histogram for correlations of cells between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each cell\n(Ground_truth vs Imputation)\n{}\n{}".format(
            p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='row-wise', nz_mode='first'
    )

    # heatmaps of data matrices
    print('\n> Generating heatmaps of data matrices')
    range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values, X.values])
    print('\nrange:', range_max, ' ', range_min)
    scimpute.heatmap_vis(Y.values, title='Imputation ({})'.format(p.name_imputation),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(X.values, title='Input ({})'.format(p.name_input),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(G.values, title='Ground Truth ({})'.format(p.name_ground_truth),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)

    # PCA and t-SNE embeddings of cells
    # cluster_info: cell-cluster assignments (its construction is not recovered here)
    scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info, title=p.name_imputation, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info, title=p.name_input, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info, title=p.name_ground_truth, dir=p.tag)


def visualize_selected_genes(X, Y, G, p):
    '''scatterplots for the genes named in p.gene_pair_list

    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters

    Return
    -----------
    None
    '''
    gene_pair_dir = p.tag + '/pairs'
    List = p.gene_pair_list
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation) ', dir=gene_pair_dir)

    print("\n> Scatterplots for selected genes")
    print("ground truth vs imputation, ground truth vs input")
    gene_dir = p.tag + '/genes'
    # generate a list of genes using the gene_pair_list
    gene_list = [gene for pair in List for gene in pair]
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
            X_j = X.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue
        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Imputation) '),
                              xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir)
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Input) '),
                              xlabel='Ground Truth', ylabel='Input', dir=gene_dir)

    # Discretize gene expression values and re-generate pairwise plots
    Y = scimpute.df_exp_discretize_log10(Y)
    print('\n> Discrete gene pair relationship in imputation')
    gene_pair_dir = p.tag + '/pairs_discrete'
    # List = p.gene_pair_list
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation Discrete) ', dir=gene_pair_dir)

    print("\n> Discrete imputation vs ground truth")
    gene_dir = p.tag + '/genes_discrete'
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue
        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Imputation) '),
                              xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir)


def result_analysis_main(p):  # (wrapper name not recovered; chosen here for clarity)
    '''analyzing imputation output

    Parameters
    ------------
    p: parameters from global_params.py and example.py

    Return
    -----------
    None
    '''
    X, Y, G = load_results(p)
    # calculate MSEs
    mse1_nz, mse1, mse2_nz, mse2 = calculate_MSEs(X, Y, G)
    # calculate and visualize variation in genes
    analyze_variation_in_genes(X, Y, G, p)
    # visualize results using all genes
    visualize_all_genes(X, Y, G, p)
    # visualize selected genes
    visualize_selected_genes(X, Y, G, p)
```
def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                          input_matrix, gene_ids, cell_ids, p, m):
    '''Calculate and save imputation results for an input matrix at the
    'impute' mode. If the number of cells is larger than a threshold
    (large_size: 1e5), save results of m//p.sample_size 'folds'.

    Parameters
    ----------
    sess, X, h, a_bottleneck, pIn_holder, pHidden_holder: trained graph handles
    input_matrix: csr matrix [cells, genes]
    p: parameter module; m: number of cells
    '''
    if m > p.large_size:
        # impute on small data blocks to avoid high memory cost
        n_out_batches = m // p.sample_size
        print('num_out_batches:', n_out_batches)
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
            for i_ in range(n_out_batches + 1):
                start_idx = i_ * p.sample_size
                end_idx = min((i_ + 1) * p.sample_size, m)
                print('saving:', start_idx, end_idx)
                x_out_batch = input_matrix[start_idx:end_idx, :].todense()
                y_out_batch = sess.run(
                    h, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                df_out_batch = pd.DataFrame(data=y_out_batch,
                                            columns=gene_ids,
                                            index=cell_ids[range(start_idx, end_idx)])
                latent_code = sess.run(
                    a_bottleneck, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                latent_code_df = pd.DataFrame(data=latent_code,
                                              index=cell_ids[range(start_idx, end_idx)])
                if i_ == 0:
                    df_out_batch.to_csv(handle, float_format='%.6f')
                    latent_code_df.to_csv(handle2, float_format='%.6f')
                    print('RAM usage during mini-batch imputation and saving output: ',
                          '{} M'.format(usage()))
                else:
                    df_out_batch.to_csv(handle, header=None)
                    latent_code_df.to_csv(handle2, header=None)
        handle2.close()
    else:
        # if m, the number of cells, is less than large_size (1e5)
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        # save sample imputation
        Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                                        pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids)
        print('RAM usage during whole data imputation and saving output: ',
              '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
        scimpute.save_hd5(latent_code_df, "{}/latent_code.{}.hd5".format(p.stage, p.stage))
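# --- Example: chunked inference to bound peak memory ----------------------
# A minimal, self-contained sketch of the fold logic used above, with a
# plain NumPy callable standing in for sess.run(h, ...). The function name,
# sizes, and the 0.5-scaling "model" are illustrative assumptions, not part
# of late.py; only one row block is dense in memory at a time.

def impute_in_folds(matrix, fold_size, predict):
    """Apply `predict` to row blocks of `matrix` and stack the results."""
    m = matrix.shape[0]
    n_folds = m // fold_size
    outputs = []
    for i in range(n_folds + 1):
        start, end = i * fold_size, min((i + 1) * fold_size, m)
        if start >= end:
            break  # m was an exact multiple of fold_size
        outputs.append(predict(matrix[start:end, :]))
    return np.vstack(outputs)

_demo = np.arange(20.0).reshape(10, 2)
assert np.allclose(impute_in_folds(_demo, 4, lambda x: x * 0.5), _demo * 0.5)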
def visualize_weight(sess, stage, w_name, b_name):
    w = eval(w_name)
    b = eval(b_name)
    w_arr = sess.run(w)
    b_arr = sess.run(b)
    b_arr = b_arr.reshape(len(b_arr), 1)
    b_arr_T = b_arr.T
    scimpute.visualize_weights_biases(w_arr, b_arr_T,
                                      '{},{}.{}'.format(w_name, b_name, stage),
                                      dir=stage)


def visualize_weights(sess, stage, en_de_layers):
    for l1 in range(1, en_de_layers + 1):
        encoder_weight = 'e_w' + str(l1)
        encoder_bias = 'e_b' + str(l1)
        visualize_weight(sess, stage, encoder_weight, encoder_bias)
        decoder_bias = 'd_b' + str(l1)
        decoder_weight = 'd_w' + str(l1)
        visualize_weight(sess, stage, decoder_weight, decoder_bias)


def save_weights(sess, stage, en_de_layers):
    print('save weights in npy')
    for l1 in range(1, en_de_layers + 1):
        encoder_weight_name = 'e_w' + str(l1)
        encoder_bias_name = 'e_b' + str(l1)
        decoder_bias_name = 'd_b' + str(l1)
        decoder_weight_name = 'd_w' + str(l1)
        np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage),
                sess.run(eval(encoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage),
                sess.run(eval(decoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage),
                sess.run(eval(encoder_bias_name)))
        np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage),
                sess.run(eval(decoder_bias_name)))


def usage():
    process = psutil.Process(os.getpid())
    ram = process.memory_info()[0] / float(2 ** 20)
    ram = round(ram, 1)
    return ram
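# --- Example: .npy round-trip for saved layer weights ---------------------
# save_weights above persists each weight/bias tensor with np.save. The
# snippet below is a minimal stand-alone check of that file format; the
# file name is a placeholder, not one the script actually writes.

_w = np.random.default_rng(0).normal(size=(4, 3)).astype(np.float32)
np.save('e_w1.demo.npy', _w)
_w_back = np.load('e_w1.demo.npy')
assert np.array_equal(_w, _w_back)
os.remove('e_w1.demo.npy')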
def late_main(p, log_dir, rand_state=3):
    ##0. read data and extract gene IDs and cell IDs
    input_matrix, gene_ids, cell_ids = read_data(p)

    ##1. split data and save indexes
    # input: p, input_matrix, cell_ids
    # return: cell_ids_train, cell_ids_valid, cell_ids_test
    m, n = input_matrix.shape
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
    print('RAM usage after splitting input data is: {} M'.format(usage()))

    # backward support for older parameter files: sample_size defaults to 9e4.
    # If sample_size is less than the number of cells (m), the training and
    # validation sets used for reporting are reconstructed by random sampling.
    try:
        sample_size = p.sample_size
    except AttributeError:
        sample_size = int(9e4)

    if sample_size < m:
        np.random.seed(1)
        rand_idx = np.random.choice(range(len(cell_ids_train)),
                                    min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(range(len(cell_ids_valid)),
                                    min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
        # ?? sample_input is sampled from the whole matrix; should it instead
        # be a matrix containing sample_train and sample_valid?
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        del rand_idx
        gc.collect()
    else:
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input = input_matrix.todense()
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
        sample_input_cell_ids = cell_ids
    print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)))

    ##2. model training and validation
    #2.1 init --> keep this in the main
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n], name='X_input')  # input
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn')  # keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')  # keep_prob for dropout

    #2.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n; return: a_bottleneck, h (= d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3)

    #2.3 define the loss
    # input: X, h, p.reg_coef; return: mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)

    #2.4 construct the trainer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)
    if p.mse_mode in ('mse_omega', 'mse_nz'):
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')

    #2.5 init a session according to the run_flag
    sess = tf.Session()
    saver = tf.train.Saver()  # restore variables
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        print('*** In impute mode loading "step2.ckpt"..')
        saver.restore(sess, './step2/step2.ckpt')
        p.max_training_epochs = 0
        p.learning_rate = 0.0
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                              input_matrix, gene_ids, cell_ids, p, m)
        print('imputation finished')
        exit()
    else:
        raise Exception('run_flag err')

    # define tensor_board writers
    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)

    # prep mini-batch, and reporter vectors
    num_batch = len(train_idx) // p.batch_size  # floor
    epoch_log = []
    mse_nz_batch_vec, mse_nz_valid_vec = [], []
    mse_batch_vec, mse_valid_vec = [], []  # mse = MSE(X, h)
    # msej_batch_vec, msej_valid_vec = [], []  # MSE(X, h) for gene j, nz cells
    print('RAM usage after building the model is: {} M'.format(usage()))

    epoch = 0
    #2.6 pre-training epoch (0): save metrics before any training step
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run(
        [mse, mse_nz], feed_dict={X: sample_train, pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run(
        [mse, mse_nz], feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=", round(mse_nz_valid, 3))
    print("mse_train=", round(mse_train, 3), "mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)

    #2.7 training epochs (1-)
    for epoch in range(1, p.max_training_epochs + 1):
        tic_cpu, tic_wall = time.process_time(), time.time()
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)

        #2.7.1 train the model on mini-batches
        for i in range(num_batch):
            indices = np.arange(p.batch_size * i, p.batch_size * (i + 1))
            ridx_batch = ridx_full[indices]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch,
                                         pIn_holder: p.pIn, pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.process_time(), time.time()

        #2.7.2 log the results of epoch 1 and of every display step
        if (epoch == 1) or (epoch % p.display_step == 0):
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i + 1, epoch * (i + 1)))
            print('RAM usage: {:0.1f} M'.format(usage()))

            # training mse and mse_nz of the last batch
            mse_batch, mse_nz_batch = sess.run(
                [mse, mse_nz],
                feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0})
            # validation mse and mse_nz of the sample validation set (1000)
            mse_valid, mse_nz_valid, Y_valid = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
            toc_log = time.time()
            print('mse_nz_batch:{}; mse_nz_valid: {}'.format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
            print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))

            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)
            epoch_log.append(epoch)

        #2.7.3 snapshot step: save imputation, model, curves and weights
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()

            # 1. save imputation results; if the input matrix is large
            # (m > p.large_size), only save the imputation of a small
            # sample set (sample_input)
            print("> Impute and save..")
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df,
                                  "{}/sample_imputation.{}.hd5".format(p.stage, p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))

            # 2. save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)

            # 3. save the training and validation curves
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,
                                   p.stage, skip=math.floor(epoch / 5 / p.display_step))

            # 4. save the bottleneck representation
            print("> save bottleneck_representation")
            code_bottleneck_input = sess.run(
                a_bottleneck, feed_dict={X: sample_input, pIn_holder: 1, pHidden_holder: 1})
            np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input)

            save_weights(sess, p.stage, en_de_layers=p.l)
            visualize_weights(sess, p.stage, en_de_layers=p.l)

            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
            print('snapshot_step: {}s'.format(log2_time))

    batch_writer.close()
    valid_writer.close()
    sess.close()
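# --- Example: index-based split and per-epoch mini-batching ---------------
# scimpute.split__csr_matrix lives in the scimpute module and is not shown
# in this file; the sketch below is an assumption about its behavior
# (shuffle row indices, then cut them by the proportions a/b/c), followed by
# the per-epoch shuffling used in the training loop above. Sizes are
# illustrative.

def split_csr(matrix, a=0.7, b=0.15, c=0.15, seed=1):
    m = matrix.shape[0]
    idx = np.random.RandomState(seed).permutation(m)
    n_train, n_valid = int(m * a), int(m * b)
    train_idx = idx[:n_train]
    valid_idx = idx[n_train:n_train + n_valid]
    test_idx = idx[n_train + n_valid:]
    return (matrix[train_idx], matrix[valid_idx], matrix[test_idx],
            train_idx, valid_idx, test_idx)

_mat = csr_matrix(np.random.RandomState(0).rand(10, 3))
_train, _valid, _test, _ti, _vi, _xi = split_csr(_mat)
assert _train.shape[0] + _valid.shape[0] + _test.shape[0] == 10

# One shuffled pass over the training rows per epoch, sliced into fixed-size
# batches; rows beyond num_batch * batch_size are skipped for that epoch,
# matching the floor division in late_main.
_batch_size = 3
_num_batch = len(_ti) // _batch_size
_ridx_full = np.random.choice(len(_ti), len(_ti), replace=False)
for _i in range(_num_batch):
    _ridx_batch = _ridx_full[np.arange(_batch_size * _i, _batch_size * (_i + 1))]
    _x_batch = _train[_ridx_batch, :].todense()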
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3):
    # define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n; return: a_bottleneck, h (= d_a1)
    tf.set_random_seed(rand_state)  # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
    global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3

    if p.L == 7:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # with tf.name_scope('Encoder_L4'):
        #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        # with tf.name_scope('Decoder_L4'):
        #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        #     d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # define input/output
        a_bottleneck = e_a2
    elif p.L == 3:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))

    h = d_a1
    return a_bottleneck, h
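# --- Example: what one dense layer computes --------------------------------
# scimpute.weight_bias_variable and scimpute.dense_layer are defined in the
# scimpute module, not shown here. The NumPy sketch below is an assumption
# about the computation (affine map, a ReLU-style nonlinearity, and
# inverted dropout driven by a keep probability); scimpute's actual
# activation may differ. It is meant only to make the 3/5/7-layer wiring
# above concrete.

def dense_layer_np(x, w, b, keep_prob, rng):
    a = np.maximum(x @ w + b, 0.0)           # affine map + ReLU (assumed)
    mask = rng.random(a.shape) < keep_prob   # inverted dropout
    return np.where(mask, a / keep_prob, 0.0)

_rng = np.random.default_rng(3)
_x = _rng.random((2, 5))
_w, _b = _rng.normal(scale=0.1, size=(5, 4)), np.zeros(4)
print(dense_layer_np(_x, _w, _b, keep_prob=0.8, rng=_rng).shape)  # (2, 4)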
def build_metrics(X, h, coef):
    # input: X, h, p.reg_coef; return: mse_nz, mse, reg_term
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if 0, 1 if > 0; not possibly < 0 in our data
        mse_nz = tf.reduce_mean(tf.multiply(tf.pow(X - h, 2), omega))
        mse = tf.reduce_mean(tf.pow(X - h, 2))  # for report
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
        tf.summary.scalar('mse__Y_vs_X', mse)
    return mse_nz, mse, reg_term
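# --- Example: the non-zero-masked loss on a toy matrix ---------------------
# A NumPy check of mse_nz: omega = sign(X) zeroes out the squared error at
# entries where the input is 0, so unmeasured zeros do not pull the
# reconstruction toward zero. Note the mean still divides by all entries,
# matching tf.reduce_mean above. Toy values are illustrative.

_X = np.array([[0.0, 2.0], [1.0, 0.0]])
_h = np.array([[0.5, 1.0], [1.0, 0.5]])
_omega = np.sign(_X)
_mse_nz = np.mean((_X - _h) ** 2 * _omega)  # only the two non-zero entries count
_mse = np.mean((_X - _h) ** 2)
print(_mse_nz, _mse)                        # 0.25 vs 0.375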
def load_params(mode, infile):
    '''load the 'global_params.py' file and tune its parameters for `mode`'''
    param_file = 'global_params.py'
    p = SourceFileLoader('global_params', os.getcwd() + '/' + param_file).load_module()
    p.fname_input = infile
    p.mode = mode
    if mode == 'pre-training':
        # step1/rand_init for pre-training on reference
        p.stage = 'step1'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'translate':
        # step2/load_saved from step1, for transfer learning
        p.stage = 'step2'  # step1/step2 (not others)
        p.run_flag = 'load_saved'  # rand_init/load_saved
        p.learning_rate = 3e-5  # step2: 3e-5 for 3-7L, 3e-6 for 9L
    elif mode == 'late':
        # step2/rand_init for one-step training
        p.stage = 'step2'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'impute':
        # step2/load_saved/learning_rate=0, just impute and output
        p.stage = 'impute'
        p.run_flag = 'impute'
        p.learning_rate = 0.0
    elif mode == 'analysis':
        p.tag = 'Eval'
        p.stage = 'Eval'
    else:
        print('The mode you entered cannot be recognized.')
        print('Valid mode options: pre-training | late | translate | impute | analysis')
        p.mode = 'invalid'
        return p

    if p.test_flag:
        p.snapshot_step = 5  # interval of saving session, imputation
        p.m = 1000
        p.n = 300
        p.sample_size = int(240)
        print('in test mode\n',
              'num-genes set to {}, num-cells set to {}\n'.format(p.n, p.m),
              'sample size set to {}'.format(p.sample_size))
    return p


def display_params(p):
    # PRINT PARAMETERS
    print('\nmode:', p.mode)
    print('\nData:')
    print('fname_input:', p.fname_input)
    print('name_input:', p.name_input)
    print('ori_input:', p.ori_input)
    print('transformation_input:', p.transformation_input)
    if p.mode in ('pre-training', 'translate', 'late'):
        print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c))
        print('\nParameters:')
        print('mse_mode:', p.mse_mode)
        print('stage:', p.stage)
        print('init:', p.run_flag)
        print('test_mode:', p.test_flag)
        print('total number of layers: {}'.format(p.L))
        for l_tmp in range(1, p.l + 1):
            print("n_hidden{}: {}".format(l_tmp, eval('p.n_hidden_' + str(l_tmp))))
        print('learning_rate:', p.learning_rate)
        print('reg_coef:', p.reg_coef)
        print('batch_size:', p.batch_size)
        print('sample_size: ', p.sample_size)
        print('pIn:', p.pIn)
        print('pHidden:', p.pHidden)
        print('max_training_epochs:', p.max_training_epochs)
        print('display_step', p.display_step)
        print('snapshot_step', p.snapshot_step)
    elif p.mode == 'analysis':
        print('fname_imputation:', p.fname_imputation)
        print('transformation_imputation', p.transformation_imputation)
        print('fname_ground_truth: ', p.fname_ground_truth)
        print('transformation_ground_truth', p.transformation_ground_truth)
        print('gene_pair_list: ', p.gene_pair_list)
    print('\n')
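# --- Example: loading a parameter module at runtime ------------------------
# A minimal sketch of the SourceFileLoader mechanism used by load_params
# above; the file name params_demo.py and its two variables are made up for
# illustration and are cleaned up afterwards.

with open('params_demo.py', 'w') as _f:
    _f.write('learning_rate = 3e-4\nbatch_size = 256\n')
_p = SourceFileLoader('params_demo', './params_demo.py').load_module()
print(_p.learning_rate, _p.batch_size)  # 0.0003 256
os.remove('params_demo.py')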
def read_data(p):
    '''READ DATA

    Parameters
    ------------
    p: parameter module

    Return
    -----------
    input_matrix (csr), gene_ids, cell_ids
    '''
    print('RAM usage before reading data: {} M'.format(usage()))
    if p.fname_input.endswith('h5'):
        # for 10x genomics large h5 files
        input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input)
        input_matrix = input_obj.matrix
        gene_ids = input_obj.gene_ids
        cell_ids = input_obj.barcodes
        print('RAM usage after reading sparse matrix: {} M'.format(usage()))
        gc.collect()
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_matrix = scimpute.sparse_matrix_transformation(input_matrix, p.transformation_input)
        del input_obj
        gc.collect()
        print('RAM usage after {} transformation: {} M'.format(p.transformation_input, usage()))
        # test mode: m*n subset (e.g. 1000 * 300)
        if p.test_flag:
            print('in test mode')
            input_matrix = input_matrix[:p.m, :p.n]
            gene_ids = gene_ids[:p.n]
            cell_ids = cell_ids[:p.m]
            gc.collect()
    else:
        # for smaller files (hd5, csv, csv.gz)
        input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
        print('RAM usage after reading input_df: {} M'.format(usage()))
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_df = scimpute.df_transformation(
            input_df.transpose(), transformation=p.transformation_input
        ).transpose()  # [genes, cells] inside df_transformation()
        print('input_df mem usage: ')
        input_df.info(memory_usage='deep')
        # test mode
        if p.test_flag:
            print('in test mode')
            input_df = input_df.iloc[:p.m, :p.n]
            gc.collect()
        # to sparse
        input_matrix = csr_matrix(input_df)  # todo: directly read into csr, get rid of input_df
        gene_ids = input_df.columns
        cell_ids = input_df.index
        print('RAM usage before deleting input_df: {} M'.format(usage()))
        del input_df
        gc.collect()
        print('RAM usage after deleting input_df: {} M'.format(usage()))

    # summary of data
    print("name_input:", p.name_input)
    _ = pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20], columns=gene_ids[:4])
    print("input_df:\n", _, "\n")
    m, n = input_matrix.shape  # m: n_cells; n: n_genes
    print('input_matrix: {} cells, {} genes\n'.format(m, n))
    return input_matrix, gene_ids, cell_ids
def load_results(p):
    '''Load the input (X), imputation (Y) and ground-truth (G) matrices as
    DataFrames [cells, genes]

    Parameters
    ------------
    p: parameters from global_params.py and example.py

    Return
    -----------
    X, Y, G
    '''
    print('> LOADING DATA..')
    X, gene_ids, cell_ids = read_data(p)
    X = pd.DataFrame(data=X.todense(), index=cell_ids, columns=gene_ids)
    Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)

    # DATA TRANSFORMATION (X was already transformed inside read_data)
    print('> DATA TRANSFORMATION..')
    Y = scimpute.df_transformation(Y.transpose(),
                                   transformation=p.transformation_imputation).transpose()
    if p.fname_input != p.fname_ground_truth:
        G = scimpute.df_transformation(G.transpose(),
                                       transformation=p.transformation_ground_truth).transpose()

    # subset/sort X, G to match Y
    # todo: support sparse matrix
    X = X.loc[Y.index, Y.columns]
    G = G.loc[Y.index, Y.columns]

    # TEST MODE OR NOT
    if p.test_flag:
        print('in test mode')
        Y = Y.iloc[0:p.m, 0:p.n]
        G = G.iloc[0:p.m, 0:p.n]
        X = X.iloc[0:p.m, 0:p.n]

    print('Y.shape', Y.shape)
    print('X.shape', X.shape)
    print('G.shape', G.shape)
    return X, Y, G


def calculate_MSEs(X, Y, G):
    '''MSEs between imputation (Y), input (X) and ground truth (G); the _NZ
    variants only count entries that are non-zero in the reference matrix.'''
    print('\n> MSE Calculation')
    max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
    print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))
    max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
    print('Max in G is {}, Min in G is {}'.format(max_g, min_g))

    mse1_nz = scimpute.mse_omega(Y, X)
    mse1_nz = round(mse1_nz, 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)

    mse1 = scimpute.mse(Y, X)
    mse1 = round(mse1, 7)
    print('MSE1 between Imputation and Input: ', mse1)

    mse2_nz = scimpute.mse_omega(Y, G)
    mse2_nz = round(mse2_nz, 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)

    mse2 = scimpute.mse(Y, G)
    mse2 = round(mse2, 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)

    return mse1_nz, mse1, mse2_nz, mse2
def analyze_variation_in_genes(X, Y, G, p):
    '''calculate the standard deviation in each gene, write the SDs to
    files, and plot histograms of the SDs

    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    '''
    print('\n calculating standard deviation in each gene for input and imputed matrix')
    x_std_df, y_std_df = scimpute.nz_std(X, Y)
    x_std_df, g_std_df = scimpute.nz_std(X, G)  # purpose: compare G with Y

    # ratios are set to zero where the denominator SD is zero
    std_ratio_yx_data = [(y / x if x != 0 else 0)
                         for y, x in zip(y_std_df.values, x_std_df.values)]
    std_ratio_yx_df = pd.DataFrame(data=std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])
    std_ratio_yg_data = [(y / g if g != 0 else 0)
                         for y, g in zip(y_std_df.values, g_std_df.values)]
    std_ratio_yg_df = pd.DataFrame(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])

    std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min())
    std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max())

    print('generating histograms of standard deviations')
    scimpute.hist_df(y_std_df, xlab='Standard Deviation',
                     title='Imputation({})'.format(p.name_imputation),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(x_std_df, xlab='Standard Deviation',
                     title='Input({})'.format(p.name_input),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(g_std_df, xlab='Standard Deviation',
                     title='Ground Truth({})'.format(p.name_ground_truth),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD',
                     title='', range=(std_min, std_max), dir=p.tag)

    std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv')
    std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')
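# --- Example: per-gene SD over non-zero cells -------------------------------
# scimpute.nz_std is defined in the scimpute module and is not shown in this
# file; the pandas sketch below is an assumption about its behavior
# (column-wise SD restricted to the cells where the first matrix is
# non-zero), matching how x_std_df and y_std_df are compared above. The
# function name nz_std_demo and the toy frames are illustrative.

def nz_std_demo(X, Y):
    mask = X != 0
    # masking with a boolean frame leaves NaN where False; .std() skips them
    return X[mask].std(), Y[mask].std()

_Xd = pd.DataFrame({'g1': [0.0, 1.0, 3.0], 'g2': [2.0, 0.0, 4.0]})
_Yd = pd.DataFrame({'g1': [0.5, 1.5, 2.5], 'g2': [2.0, 1.0, 3.5]})
_x_std, _y_std = nz_std_demo(_Xd, _Yd)
print(_x_std.values, _y_std.values)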
m//p.sample_size 'folds'. Parameters ---------- ''' if", "and imputed matrix') x_std_df, y_std_df = scimpute.nz_std(X, Y) x_std_df, g_std_df = scimpute.nz_std(X, G)", "a_bottleneck, feed_dict={ X: x_out_batch, pIn_holder: 1, pHidden_holder: 1 } ) latent_code_df = pd.DataFrame(", "data=y_out_batch, columns=gene_ids, index=cell_ids[range(start_idx, end_idx)] ) latent_code = sess.run( a_bottleneck, feed_dict={ X: x_out_batch, pIn_holder:", ")) print('num-mini-batch per epoch: {}, till now: {}'.format(i+1, epoch*(i+1))) print('RAM usage: {:0.1f} M'.format(usage()))", "Y.ix[:, j] G_j = G.ix[:, j] X_j = X.ix[:, j] except KeyError: print('KeyError:", "time.time() #2.7.2 save the results of epoch 1 and all display steps (epochs)", "of input_df gene_ids = input_df.columns cell_ids = input_df.index print('RAM usage before deleting input_df:", "rand_state = 3): #5.2 define layers and variables # input p, X, pIn_holder,", "ram = round(ram, 1) return ram # sys.path.append('./bin') # print('sys.path', sys.path) #print('python version:',", "e_a4, d_w4, d_b4, pHidden_holder) with tf.name_scope('Decoder_L3'): d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)", "p.stage = 'step2' p.run_flag = 'rand_init' p.learning_rate = 3e-4 # step1: 3e-4 for", "print('fname_input:', p.fname_input) print('name_input:', p.name_input) print('ori_input:', p.ori_input) print('transformation_input:', p.transformation_input) if (p.mode == 'pre-training') or", "mse2 = round(mse2, 7) print('MSE2 between Imputation and Ground_truth: ', mse2) return mse1_nz,", "or not: m*n subset (1000 * 300). Delete later if p.test_flag: print('in test", "MSE Calculation') max_y, min_y = scimpute.max_min_element_in_arrs([Y.values]) print('Max in Y is {}, Min in", "csr_matrix import gc import matplotlib matplotlib.use('Agg') import scimpute def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage,", "read_data(p) #4. call late late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state = 3) toc_stop", "d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1,", "bottleneck_representation\") code_bottleneck_input = sess.run(a_bottleneck, feed_dict={ X: sample_input, pIn_holder: 1, pHidden_holder: 1}) np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage),", "'num-genes set to {}, num-cells set to {}\\n'.format(p.n, p.m), 'sample size set to", "time.time() ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False) #2.7.1 training model on mini-batches for i", "plotting learning curves') scimpute.learning_curve(epoch_log, mse_batch_vec, mse_valid_vec, title=\"Learning Curve MSE.{}\".format(stage), ylabel='MSE (X vs Y,", "1.0, pIn_holder: 1.0} ) toc_log = time.time() print('mse_nz_batch:{}; mse_omage_valid: {}'. 
def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
    '''Calculate /and save/ the snapshot results of the current model on the whole dataset
    Parameters:
    -----------
    '''
    Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
    # save sample imputation
    Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
    return Y_input_df


#def save_whole_imputation:
def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                          input_matrix, gene_ids, cell_ids, p, m):
    '''calculate and save imputation results for an input matrix at the 'impute' mode.
    If the number of cells is larger than a threshold (large_size: 1e5),
    save results of m//p.sample_size 'folds'.
    Parameters
    ----------
    '''
    if m > p.large_size:
        #impute on small data blocks to avoid high memory cost
        n_out_batches = m//p.sample_size
        print('num_out_batches:', n_out_batches)
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
            for i_ in range(n_out_batches+1):
                start_idx = i_*p.sample_size
                end_idx = min((i_+1)*p.sample_size, m)
                print('saving:', start_idx, end_idx)
                x_out_batch = input_matrix[start_idx:end_idx, :].todense()
                y_out_batch = sess.run(
                    h, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                df_out_batch = pd.DataFrame(
                    data=y_out_batch, columns=gene_ids,
                    index=cell_ids[range(start_idx, end_idx)])
                latent_code = sess.run(
                    a_bottleneck, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                latent_code_df = pd.DataFrame(
                    data=latent_code, index=cell_ids[range(start_idx, end_idx)])
                if i_ == 0:
                    df_out_batch.to_csv(handle, float_format='%.6f')
                    latent_code_df.to_csv(handle2, float_format='%.6f')
                    print('RAM usage during mini-batch imputation and saving output: ',
                          '{} M'.format(usage()))
                else:
                    df_out_batch.to_csv(handle, header=None)
                    latent_code_df.to_csv(handle2, header=None)
        handle2.close()
    else:
        # if m, the # of cells, is less than large_size (1e5)
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        # save sample imputation
        Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                                        pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids)
        print('RAM usage during whole data imputation and saving output: ',
              '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
        latent_code_df.to_csv('./{}/latent_code.{}.csv'.format(p.stage, p.stage),
                              float_format='%.6f')

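# --- Aside (illustration, not part of the original script) -------------------
# save_whole_imputation streams one CSV across many row blocks: the header is
# written with the first block only, and later blocks are appended as bare
# rows, so the result is a single valid table. A minimal sketch of the pattern
# (header=(i_ == 0) is the spelled-out equivalent of the header=None above):
def _demo_blockwise_to_csv(path='demo_imputation.csv', block=2):
    mat = np.arange(20.0).reshape(5, 4)
    row_ids = ['cell%d' % i for i in range(5)]
    col_ids = ['gene%d' % j for j in range(4)]
    with open(path, 'w') as handle:
        for i_ in range(mat.shape[0] // block + 1):
            start, end = i_ * block, min((i_ + 1) * block, mat.shape[0])
            if start >= end:
                break
            df = pd.DataFrame(mat[start:end, :], index=row_ids[start:end], columns=col_ids)
            df.to_csv(handle, float_format='%.6f', header=(i_ == 0))
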
\"{}/imputation.{}.hd5\".format(p.stage, p.stage)) #2.save model print('> Saving model..')", "saver = tf.train.Saver() if p.run_flag == 'load_saved': print('*** In TL Mode') saver.restore(sess, \"./step1/step1.ckpt\")", "Deviation', title='Imputation({})'.format(p.name_imputation), range=(std_min, std_max), dir=p.tag) scimpute.hist_df( x_std_df, xlab='Standard Deviation', title='Input({})'.format(p.name_input), range=(std_min, std_max), dir=p.tag)", "= cell_ids[valid_idx] cell_ids_test = cell_ids[test_idx] np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s') np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')", "input_obj.matrix gene_ids = input_obj.gene_ids cell_ids = input_obj.barcodes print('RAM usage after reading sparse matrix:", "d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd) d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)", "keep this section in the main optimizer = tf.train.AdamOptimizer(p.learning_rate) if p.mse_mode in ('mse_omega',", "e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd) # # e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4,", "init = tf.global_variables_initializer() sess.run(init) elif p.run_flag == 'impute': print('*** In impute mode loading", "TRANSFORMATION..') input_matrix = scimpute.sparse_matrix_transformation(input_matrix, p.transformation_input) del(input_obj) gc.collect() print('RAM usage after {} transformation: {}", "print('num_out_batches:', n_out_batches) handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w') with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:", "= int(9e4) if sample_size < m: np.random.seed(1) rand_idx = np.random.choice( range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))", "if m > p.large_size: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, sample_input, gene_ids,", "files input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input) # gene_be_matrix.matrix = input_obj.matrix.log1p() input_matrix = input_obj.matrix", "scimpute.hist_df( G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth), dir=p.tag, range=[min_expression, max_expression]) # histograms of correlations between", "= tf.train.AdamOptimizer(p.learning_rate) if p.mse_mode in ('mse_omega', 'mse_nz'): print('training on mse_nz') trainer = optimizer.minimize(mse_nz", "dir=p.tag) # PCA and tSNE plots print('\\n> Generating PCA and tSNE plots') if", "= m//p.sample_size print('num_out_batches:', n_out_batches) handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w') with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w')", "dir=stage, skip=skip ) _ = np.asarray(list(zip(epoch_log, mse_batch_vec, mse_valid_vec))) _ = pd.DataFrame(data=_, index=epoch_log, columns=['Epoch',", "exist') continue scimpute.scatterplot2(G_j, Y_j, range='same', title=str(str(j) + '\\n(Ground_truth vs Imputation) '), xlabel='Ground Truth',", "# change with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)", "if p.test_flag: print('in test mode') input_matrix = input_matrix[:p.m, :p.n] gene_ids = gene_ids[:p.n] cell_ids", "== 'load_saved': print('*** In TL Mode') saver.restore(sess, \"./step1/step1.ckpt\") elif p.run_flag == 'rand_init': print('***", "Data Transformation print('> DATA TRANSFORMATION..') input_matrix = 
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3):
    #5.2 define layers and variables
    # input p, X, pIn_holder, pHidden_holder, n
    # return a_bottleneck, h(d_a1)
    tf.set_random_seed(rand_state)  # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
    global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
    if p.L == 7:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # # with tf.name_scope('Encoder_L4'):
        # #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        # #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        # # with tf.name_scope('Decoder_L4'):
        # #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        # #     d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)  # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a2
    elif p.L == 3:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
        # define input/output
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))
    h = d_a1
    return a_bottleneck, h

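# --- Aside (assumption, not the actual scimpute code) ------------------------
# build_late leans on scimpute.weight_bias_variable(name, dim_in, dim_out, sd)
# and scimpute.dense_layer(name, input, w, b, keep_prob), whose internals are
# not shown here. Judging from the call sites, they presumably create
# truncated-normal weights/biases and a ReLU dense layer with dropout on its
# activations; roughly:
def _sketch_weight_bias_variable(name, dim_in, dim_out, sd):
    with tf.name_scope(name):
        w = tf.Variable(tf.truncated_normal([dim_in, dim_out], stddev=sd), name=name + '_w')
        b = tf.Variable(tf.truncated_normal([dim_out], stddev=sd), name=name + '_b')
    return w, b


def _sketch_dense_layer(name, x, w, b, keep_prob):
    with tf.name_scope(name):
        a = tf.nn.relu(tf.add(tf.matmul(x, w), b))
        return tf.nn.dropout(a, keep_prob)  # keep_prob is fed via the pIn/pHidden holders
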
def build_metrics(X, h, coef):
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if 0, 1 if > 0
        mse_nz = tf.reduce_mean(
            tf.multiply(
                tf.pow(X-h, 2),
                omega
            )
        )
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
        mse = tf.reduce_mean(tf.pow(X - h, 2))  # for report
        tf.summary.scalar('mse__Y_vs_X', mse)
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
    return mse_nz, mse, reg_term

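# --- Aside (illustration) -----------------------------------------------------
# The two losses differ only in the omega = sign(X) mask: mse penalizes the
# reconstruction everywhere, while mse_nz zeroes out the error at dropout
# (zero) entries before averaging over all entries. In NumPy terms:
def _demo_mse_nz():
    X = np.array([[0.0, 2.0], [3.0, 0.0]])   # zeros are candidate dropouts
    h = np.array([[1.0, 2.5], [2.0, 1.0]])   # reconstruction
    omega = np.sign(X)                        # 1 where observed, 0 where zero
    mse = np.mean((X - h) ** 2)               # 0.8125
    mse_nz = np.mean(((X - h) ** 2) * omega)  # 0.3125: masked, then mean over all entries
    return mse, mse_nz
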
def load_params(mode, infile):
    '''load the 'global_params.py' module next to this script and use name 'p' '''
    cwd = os.getcwd()
    param_file = 'global_params.py'
    param_name = param_file.rstrip('.py')
    p = SourceFileLoader(param_name, cwd + '/' + param_file).load_module()
    p.fname_input = infile
    p.mode = mode
    if mode == 'pre-training':
        # step1/rand_init for pre-training on reference
        p.stage = 'step1'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'late':
        # step2/rand_init for one step training
        p.stage = 'step2'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'translate':
        # step2/load_saved from step1, for transfer learning
        p.stage = 'step2'  # step1/step2 (not others)
        p.run_flag = 'load_saved'  # rand_init/load_saved
        p.learning_rate = 3e-5  # step2: 3e-5 for 3-7L, 3e-6 for 9L
    elif mode == 'impute':
        # step2/load_saved/learning_rate=0, just impute and output
        p.stage = 'impute'
        p.run_flag = 'impute'
        p.learning_rate = 0.0
    elif mode == 'analysis':
        p.stage = 'Eval'
    else:
        print('The mode you entered cannot be recognized.')
        print('Valid mode options: pre-training | late | translate | impute | analysis')
        p.mode = 'invalid'
        return p
    if p.test_flag:
        p.max_training_epochs = 10  # 3L:100, 5L:1000, 7L:1000, 9L:3000
        p.display_step = 1  # interval of evaluating and saving imputation
        p.m = 1000
        p.n = 300
        p.sample_size = int(240)
        print('in test mode\n',
              'num-genes set to {}, num-cells set to {}\n'.format(p.n, p.m),
              'sample size set to {}'.format(p.sample_size))
    return p

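# --- Aside (hypothetical) -----------------------------------------------------
# The real global_params.py ships with the project; this stand-in only
# enumerates the attributes this script reads, with placeholder values.
# (Analysis mode additionally reads fname_imputation, ori_imputation,
# transformation_imputation, fname_ground_truth, ori_ground_truth,
# transformation_ground_truth and gene_pair_list.)
def _demo_global_params():
    from types import SimpleNamespace
    return SimpleNamespace(
        name_input='demo', ori_input='cell_row', transformation_input='log',
        genome_input='mm10', cluster_file=None, tag='Eval',
        test_flag=False, m=1000, n=300,
        L=7, l=3,                                   # total layers / encoder depth
        n_hidden_1=800, n_hidden_2=400, n_hidden_3=200, sd=1e-4,
        pIn=0.8, pHidden=0.5, reg_coef=1e-4,
        batch_size=256, sample_size=1000, large_size=1e5,
        max_training_epochs=100, display_step=5, snapshot_step=50,
        mse_mode='mse_nz', a=0.7, b=0.15, c=0.15,
    )
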
# to do: modify to display based on mode
def display_params(p):
    # PRINT PARAMETERS
    print('\nmode:', p.mode)
    print('\nData:')
    print('fname_input:', p.fname_input)
    print('name_input:', p.name_input)
    print('ori_input:', p.ori_input)
    print('transformation_input:', p.transformation_input)
    if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode == 'translate'):
        print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c))
        print('\nParameters:')
        print('mse_mode:', p.mse_mode)
        print('stage:', p.stage)
        print('init:', p.run_flag)
        print('test_mode:', p.test_flag)
        print('total number of layers: {}'.format(p.L))
        for l_tmp in range(1, p.l+1):
            print("n_hidden{}: {}".format(l_tmp, eval('p.n_hidden_'+str(l_tmp))))
        print('learning_rate:', p.learning_rate)
        print('reg_coef:', p.reg_coef)
        print('batch_size:', p.batch_size)
        print('sample_size: ', p.sample_size)
        print('pIn:', p.pIn)
        print('pHidden:', p.pHidden)
        print('max_training_epochs:', p.max_training_epochs)
        print('display_step:', p.display_step)
        print('snapshot_step:', p.snapshot_step)
    elif p.mode == 'analysis':
        print('fname_imputation:', p.fname_imputation)
        print('transformation_imputation', p.transformation_imputation)
        print('fname_ground_truth: ', p.fname_ground_truth)
        print('transformation_ground_truth', p.transformation_ground_truth)
        print('gene_pair_list: ', p.gene_pair_list)
    print('\n')

_.to_csv(\"./{}/mse.csv\".format(stage)) #def learning_curve_mse_nz(skip=1): def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1): '''Save mse", "ram = process.memory_info()[0] / float(2 ** 20) ram = round(ram, 1) return ram", "[], [] # mse = MSE(X, h) #msej_batch_vec, msej_valid_vec = [], [] #", "usage before reading data: {} M'.format(usage())) if p.fname_input.endswith('h5'): # for 10x genomics large", "genes in columns (same below) Y: imputed data matrix G: ground truth p:", "Y, G, p): ''' generate plots using all genes Parameters ------------ X: input", "len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids) )) ##2. model training and validation #2.1 init --> keep", "X.loc[Y.index, Y.columns] G = G.loc[Y.index, Y.columns] # TEST MODE OR NOT if p.test_flag:", "pIn_holder: 1.0} ) # validation mse and mse_nz of the sample validation set", "return parser.parse_args(argv) if __name__ == '__main__': ##1. load parameter module and use name", "is less than the number of cells (m), # we reconstruct the training", "Y_j = Y.ix[:, j] G_j = G.ix[:, j] X_j = X.ix[:, j] except", "folder log_dir = './{}'.format(p.stage) scimpute.refresh_logfolder(log_dir) tic_start = time.time() #3. load data input_matrix, gene_ids,", "p.batch_size*(i+1)) ridx_batch = ridx_full[indices] # x_batch = df1_train.ix[ridx_batch, :] x_batch = input_train[ridx_batch, :].todense()", "b_arr.T scimpute.visualize_weights_biases(w_arr, b_arr_T, '{},{}.{}'.format(w_name, b_name, stage), dir=stage) def visualize_weights(sess, stage, en_de_layers): for l1", "visualize results using all genes visualize_all_genes(X, Y, G, p) # visualize selected genes", "log2_time = round(toc_log2 - tic_log2, 1) min_mse_valid = min(mse_nz_valid_vec) # os.system( # '''for", "# PCA and tSNE plots print('\\n> Generating PCA and tSNE plots') if p.cluster_file", "a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3) #2.3 define", "raise Exception('run_flag err') # define tensor_board writer batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)", "data matrices print('\\n> Generating heatmaps of data matrices') range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values,", "results of the current model on the whole dataset Parameters: ----------- ''' Y_input_arr", "mse_nz, h], feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0} ) # validation mse and", "(same below) Y: imputed data matrix G: ground truth Return ----------- 4 MSEs", "sample_input = input_matrix[rand_idx, :].todense() sample_input_cell_ids = cell_ids[rand_idx] del rand_idx gc.collect() np.random.seed() else: sample_input", "time for each epoch: {}\\n'.format(round(toc_log - tic_log, 1))) mse_batch_vec.append(mse_batch) mse_valid_vec.append(mse_valid) mse_nz_batch_vec.append(mse_nz_batch) mse_nz_valid_vec.append(mse_nz_valid) epoch_log.append(epoch)", "p.stage), code_bottleneck_input) #save_weights() save_weights(sess, p.stage, en_de_layers=p.l) #visualize_weights() visualize_weights(sess, p.stage, en_de_layers=p.l) toc_log2 = time.time()", "omega ) ) mse = tf.reduce_mean(tf.pow(X-h, 2)) reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef", "g_std_df.values)] std_ratio_yg_df = pd.DataFrame(data= std_ratio_yg_data, index=X.columns, columns=['sd_ratio']) std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min()) std_max", "scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd) e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, 
def load_results(p):
    '''READ DATA
    Parameters
    ------------
    p: parameters from global_params.py and example.py
    Return
    -----------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    '''
    # print('>READING DATA..')
    # X = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
    X, gene_ids, cell_ids = read_data(p)
    X = pd.DataFrame(data=X.todense(), index=cell_ids, columns=gene_ids)
    Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)
    # print('> DATA TRANSFORMATION..')
    Y = scimpute.df_transformation(Y.transpose(), transformation=p.transformation_imputation).transpose()
    # X = scimpute.df_transformation(X.transpose(), transformation=p.transformation_input).transpose()
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose()
    # subset/sort X, G to match Y
    # todo: support sparse matrix
    X = X.loc[Y.index, Y.columns]
    G = G.loc[Y.index, Y.columns]
    # TEST MODE OR NOT
    if p.test_flag:
        print('in test mode')
        Y = Y.ix[0:p.m, 0:p.n]
        G = G.ix[0:p.m, 0:p.n]
        X = X.ix[0:p.m, 0:p.n]
    # INPUT SUMMARY
    print('\nIn this code, matrices should have already been transformed into cell_row')
    print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation,
          '\n', Y.ix[0:20, 0:3])
    print('X (input):', p.fname_input, p.ori_input, p.transformation_input,
          '\n', X.ix[0:20, 0:3])
    print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,
          '\n', G.ix[0:20, 0:3])
    print('Y.shape', Y.shape)
    print('X.shape', X.shape)
    print('G.shape', G.shape)
    return X, Y, G

def calculate_MSEs(X, Y, G):
    '''calculate MSEs
    MSE between imputation and input
    MSE between imputation and ground truth
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    Return
    -----------
    4 MSEs
    '''
    print('\n> MSE Calculation')
    max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
    print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))
    max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
    print('Max in G is {}, Min in G is {}'.format(max_g, min_g))
    mse1_nz = scimpute.mse_omega(Y, X)
    mse1_nz = round(mse1_nz, 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)
    mse1 = scimpute.mse(Y, X)
    mse1 = round(mse1, 7)
    print('MSE1 between Imputation and Input: ', mse1)
    mse2_nz = scimpute.mse_omega(Y, G)
    mse2_nz = round(mse2_nz, 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)
    mse2 = scimpute.mse(Y, G)
    mse2 = round(mse2, 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)
    return mse1_nz, mse1, mse2_nz, mse2

def analyze_variation_in_genes(X, Y, G, p):
    '''calculate and visualize standard deviation in each gene
    write SDs to files
    plot histograms of SDs
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    print('\n calculating standard deviation in each gene for input and imputed matrix')
    x_std_df, y_std_df = scimpute.nz_std(X, Y)
    x_std_df, g_std_df = scimpute.nz_std(X, G)
    # purpose: compare G with Y
    #std_ratio_yx_df = pd.DataFrame(data= y_std_df.values / x_std_df.values, index=X.columns, columns=['sd_ratio'])
    #std_ratio_yg_df = pd.DataFrame(data= y_std_df.values / g_std_df.values, index=X.columns, columns=['sd_ratio'])
    std_ratio_yx_data = [(y/x if x != 0 else None)
                         for y, x in zip(y_std_df.values, x_std_df.values)]
    std_ratio_yx_df = pd.DataFrame(data=std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])
    std_ratio_yg_data = [(y/x if x != 0 else None)
                         for y, x in zip(y_std_df.values, g_std_df.values)]
    std_ratio_yg_df = pd.DataFrame(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])
    std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min())
    std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max())
    print('generating histograms of standard deviations')
    scimpute.hist_df(
        y_std_df, xlab='Standard Deviation',
        title='Imputation({})'.format(p.name_imputation),
        range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(
        x_std_df, xlab='Standard Deviation',
        title='Input({})'.format(p.name_input),
        range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(
        g_std_df, xlab='Standard Deviation',
        title='Ground Truth({})'.format(p.name_ground_truth),
        range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(
        std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',
        title='', range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(
        std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD',
        title='', range=(std_min, std_max), dir=p.tag)

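# --- Aside (assumption about scimpute.nz_std) ---------------------------------
# nz_std is not shown here; from its use above it apparently returns per-gene
# standard deviations computed over the cells that are non-zero in its first
# argument, so imputed variability is compared on observed entries only:
def _sketch_nz_std(X, Y):
    x_sd, y_sd = {}, {}
    for gene in X.columns:
        nz = X[gene] != 0
        x_sd[gene] = X.loc[nz, gene].std()
        y_sd[gene] = Y.loc[nz, gene].std()
    return pd.Series(x_sd), pd.Series(y_sd)
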
def visualize_all_genes(X, Y, G, p):
    ''' generate plots using all genes
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    # histograms of gene expression
    max_expression = max(G.values.max(), X.values.max(), Y.values.max())
    min_expression = min(G.values.min(), X.values.min(), Y.values.min())
    print('\n max expression:', max_expression)
    print('\n min expression:', min_expression)
    scimpute.hist_df(
        Y, xlab='Expression', title='Imputation({})'.format(p.name_imputation),
        dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(
        X, xlab='Expression', title='Input({})'.format(p.name_input),
        dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(
        G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth),
        dir=p.tag, range=[min_expression, max_expression])
    # histograms of correlations between genes in imputation and ground truth
    # and of correlations between cells in imputation and ground truth
    # when ground truth is not provided, input is used as ground truth
    print('\n> Correlations between ground truth and imputation')
    print('ground truth dimension: ', G.shape, 'imputation dimension: ', Y.shape)
    print('generating histogram for correlations of genes between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each gene\n(Ground_truth vs Imputation)\n{}\n{}".
        format(p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='column-wise', nz_mode='first'  # or ignore
    )
    print('generating histogram for correlations of cells between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each cell\n(Ground_truth vs Imputation)\n{}\n{}".
        format(p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='row-wise', nz_mode='first'
    )
    # heatmaps of data matrices
    print('\n> Generating heatmaps of data matrices')
    range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values, X.values])
    scimpute.heatmap_vis(Y.values, title='Imputation ({})'.format(p.name_imputation),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(X.values, title='Input ({})'.format(p.name_input),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(G.values, title='Ground_truth ({})'.format(p.name_ground_truth),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    # PCA and tSNE plots
    print('\n> Generating PCA and tSNE plots')
    if p.cluster_file is not None:
        cluster_info = scimpute.read_data_into_cell_row(p.cluster_file)
        # cluster_info = cluster_info.astype('str')
    else:
        cluster_info = None
    scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info, title=p.name_imputation, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info, title=p.name_input, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info, title=p.name_ground_truth, dir=p.tag)

def visualize_selected_genes(X, Y, G, p):
    ''' generate plots for genes specified by the user
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    gene_pair_dir = p.tag+'/pairs'
    List = p.gene_pair_list
    print("\n> Scatterplots of selected gene pairs")
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(X, list=List, tag='(Input)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir)

    print("\n> Scatterplots for selected genes")
    gene_dir = p.tag+'/genes'
    gene_list = [gene for pair in List for gene in pair]
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
            X_j = X.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue
        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Imputation) '),
                              xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir)
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Input) '),
                              xlabel='Ground Truth', ylabel='Input', dir=gene_dir)

    # Discretize gene expression values
    # and re-generate pairwise plots
    Y = scimpute.df_exp_discretize_log10(Y)
    print('\n> Discrete gene pair relationship in imputation')
    gene_pair_dir = p.tag+'/pairs_discrete'
    # List = p.gene_pair_list
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation Discrete) ', dir=gene_pair_dir)

    print("\n> Discrete imputation vs ground truth")
    gene_dir = p.tag+'/genes_discrete'
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
            X_j = X.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue
        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground_truth vs Imputation) '),
                              xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir)
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title=str(str(j) + '\n(Ground_truth vs Input) '),
                              xlabel='Ground Truth', ylabel='Input', dir=gene_dir)

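# --- Aside (illustration) -----------------------------------------------------
# visualize_selected_genes flattens p.gene_pair_list with
# [gene for pair in List for gene in pair], so the parameter is expected to be
# an iterable of gene-ID pairs (IDs below are made up):
#   gene_pair_list = [('Gene_A', 'Gene_B'), ('Gene_C', 'Gene_D')]
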
def result_analysis_main(p):
    '''analyzing imputation output'''
    # load imputation results and input data
    X, Y, G = load_results(p)
    # calculate MSEs
    mse1_nz, mse1, mse2_nz, mse2 = calculate_MSEs(X, Y, G)
    # calculate and visualize variation in genes
    analyze_variation_in_genes(X, Y, G, p)
    # visualize results using all genes
    visualize_all_genes(X, Y, G, p)
    # visualize selected genes
    visualize_selected_genes(X, Y, G, p)

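# --- Aside (assumption) --------------------------------------------------------
# The splitter used in late_main below lives in scimpute and is not shown here;
# given the [p.a/p.b/p.c] split reported by display_params, a plausible sketch
# is a one-time shuffle of row indices cut by proportion:
def _sketch_split_indices(m, a=0.7, b=0.15, c=0.15, seed=1):
    idx = np.random.RandomState(seed).permutation(m)
    n_train, n_valid = int(m * a), int(m * b)
    return idx[:n_train], idx[n_train:n_train + n_valid], idx[n_train + n_valid:]
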
def late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state=3):
    ##1. split data and save indexes
    #input p, input_matrix, cell_ids
    #return cell_ids_train, cell_ids_valid, cell_ids_test
    m, n = input_matrix.shape
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
    print('RAM usage after splitting input data is: {} M'.format(usage()))

    # todo: for backward support for older parameter files only
    try:
        p.sample_size
        sample_size = p.sample_size
    except:
        sample_size = int(9e4)
    if sample_size < m:
        np.random.seed(1)
        rand_idx = np.random.choice(
            range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(
            range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
        # should the sample_input be a matrix sampled randomly, and should it be a matrix containing
        # sample_training and sample_valid
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        del rand_idx
        gc.collect()
        np.random.seed()
    else:
        sample_input = input_matrix.todense()
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input_cell_ids = cell_ids
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
    print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)))
    ##2. model training and validation
    #2.1 init --> keep this in the main
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n], name='X_input')  # input
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn')  # keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')  # keep_prob for dropout
    #2.2 define model
    # input p, X, pIn_holder, pHidden_holder, n
    # return a_bottleneck, h(d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3)
    #2.3 define loss
    # input X, h, p
    # return mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)
    #2.4 construct the trainer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)
    if p.mse_mode in ('mse_omega', 'mse_nz'):
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')
    #2.5 Init a session according to the run_flag
    sess = tf.Session()
    # restore variables
    saver = tf.train.Saver()
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        print('*** In impute mode loading "step2.ckpt"..')
        saver.restore(sess, './step2/step2.ckpt')
        p.max_training_epochs = 0
        p.learning_rate = 0.0
        ## save_whole_imputation
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                              input_matrix, gene_ids, cell_ids, p, m)
        print('imputation finished')
        #toc_stop = time.time()
        #print("reading took {:.1f} seconds".format(toc_stop - tic_start))
        exit()
    else:
        raise Exception('run_flag err')
    # define tensor_board writer
    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)
    # prep mini-batch, and reporter vectors
    num_batch = int(math.floor(len(train_idx) // p.batch_size))  # floor
    epoch_log = []
    mse_nz_batch_vec, mse_nz_valid_vec = [], []  #, mse_nz_train_vec = [], [], []
    mse_batch_vec, mse_valid_vec = [], []  # mse = MSE(X, h)
    #msej_batch_vec, msej_valid_vec = [], []  # msej = MSE(X, h), for genej, nz_cells
    print('RAM usage after building the model is: {} M'.format(usage()))
    epoch = 0
    #2.6. pre-training epoch (0)
    #save imputation results before training steps
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run(
        [mse, mse_nz], feed_dict={X: sample_train, pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run(
        [mse, mse_nz], feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=", round(mse_nz_valid, 3))
    print("mse_train=", round(mse_train, 3), "mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)
    #2.7. training epochs (1-)
    for epoch in range(1, p.max_training_epochs+1):
        tic_cpu, tic_wall = time.clock(), time.time()
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)
        #2.7.1 training model on mini-batches
        for i in range(num_batch):
            # x_batch
            indices = np.arange(p.batch_size * i, p.batch_size*(i+1))
            ridx_batch = ridx_full[indices]
            # x_batch = df1_train.ix[ridx_batch, :]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch,
                                         pIn_holder: p.pIn, pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.clock(), time.time()
        #2.7.2 save the results of epoch 1 and all display steps (epochs)
        if (epoch == 1) or (epoch % p.display_step == 0):
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i+1, epoch*(i+1)))
            print('RAM usage: {:0.1f} M'.format(usage()))
            # debug
            # print('d_w1', sess.run(d_w1[1, 0:4]))  # verified when GradDescent used
            # training mse and mse_nz of the last mini-batch
            mse_batch, mse_nz_batch, h_batch = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0})
            # validation mse and mse_nz of the sample validation set (1000)
            mse_valid, mse_nz_valid, Y_valid = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
            toc_log = time.time()
            print('mse_nz_batch:{}; mse_nz_valid: {}'.format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
            print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))
            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)
            epoch_log.append(epoch)
        #2.7.3 save snapshot step
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()
            #1.save imputation results
            #if the input matrix is large (m > p.large_size), only save the
            #imputation results of a small sample set (sample_input)
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/sample_imputation.{}.hd5".format(p.stage, p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
            #2.save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)
            #3.save the training and test curve
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                #learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,
                                   p.stage, skip=math.floor(epoch / 5 / p.display_step))
            #4.save save_bottleneck_representation
            print("> save bottleneck_representation")
            code_bottleneck_input = sess.run(
                a_bottleneck, feed_dict={X: sample_input, pIn_holder: 1, pHidden_holder: 1})
            np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input)
            #save_weights()
            save_weights(sess, p.stage, en_de_layers=p.l)
            #visualize_weights()
            visualize_weights(sess, p.stage, en_de_layers=p.l)
            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            # os.system(
            #     '''for file in {0}/*npy
            #     do python -u weight_clustmap.py $file {0}
            #     done'''.format(p.stage)
            # )
            print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
            print('snapshot_step: {}s'.format(log2_time))
    batch_writer.close()
    valid_writer.close()
    sess.close()

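# --- Aside (illustration) -------------------------------------------------------
# Mini-batch arithmetic in late_main: the floor in num_batch drops the shuffled
# tail of each epoch, but a fresh permutation per epoch rotates which cells are
# skipped. For example:
def _demo_num_batch(train_size=1003, batch_size=100):
    num_batch = int(math.floor(train_size // batch_size))
    leftover = train_size - num_batch * batch_size
    return num_batch, leftover                # (10, 3)
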
def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1):
    '''Save MSE learning curves to csv files
    Parameters:
    -----------
    skip:
    epoch_log:
    mse_batch_vec:
    mse_valid_vec:
    stage: step1 or step2
    '''
    print('> plotting learning curves')
    scimpute.learning_curve(epoch_log, mse_batch_vec, mse_valid_vec,
                            title="Learning Curve MSE.{}".format(stage),
                            ylabel='MSE (X vs Y)',
                            dir=stage,
                            skip=skip)
    _ = np.asarray(list(zip(epoch_log, mse_batch_vec, mse_valid_vec)))
    _ = pd.DataFrame(data=_, index=epoch_log,
                     columns=['Epoch', 'MSE_batch', 'MSE_valid']).set_index('Epoch')
    _.to_csv("./{}/mse.csv".format(stage))


#def learning_curve_mse_nz(skip=1):
def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1):
    '''Save non-zero MSE learning curves to csv files
    Parameters:
    -----------
    skip:
    epoch_log:
    mse_nz_batch_vec:
    mse_nz_valid_vec:
    stage: step1 or step2
    '''
    print('> plotting learning curves')
    scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                            title="Learning Curve MSE_NZ.{}".format(stage),
                            ylabel='MSE_NZ (X vs Y, nz)',
                            dir=stage,
                            skip=skip)
    _ = np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec)))
    _ = pd.DataFrame(data=_, index=epoch_log,
                     columns=['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid']).set_index('Epoch')
    _.to_csv("./{}/mse_nz.csv".format(stage))
def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
    '''Calculate the snapshot imputation of the current model on the whole dataset
    Parameters:
    -----------
    '''
    Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
    # save imputation
    Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
    return Y_input_df


#def save_whole_imputation:
def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                          input_matrix, gene_ids, cell_ids, p, m):
    '''calculate and save imputation results for an input matrix at the 'impute'
    mode. If the number of cells is larger than a threshold (large_size: 1e5),
    save results in m//p.sample_size 'folds'.
    Parameters
    ----------
    '''
    if m > p.large_size:
        # impute on small data blocks to avoid high memory cost
        n_out_batches = m // p.sample_size
        # output file names below are assumed; the originals were not recoverable
        handle = open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w')
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        for i_ in range(n_out_batches + 1):
            start_idx = i_ * p.sample_size
            end_idx = min((i_ + 1) * p.sample_size, m)
            if start_idx >= end_idx:
                break  # guard against an empty tail block when m divides evenly
            print('saving:', start_idx, end_idx)
            x_out_batch = input_matrix[start_idx:end_idx, :].todense()
            y_out_batch = sess.run(
                h,
                feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1}
            )
            df_out_batch = pd.DataFrame(
                data=y_out_batch,
                columns=gene_ids,
                index=cell_ids[range(start_idx, end_idx)]
            )
            latent_code = sess.run(
                a_bottleneck,
                feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1}
            )
            latent_code_df = pd.DataFrame(
                data=latent_code,
                index=cell_ids[range(start_idx, end_idx)]
            )
            if i_ == 0:
                df_out_batch.to_csv(handle, float_format='%.6f')
                latent_code_df.to_csv(handle2, float_format='%.6f')
                print('RAM usage during mini-batch imputation and saving output: ',
                      '{} M'.format(usage()))
            else:
                df_out_batch.to_csv(handle, header=None)
                latent_code_df.to_csv(handle2, header=None)
        handle.close()
        handle2.close()
    else:
        # if m, the # of cells, is less than large_size (1e5)
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        # save imputation
        Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                                        pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids)
        print('RAM usage during whole data imputation and saving output: ',
              '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
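# A minimal, self-contained sketch (not part of the pipeline) of the block-wise
# CSV pattern used in save_whole_imputation above: the first block writes the
# header, later blocks append rows only, so a matrix too large for RAM can be
# written incrementally through one open handle. The file name and the toy
# `blocks` data here are illustrative only.
def _demo_blockwise_csv():
    blocks = [
        pd.DataFrame({'g1': [0.1, 0.2], 'g2': [0.3, 0.4]}, index=['cell1', 'cell2']),
        pd.DataFrame({'g1': [0.5], 'g2': [0.6]}, index=['cell3']),
    ]
    with open('demo_blocks.csv', 'w') as handle:
        for i_, block in enumerate(blocks):
            if i_ == 0:
                block.to_csv(handle, float_format='%.6f')  # header + rows
            else:
                block.to_csv(handle, header=None)          # rows only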
def visualize_weight(sess, stage, w_name, b_name):
    w = eval(w_name)
    b = eval(b_name)
    w_arr = sess.run(w)
    b_arr = sess.run(b)
    b_arr = b_arr.reshape(len(b_arr), 1)
    b_arr_T = b_arr.T
    scimpute.visualize_weights_biases(w_arr, b_arr_T,
                                      '{},{}.{}'.format(w_name, b_name, stage),
                                      dir=stage)


def visualize_weights(sess, stage, en_de_layers):
    for l1 in range(1, en_de_layers + 1):
        encoder_weight = 'e_w' + str(l1)
        encoder_bias = 'e_b' + str(l1)
        visualize_weight(sess, stage, encoder_weight, encoder_bias)
        decoder_bias = 'd_b' + str(l1)
        decoder_weight = 'd_w' + str(l1)
        visualize_weight(sess, stage, decoder_weight, decoder_bias)


def save_weights(sess, stage, en_de_layers):
    print('save weights in npy')
    for l1 in range(1, en_de_layers + 1):
        encoder_weight_name = 'e_w' + str(l1)
        encoder_bias_name = 'e_b' + str(l1)
        decoder_bias_name = 'd_b' + str(l1)
        decoder_weight_name = 'd_w' + str(l1)
        np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage),
                sess.run(eval(encoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage),
                sess.run(eval(decoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage),
                sess.run(eval(encoder_bias_name)))
        np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage),
                sess.run(eval(decoder_bias_name)))
def late_main(p, log_dir, rand_state=3):
    ##0. read data into a cell-row matrix
    input_matrix, gene_ids, cell_ids = read_data(p)

    ##1. split data and save indexes
    # input: p, input_matrix, cell_ids
    # return: cell_ids_train, cell_ids_valid, cell_ids_test
    m, n = input_matrix.shape
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')

    # todo: for backward support for older parameter files only
    # sample_size is 1000 by default; if sample_size is less than the number of
    # cells (m), we reconstruct the training and validation sets by randomly sampling.
    try:
        p.sample_size
        sample_size = p.sample_size
    except AttributeError:
        sample_size = int(9e4)

    if sample_size < m:
        np.random.seed(1)
        rand_idx = np.random.choice(
            range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(
            range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
        # ?? the following sample_input is a matrix sampled randomly; should it be
        # a matrix containing sample_train and sample_valid?
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        del rand_idx
        gc.collect()
        np.random.seed()
    else:
        sample_input = input_matrix.todense()
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input_cell_ids = cell_ids
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
    print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)))

    #2.1 init --> keep this in the main
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n])  # input
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn')  # keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')  # keep_prob for dropout

    #2.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n; return: a_bottleneck, h (d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3)

    #2.3 define loss
    # input: X, h, p; return: mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)

    #2.4 construct the trainer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)
    if p.mse_mode in ('mse_omega', 'mse_nz'):
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')

    #2.5 init a session according to the run_flag
    sess = tf.Session()
    saver = tf.train.Saver()  # restore variables
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        print('*** In impute mode loading "step2.ckpt"..')
        saver.restore(sess, './step2/step2.ckpt')
        p.max_training_epochs = 0
        p.learning_rate = 0.0
        ## save_whole_imputation
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                              input_matrix, gene_ids, cell_ids, p, m)
        print('imputation finished')
        #toc_stop = time.time()
        #print("reading took {:.1f} seconds".format(toc_stop - tic_start))
        exit()
    else:
        raise Exception('run_flag err')

    # define tensor_board writer
    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)

    # prep mini-batch, and reporter vectors
    num_batch = int(math.floor(len(train_idx) // p.batch_size))  # floor
    epoch_log = []
    mse_nz_batch_vec, mse_nz_valid_vec = [], []  #, mse_nz_train_vec = [], [], []
    mse_batch_vec, mse_valid_vec = [], []  # mse = MSE(X, h)
    #msej_batch_vec, msej_valid_vec = [], []  # msej = MSE(X, h), for genej, nz_cells
    print('RAM usage after building the model is: {} M'.format(usage()))

    epoch = 0
    #2.6 pre-training epoch (0): save evaluation results before training steps
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run(
        [mse, mse_nz], feed_dict={X: sample_train, pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run(
        [mse, mse_nz], feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_train=", round(mse_train, 3), "mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)

    #2.7 training epochs (1-)
    for epoch in range(1, p.max_training_epochs + 1):
        # time.clock() was removed in Python 3.8; use time.process_time() there
        tic_cpu, tic_wall = time.clock(), time.time()
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)

        #2.7.1 training model on mini-batches
        for i in range(num_batch):
            # x_batch
            indices = np.arange(p.batch_size * i, p.batch_size * (i + 1))
            ridx_batch = ridx_full[indices]
            # x_batch = df1_train.ix[ridx_batch, :]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch,
                                         pIn_holder: p.pIn, pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.clock(), time.time()

        #2.7.2 save the results of epoch 1 and all display steps (epochs)
        if (epoch == 1) or (epoch % p.display_step == 0):
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i + 1, epoch * (i + 1)))
            print('RAM usage: {:0.1f} M'.format(usage()))
            # debug
            # print('d_w1', sess.run(d_w1[1, 0:4]))  # verified when GradDescent used

            # training mse and mse_nz of the last batch
            mse_batch, mse_nz_batch, h_batch = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0})
            # validation mse and mse_nz of the sample validation set (1000)
            mse_valid, mse_nz_valid, Y_valid = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
            toc_log = time.time()
            print('mse_nz_batch: {}; mse_nz_valid: {}'.format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
            epoch_log.append(epoch)
            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)

        #2.7.3 save snapshots: imputation, model, learning curves, bottleneck, weights
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()
            #1. save imputation results; if the input matrix is large (m > p.large_size),
            # only save the imputation results of a small sample set (sample_input)
            print("> Impute and save.. ")
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df,
                                  "{}/sample_imputation.{}.hd5".format(p.stage, p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df,
                                  "{}/imputation.{}.hd5".format(p.stage, p.stage))
            #2. save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)
            #3. save the training and test curve
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                #learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,
                                   p.stage, skip=math.floor(epoch / 5 / p.display_step))
            #4. save bottleneck representation
            print("> save bottleneck_representation")
            code_bottleneck_input = sess.run(
                a_bottleneck,
                feed_dict={X: sample_input, pIn_holder: 1, pHidden_holder: 1})
            np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage),
                    code_bottleneck_input)
            #save_weights()
            save_weights(sess, p.stage, en_de_layers=p.l)
            #visualize_weights()
            visualize_weights(sess, p.stage, en_de_layers=p.l)
            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            # os.system(
            #     '''for file in {0}/*npy
            #     do python -u weight_clustmap.py $file {0}
            #     done'''.format(p.stage)
            # )
            print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
            print('snapshot_step: {}s'.format(log2_time))

    batch_writer.close()
    valid_writer.close()
    sess.close()
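# A minimal, self-contained sketch of the per-epoch mini-batch scheme used in
# late_main above: one random permutation (ridx_full) per epoch, sliced into
# consecutive, non-overlapping batches, so every training row is visited at
# most once per epoch. Sizes here (12 rows, batch of 4) are illustrative only.
def _demo_minibatch_order(n_rows=12, batch_size=4):
    ridx_full = np.random.choice(n_rows, n_rows, replace=False)  # permutation
    num_batch = int(math.floor(n_rows // batch_size))
    for i in range(num_batch):
        indices = np.arange(batch_size * i, batch_size * (i + 1))
        ridx_batch = ridx_full[indices]  # row ids for this mini-batch
        print('batch', i, '->', ridx_batch)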
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3):
    #5.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n
    # return: a_bottleneck, h (d_a1)
    tf.set_random_seed(rand_state)  # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
    global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
    if p.L == 7:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # # with tf.name_scope('Encoder_L4'):
        # #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        # #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        # # with tf.name_scope('Decoder_L4'):
        # #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        # #     d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        a_bottleneck = e_a2
    elif p.L == 3:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 layers are supported".format(p.L))
    h = d_a1
    return a_bottleneck, h
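# As the branches above show, p.L counts neuron layers (input + hidden + output):
# L=3 has a single hidden layer (the bottleneck e_a1), L=5 has two encoder layers
# mirrored by two decoder layers, and so on. A small illustrative helper (not in
# the original code) that prints the mirrored weight shapes this convention
# implies; the function name and the example sizes are assumptions.
def _demo_layer_shapes(n, hidden_sizes):
    shapes = []
    dims = [n] + list(hidden_sizes)
    for i in range(len(dims) - 1):          # encoder weight shapes
        shapes.append((dims[i], dims[i + 1]))
    for i in range(len(dims) - 1, 0, -1):   # mirrored decoder weight shapes
        shapes.append((dims[i], dims[i - 1]))
    return shapes

# _demo_layer_shapes(9000, [400, 200]) ->
# [(9000, 400), (400, 200), (200, 400), (400, 9000)]  # an L=5 model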
def build_metrics(X, h, coef):
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if 0, 1 if > 0; values are never < 0 in our data
        mse_nz = tf.reduce_mean(
            tf.multiply(
                tf.pow(X - h, 2), omega
            )
        )
        mse = tf.reduce_mean(tf.pow(X - h, 2))  # for report
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
    return mse_nz, mse, reg_term
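# A minimal NumPy sketch (not part of the graph) of what mse_nz computes above:
# the squared error is masked by sign(X), so positions where the input is zero
# (candidate dropouts) do not contribute to the training loss. Note the mean is
# still taken over all entries, matching tf.reduce_mean. The toy matrices are
# illustrative.
def _demo_mse_nz():
    X = np.array([[0.0, 2.0], [1.0, 0.0]])    # input (zeros = unobserved)
    h = np.array([[0.5, 1.5], [1.0, 0.7]])    # reconstruction
    omega = np.sign(X)                         # mask: 1 where X > 0
    mse_nz = np.mean(((X - h) ** 2) * omega)   # only the X>0 cells contribute
    mse = np.mean((X - h) ** 2)
    return mse_nz, mse

# _demo_mse_nz() -> (0.0625, 0.2475)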
def load_params(mode, infile):
    '''load the 'global_params.py' file as module 'p' and adjust it to the mode'''
    cwd = os.getcwd()
    param_file = 'global_params.py'
    # note: rstrip strips a character *set*, not a suffix; it happens to work
    # for this file name
    param_name = param_file.rstrip('.py')
    p = SourceFileLoader(param_name, cwd + '/' + param_file).load_module()
    p.fname_input = infile
    p.mode = mode
    if mode == 'pre-training':
        # step1/rand_init for pre-training on reference
        p.stage = 'step1'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'translate':
        # step2/load_saved from step1, for transfer learning
        p.stage = 'step2'  # step1/step2 (not others)
        p.run_flag = 'load_saved'
        p.learning_rate = 3e-5  # step2: 3e-5 for 3-7L, 3e-6 for 9L
    elif mode == 'late':
        # step2/rand_init for one-step training
        p.stage = 'step2'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'impute':
        # step2/load_saved/learning_rate=0, just impute and output
        p.stage = 'impute'
        p.run_flag = 'impute'
        p.learning_rate = 0.0
    elif mode == 'analysis':
        p.tag = 'Eval'
        p.stage = 'Eval'
    else:
        print('The mode you entered cannot be recognized.')
        print('Valid mode options: pre-training | late | translate | impute | analysis')
        p.mode = 'invalid'
        return p

    if p.test_flag:
        p.max_training_epochs = 10  # 3L:100, 5L:1000, 7L:1000, 9L:3000
        p.display_step = 1  # interval on learning curve
        p.snapshot_step = 5  # interval of saving session, imputation
        p.m = 1000
        p.n = 300
        p.sample_size = int(240)
        print('in test mode\n',
              'num-genes set to {}, num-cells set to {}\n'.format(p.n, p.m),
              'sample size set to {}'.format(p.sample_size))
    return p


# to do: modify to display based on mode
def display_params(p):
    # PRINT PARAMETERS
    print('\nmode:', p.mode)
    print('\nData:')
    print('fname_input:', p.fname_input)
    print('name_input:', p.name_input)
    print('ori_input:', p.ori_input)
    print('transformation_input:', p.transformation_input)
    if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode == 'translate'):
        print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c))
        print('\nParameters:')
        print('mse_mode:', p.mse_mode)
        print('stage:', p.stage)
        print('init:', p.run_flag)
        print('test_mode:', p.test_flag)
        print('total number of layers: {}'.format(p.L))
        for l_tmp in range(1, p.l + 1):
            print("n_hidden{}: {}".format(l_tmp, eval('p.n_hidden_' + str(l_tmp))))
        print('learning_rate:', p.learning_rate)
        print('reg_coef:', p.reg_coef)
        print('batch_size:', p.batch_size)
        print('sample_size: ', p.sample_size)
        print('pIn:', p.pIn)
        print('pHidden:', p.pHidden)
        print('max_training_epochs:', p.max_training_epochs)
        print('display_step', p.display_step)
        print('snapshot_step', p.snapshot_step)
    elif p.mode == 'analysis':
        print('fname_imputation:', p.fname_imputation)
        print('transformation_imputation', p.transformation_imputation)
        print('fname_ground_truth: ', p.fname_ground_truth)
        print('transformation_ground_truth', p.transformation_ground_truth)
        print('gene_pair_list: ', p.gene_pair_list)
    print('\n')
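# A minimal, standalone sketch of the parameter-loading trick used in
# load_params above: SourceFileLoader imports an arbitrary .py file as a module
# object, whose attributes then act as a mutable parameter namespace. The path
# default and the attribute names in the trailing comment are illustrative.
def _demo_load_module(path='./global_params.py'):
    name = os.path.basename(path).replace('.py', '')  # safer than rstrip('.py')
    mod = SourceFileLoader(name, path).load_module()
    return mod  # e.g. mod.batch_size, mod.learning_rate, ...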
csv files", "en_de_layers+1): encoder_weight_name = 'e_w'+str(l1) encoder_bias_name = 'e_b'+str(l1) decoder_bias_name = 'd_b'+str(l1) decoder_weight_name = 'd_w'+str(l1)", "epoch 1 and all display steps (epochs) if (epoch == 1) or (epoch", "p.sd) d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder) # todo: change input activations", "mse_valid_vec, p.stage, skip=math.floor(epoch / 5 / p.display_step)) #4.save save_bottleneck_representation print(\"> save bottleneck_representation\") code_bottleneck_input", "Input) '), xlabel='Ground Truth', ylabel='Input', dir=gene_dir ) # Discretize gene expression values #", "log_dir, rand_state = 3) toc_stop = time.time() time_finish = round((toc_stop - tic_start), 2)", "ground truth # when ground truth is not provide, # input is used", "M'.format(usage())) if p.fname_input.endswith('h5'): # for 10x genomics large h5 files input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input,", "1000 in default; if sample_size is less than the number of cells (m),", "mse_valid_vec))) _ = pd.DataFrame(data=_, index=epoch_log, columns=['Epoch', 'MSE_batch', 'MSE_valid'] ).set_index('Epoch') _.to_csv(\"./{}/mse.csv\".format(stage)) #def learning_curve_mse_nz(skip=1): def", "in the main tf.reset_default_graph() # define placeholders and variables X = tf.placeholder(tf.float32, [None,", "imputation Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids) latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(), pIn_holder: 1,", "std_max), dir=p.tag) scimpute.hist_df( g_std_df, xlab='Standard Deviation', title='Ground Truth({})'.format(p.name_input), range=(std_min, std_max), dir=p.tag) scimpute.hist_df( std_ratio_yx_df,", "mse_nz_valid_vec.append(mse_nz_valid) #2.7. training epochs (1-) for epoch in range(1, p.max_training_epochs+1): tic_cpu, tic_wall =", "scimpute.hist_df( X, xlab='Expression', title='Input({})'.format(p.name_input), dir=p.tag, range=[min_expression, max_expression]) scimpute.hist_df( G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth), dir=p.tag,", "tf.name_scope('Encoder_L3'): e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd) e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3,", "min_g = scimpute.max_min_element_in_arrs([G.values]) print('Max in G is {}, Min in G is{}'.format(max_g, min_g))", "1 # interval on learning curve p.snapshot_step = 5 # interval of saving", "pIn_holder, pHidden_holder, n # return a_bottleneck, h(d_a1) tf.set_random_seed(rand_state) # seed global e_w1, e_b1,", "reporter vectors num_batch = int(math.floor(len(train_idx) // p.batch_size)) # floor epoch_log = [] mse_nz_batch_vec,", "= input_matrix[rand_idx, :].todense() sample_input_cell_ids = cell_ids[rand_idx] del rand_idx gc.collect() np.random.seed() else: sample_input =", "Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation) if p.fname_input == p.fname_ground_truth: G = X else: G", "Curve MSE.{}\".format(stage), ylabel='MSE (X vs Y, nz)', dir=stage, skip=skip ) _ = np.asarray(list(zip(epoch_log,", "= input_train.todense() sample_valid = input_valid.todense() sample_input_cell_ids = cell_ids sample_train_cell_ids = cell_ids_train sample_valid_cell_ids =", "MODE OR NOT if p.test_flag: print('in test mode') Y = Y.ix[0:p.m, 0:p.n] G", "SUMMARY print('\\nIn this code, matrices should have already been transformed into cell_row') print('Y", "scimpute.max_min_element_in_arrs([Y.values]) print('Max in Y is {}, Min in Y 
is{}'.format(max_y, min_y)) max_g, min_g", "# e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder) # # with tf.name_scope('Decoder_L4'): #", "or (epoch % p.display_step == 0): tic_log = time.time() print('#Epoch {} took: {}", "=pd.DataFrame(data = std_ratio_yx_data, index=X.columns, columns=['sd_ratio']) std_ratio_yg_data = [(y/x if x!=0 else None) for", "large h5 files input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input) # gene_be_matrix.matrix = input_obj.matrix.log1p() input_matrix", "csv files Parameters: ----------- skip: epoch_log: mse_nz_batch_vec: mse_nz_valid_vec: stage: ''' print('> plotting learning", "gene pairs\") scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir) scimpute.gene_pair_plot(X, list=List, tag='(Input)', dir=gene_pair_dir) scimpute.gene_pair_plot(G, list=List, tag='(Ground_truth)',", "dir=gene_dir ) # Discretize gene expression values # and re-generate pairwise plots Y", "truth # and of correlations between cells in imputation and ground truth #", "#2.3 define loss # input X, h, p # return mse_nz, mse, reg_term", "p.max_training_epochs = 10 # 3L:100, 5L:1000, 7L:1000, 9L:3000 p.display_step = 1 # interval", "scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd) d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder) with tf.name_scope('Decoder_L2'):", "valid_idx, test_idx = \\ scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c) cell_ids_train = cell_ids[train_idx] cell_ids_valid =", "training mse and mse_nz of the last batch mse_batch, mse_nz_batch, h_batch = sess.run(", "p.ori_input) X, gene_ids, cell_ids = read_data(p) X = pd.DataFrame(data=X.todense(), index=cell_ids, columns=gene_ids) Y =", "title='', range=(std_min, std_max), dir=p.tag) scimpute.hist_df( std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth", "= [(y/x if x!=0 else None) for y, x in zip(y_std_df.values, g_std_df.values)] std_ratio_yg_df", "#keep_prob for dropout pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')#keep_prob for dropout #2.2 define layers and", "= sess.run( [mse, mse_nz, h], feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0} ) #", "# PRINT PARAMETERS print('\\nmode:', p.mode) print('\\nData:') print('fname_input:', p.fname_input) print('name_input:', p.name_input) print('ori_input:', p.ori_input) print('transformation_input:',", "(1000 * 300). Delete later if p.test_flag: print('in test mode') input_matrix = input_matrix[:p.m,", "{:0.1f} M'.format(usage())) # debug # print('d_w1', sess.run(d_w1[1, 0:4])) # verified when GradDescent used", "scimpute def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1): '''Save mse curves to csv files", "m) print('imputation finished') #toc_stop = time.time() #print(\"reading took {:.1f} seconds\".format(toc_stop - tic_start)) exit()", "round(mse_train, 3),\"mse_valid=\", round(mse_valid, 3)) mse_batch_vec.append(mse_train) mse_valid_vec.append(mse_valid) mse_nz_batch_vec.append(mse_nz_train) mse_nz_valid_vec.append(mse_nz_valid) #2.7. 
def calculate_MSEs(X, Y, G):
    '''calculate MSEs
    MSE between imputation and input
    MSE between imputation and ground truth
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    Return
    -----------
    4 MSEs
    '''
    print('\n> MSE Calculation')
    max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
    print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))
    max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
    print('Max in G is {}, Min in G is {}'.format(max_g, min_g))

    mse1_nz = scimpute.mse_omega(Y, X)
    mse1_nz = round(mse1_nz, 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)

    mse1 = scimpute.mse(Y, X)
    mse1 = round(mse1, 7)
    print('MSE1 between Imputation and Input: ', mse1)

    mse2_nz = scimpute.mse_omega(Y, G)
    mse2_nz = round(mse2_nz, 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)

    mse2 = scimpute.mse(Y, G)
    mse2 = round(mse2, 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)
    return mse1_nz, mse1, mse2_nz, mse2


def analyze_variation_in_genes(X, Y, G, p):
    '''calculate the standard deviation (SD) in each gene
    write SDs to files
    plot histograms of SDs
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    Return
    -----------
    None
    '''
    print('\n calculating standard deviation in each gene for input and imputed matrix')
    y_std_df, _ = scimpute.nz_std(Y, X)  # SD of the imputed matrix (call form inferred)
    x_std_df, g_std_df = scimpute.nz_std(X, G)  # purpose: compare G with Y

    # std_ratio_yx_df = pd.DataFrame(data=y_std_df.values / x_std_df.values,
    #                                index=X.columns, columns=['sd_ratio'])
    # std_ratio_yg_df = pd.DataFrame(data=y_std_df.values / g_std_df.values,
    #                                index=X.columns, columns=['sd_ratio'])
    std_ratio_yx_data = [(y / x if x != 0 else None)
                         for y, x in zip(y_std_df.values, x_std_df.values)]
    std_ratio_yx_df = pd.DataFrame(data=std_ratio_yx_data, index=X.columns,
                                   columns=['sd_ratio'])
    std_ratio_yg_data = [(y / x if x != 0 else None)
                         for y, x in zip(y_std_df.values, g_std_df.values)]
    std_ratio_yg_df = pd.DataFrame(data=std_ratio_yg_data, index=X.columns,
                                   columns=['sd_ratio'])

    std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min())
    std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max())

    print('generating histograms of standard deviations')
    scimpute.hist_df(y_std_df, xlab='Standard Deviation',
                     title='Imputation({})'.format(p.name_imputation),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(x_std_df, xlab='Standard Deviation',
                     title='Input({})'.format(p.name_input),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(g_std_df, xlab='Standard Deviation',
                     title='Ground Truth({})'.format(p.name_ground_truth),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD',
                     title='', range=(std_min, std_max), dir=p.tag)

    std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv')
    std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')
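# A small pandas sketch of a non-zero-aware per-gene SD, the quantity
# scimpute.nz_std is used for above (the exact scimpute implementation is not
# shown here; this is an illustrative stand-in). Zeros are treated as missing
# so they do not deflate the spread of expressed values.
def _demo_nz_std(df):
    masked = df.where(df > 0)  # zeros -> NaN, excluded from the SD
    return masked.std(axis=0)  # one SD per gene (column), NaNs skipped

# _demo_nz_std(pd.DataFrame({'g1': [0.0, 1.0, 3.0], 'g2': [2.0, 2.0, 2.0]}))
# -> g1: std of [1, 3] = 1.414...; g2: 0.0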
def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
    '''Calculate /and save/ the snapshot results of the current model on the
    whole dataset'''
    Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
    # save sample imputation
    Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
    return Y_input_df


#def save_whole_imputation:
def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                          input_matrix, gene_ids, cell_ids, p, m):
    '''calculate and save imputation results for an input matrix at the
    'impute' mode. If the number of cells is larger than a threshold
    (large_size: 1e5), save results of m//p.sample_size 'folds'.'''
    if m > p.large_size:
        # impute on small data blocks to avoid high memory cost
        n_out_batches = m // p.sample_size
        print('num_out_batches:', n_out_batches)
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
            for i_ in range(n_out_batches + 1):
                start_idx = i_ * p.sample_size
                end_idx = min((i_ + 1) * p.sample_size, m)
                if start_idx >= end_idx:
                    break  # guard: final fold is empty when p.sample_size divides m
                print('saving:', start_idx, end_idx)
                x_out_batch = input_matrix[start_idx:end_idx, :].todense()
                y_out_batch = sess.run(
                    h, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                df_out_batch = pd.DataFrame(
                    data=y_out_batch, columns=gene_ids,
                    index=cell_ids[range(start_idx, end_idx)])
                latent_code = sess.run(
                    a_bottleneck, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                latent_code_df = pd.DataFrame(
                    data=latent_code, index=cell_ids[range(start_idx, end_idx)])
                # write the header with the first fold only, then append
                if i_ == 0:
                    df_out_batch.to_csv(handle, float_format='%.6f')
                    latent_code_df.to_csv(handle2, float_format='%.6f')
                else:
                    df_out_batch.to_csv(handle, header=None, float_format='%.6f')
                    latent_code_df.to_csv(handle2, header=None, float_format='%.6f')
                print('RAM usage during mini-batch imputation and saving output: '
                      '{} M'.format(usage()))
        handle2.close()
    else:
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        # save sample imputation
        Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                                        pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids)
        print('RAM usage during whole data imputation and saving output: ',
              '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
        scimpute.save_hd5(latent_code_df, "{}/latent_code.{}.hd5".format(p.stage, p.stage))
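# Worked example of the fold boundaries above, assuming p.large_size = 1e5 and
# p.sample_size = 100000 (illustrative values, actually set in global_params.py):
# with m = 250000 cells, n_out_batches = 2 and the loop visits
#   i_ = 0 -> rows [0, 100000)
#   i_ = 1 -> rows [100000, 200000)
#   i_ = 2 -> rows [200000, 250000)
# so every cell is imputed exactly once and the csv grows fold by fold.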
def visualize_weight(sess, stage, w_name, b_name):
    # w_name/b_name are looked up among the module-level tensors that
    # build_late() declares global (e.g. 'e_w1', 'e_b1')
    w = eval(w_name)
    b = eval(b_name)
    w_arr = sess.run(w)
    b_arr = sess.run(b)
    b_arr = b_arr.reshape(len(b_arr), 1)
    b_arr_T = b_arr.T
    scimpute.visualize_weights_biases(
        w_arr, b_arr_T, '{},{}.{}'.format(w_name, b_name, stage), dir=stage)


def visualize_weights(sess, stage, en_de_layers):
    for l1 in range(1, en_de_layers + 1):
        encoder_weight = 'e_w' + str(l1)
        encoder_bias = 'e_b' + str(l1)
        visualize_weight(sess, stage, encoder_weight, encoder_bias)
        decoder_bias = 'd_b' + str(l1)
        decoder_weight = 'd_w' + str(l1)
        visualize_weight(sess, stage, decoder_weight, decoder_bias)


def save_weights(sess, stage, en_de_layers):
    print('save weights in npy')
    for l1 in range(1, en_de_layers + 1):
        encoder_weight_name = 'e_w' + str(l1)
        encoder_bias_name = 'e_b' + str(l1)
        decoder_bias_name = 'd_b' + str(l1)
        decoder_weight_name = 'd_w' + str(l1)
        np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage),
                sess.run(eval(encoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage),
                sess.run(eval(decoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage),
                sess.run(eval(encoder_bias_name)))
        np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage),
                sess.run(eval(decoder_bias_name)))


def usage():
    '''Return resident memory of this process in MB'''
    process = psutil.Process(os.getpid())
    ram = process.memory_info()[0] / float(2 ** 20)
    ram = round(ram, 1)
    return ram

# sys.path.append('./bin')
# print('sys.path', sys.path)
# print('python version:', sys.version)
# print('tf.__version__', tf.__version__)
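# Design note: the three functions above find each tensor with eval() on names
# such as 'e_w1', which only works because build_late() declares those names
# global. A dictionary keyed by layer name would avoid eval(); a minimal
# alternative sketch (the `layers` dict is hypothetical, not part of this file):
#
#   layers = {'e_w1': e_w1, 'e_b1': e_b1}   # filled where the layers are built
#   np.save('{}/e_w1.{}'.format(stage, stage), sess.run(layers['e_w1']))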
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3):
    #5.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n; return: a_bottleneck, h (d_a1)
    tf.set_random_seed(rand_state)  # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
    global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
    if p.L == 7:  # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # a fourth layer pair, kept commented out in the source:
        # with tf.name_scope('Encoder_L4'):
        #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        # with tf.name_scope('Decoder_L4'):
        #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', ...)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:  # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        a_bottleneck = e_a2
    elif p.L == 3:  # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
        a_bottleneck = e_a1
    else:
        raise Exception("{}L not implemented; only 3, 5, 7 implemented".format(p.L))
    h = d_a1
    return a_bottleneck, h
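# Shape walk-through for the 7-layer case above, assuming illustrative sizes
# n = 300 genes and n_hidden_1/2/3 = 400/200/100 (hypothetical values, set in
# global_params.py):
#   X (?, 300) -> e_a1 (?, 400) -> e_a2 (?, 200) -> e_a3 (?, 100)   # bottleneck
#   e_a3 -> d_a3 (?, 200) -> d_a2 (?, 400) -> d_a1 == h (?, 300)
# The decoder mirrors the encoder, so h has the same gene dimension as X.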
def build_metrics(X, h, coef):
    #2.3 define loss
    # input: X, h, coef; return: mse_nz, mse, reg_term
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if 0, 1 if > 0; not possibly < 0 in our data
        mse_nz = tf.reduce_mean(
            tf.multiply(
                tf.pow(X - h, 2),
                omega
            )
        )
        mse = tf.reduce_mean(tf.pow(X - h, 2))  # for report
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
        tf.summary.scalar('mse__Y_vs_X', mse)
    return mse_nz, mse, reg_term
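# A minimal NumPy sketch of what mse_nz computes (standalone, not part of the
# TensorFlow graph): squared errors are kept only where the input is non-zero,
# so dropout zeros in X do not penalize the reconstruction.
#
#   import numpy as np
#   X = np.array([[0.0, 2.0], [1.0, 0.0]])
#   h = np.array([[0.5, 1.0], [1.0, 0.5]])
#   omega = np.sign(X)                       # [[0, 1], [1, 0]]
#   mse_nz = np.mean((X - h) ** 2 * omega)   # 1.0 / 4 = 0.25
#   mse = np.mean((X - h) ** 2)              # (0.25 + 1.0 + 0.0 + 0.25) / 4 = 0.375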
def load_params(mode, infile):
    '''load the 'global_params.py' file'''
    cwd = os.getcwd()
    param_file = 'global_params.py'
    param_name = param_file.rstrip('.py')
    p = SourceFileLoader(param_name, cwd + '/' + param_file).load_module()
    p.fname_input = infile
    p.mode = mode
    if mode == 'pre-training':
        # step1/rand_init for pre-training on reference
        p.stage = 'step1'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L, 3e-5 for 9L
    elif mode == 'translate':
        # step2/load_saved from step1, for transfer learning
        p.stage = 'step2'  # step1/step2 (not others)
        p.run_flag = 'load_saved'  # rand_init/load_saved
        p.learning_rate = 3e-5  # step2: 3e-5 for 3-7L, 3e-6 for 9L
    elif mode == 'late':
        # step2/rand_init for one step training
        p.stage = 'step2'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4
    elif mode == 'impute':
        # step2/load_saved/learning_rate=0, just impute and output
        p.stage = 'impute'
        p.run_flag = 'impute'
        p.learning_rate = 0.0
    elif mode == 'analysis':
        p.tag = 'Eval'
        p.stage = 'Eval'
    else:
        print('The mode you entered cannot be recognized:',
              'pre-training | late | translate | impute | analysis')
        p.mode = 'invalid'
        return p
    if p.test_flag:
        p.max_training_epochs = 10  # 3L:100, 5L:1000, 7L:1000, 9L:3000
        p.display_step = 1  # interval on learning curve
        p.snapshot_step = 5  # interval of saving session, imputation
        p.m = 1000
        p.n = 300
        p.sample_size = 240  # small sample for quick test runs
        print('in test mode\n',
              'num-genes set to {}, num-cells set to {}\n'.format(p.n, p.m),
              'sample size set to {}'.format(p.sample_size))
    return p


# to do: modify to display based on mode
def display_params(p):
    # PRINT PARAMETERS
    print('\nmode:', p.mode)
    print('\nData:')
    print('fname_input:', p.fname_input)
    print('name_input:', p.name_input)
    print('ori_input:', p.ori_input)
    print('transformation_input:', p.transformation_input)
    if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode == 'translate'):
        print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c))
        print('\nParameters:')
        print('mse_mode:', p.mse_mode)
        print('stage:', p.stage)
        print('init:', p.run_flag)
        print('test_mode:', p.test_flag)
        print('total number of layers: {}'.format(p.L))
        for l_tmp in range(1, p.l + 1):
            print("n_hidden{}: {}".format(l_tmp, eval('p.n_hidden_' + str(l_tmp))))
        print('learning_rate:', p.learning_rate)
        print('reg_coef:', p.reg_coef)
        print('batch_size:', p.batch_size)
        print('sample_size: ', p.sample_size)
        print('pIn:', p.pIn)
        print('pHidden:', p.pHidden)
        print('max_training_epochs:', p.max_training_epochs)
        print('display_step', p.display_step)
        print('snapshot_step', p.snapshot_step)
    elif p.mode == 'analysis':
        print('fname_imputation:', p.fname_imputation)
        print('transformation_imputation', p.transformation_imputation)
        print('fname_ground_truth: ', p.fname_ground_truth)
        print('transformation_ground_truth', p.transformation_ground_truth)
        print('gene_pair_list: ', p.gene_pair_list)
    print('\n')
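# Mode-to-configuration mapping in load_params(), with an illustrative
# invocation (the file name is hypothetical):
#
#   python late.py -mode late -infile data.hd5
#
#   mode          stage    run_flag    learning_rate
#   pre-training  step1    rand_init   3e-4 (3-7L; 3e-5 for 9L)
#   late          step2    rand_init   3e-4
#   translate     step2    load_saved  3e-5 (3-7L; 3e-6 for 9L)
#   impute        impute   impute      0.0
#   analysis      Eval     -           -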
def read_data(p):
    '''READ DATA

    Parameters
    ------------
    p: parameters from global_params.py and example.py

    Return
    -----------
    input_matrix (csr), gene_ids, cell_ids
    '''
    print('>READING DATA..')
    print('RAM usage before reading data: {} M'.format(usage()))
    if p.fname_input.endswith('h5'):
        # for 10x genomics large h5 files
        input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input)
        # gene_be_matrix.matrix = input_obj.matrix.log1p()
        input_matrix = input_obj.matrix
        gene_ids = input_obj.gene_ids
        cell_ids = input_obj.barcodes
        del input_obj
        print('RAM usage after reading sparse matrix: {} M'.format(usage()))
        gc.collect()
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_matrix = scimpute.sparse_matrix_transformation(input_matrix, p.transformation_input)
        gc.collect()
        print('RAM usage after {} transformation: {} M'.format(p.transformation_input, usage()))
        # Test or not: only use a small portion of the data (1000 * 300). Delete later
        if p.test_flag:
            print('in test mode')
            input_matrix = input_matrix[:p.m, :p.n]
            gene_ids = gene_ids[:p.n]
            cell_ids = cell_ids[:p.m]
            gc.collect()
    else:
        # For smaller files (hd5, csv, csv.gz)
        input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
        print('RAM usage after reading input_df: {} M'.format(usage()))
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_df = scimpute.df_transformation(
            input_df.transpose(), transformation=p.transformation_input
        ).transpose()  # [genes, cells] in df_trans()
        print('pandas input_df mem usage: ')
        input_df.info(memory_usage='deep')
        # Test or not
        if p.test_flag:
            print('in test mode')
            input_df = input_df.ix[:p.m, :p.n]
            gc.collect()
        # To sparse
        input_matrix = csr_matrix(input_df)  # todo: directly read into csr, get rid of input_df
        gene_ids = input_df.columns
        cell_ids = input_df.index
        print('RAM usage before deleting input_df: {} M'.format(usage()))
        del input_df
        gc.collect()  # working on mac
        print('RAM usage after deleting input_df: {} M'.format(usage()))
    # Summary of data
    print("name_input:", p.name_input)
    _ = pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20], columns=gene_ids[:4])
    print("input_df:\n", _, "\n")
    m, n = input_matrix.shape  # m: n_cells; n: n_genes
    print('input_matrix: {} cells, {} genes\n'.format(m, n))
    return input_matrix, gene_ids, cell_ids
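# Why convert to csr before splitting: scipy's csr_matrix supports cheap row
# slicing, which is exactly what split__csr_matrix() and the mini-batch loops
# below need. A tiny standalone illustration:
#
#   from scipy.sparse import csr_matrix
#   sm = csr_matrix([[0, 1.0], [2.0, 0]])
#   sm[0:1, :].todense()   # -> matrix([[0., 1.]]); only one row materialized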
def late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state=3):
    ##1. split data and save indexes
    m, n = input_matrix.shape
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
    print('RAM usage after splitting input data is: {} M'.format(usage()))

    # todo: for backward support for older parameter files only;
    # sample_size is 1000 by default. If sample_size is less than the number of
    # cells (m), we reconstruct the training and validation sets by randomly
    # sampling.
    try:
        p.sample_size
        sample_size = p.sample_size
    except AttributeError:
        sample_size = int(9e4)
    if sample_size < m:
        rand_idx = np.random.choice(range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
        #?? the following sample_input is a matrix sampled randomly; should it
        # be a matrix containing sample_train and sample_valid?
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        del rand_idx
        gc.collect()
        np.random.seed()  # re-randomize the RNG state
    else:
        sample_input = input_matrix.todense()
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input_cell_ids = cell_ids
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
    print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)))

    ##2. model training and validation
    #2.1 init --> keep this in the main
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n], name='X_input')  # input
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn')  # keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')  # keep_prob for dropout
    #2.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n; return: a_bottleneck, h(d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3)
    #2.3 define loss
    # input: X, h, p; return: mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)
    #2.4 construct the trainer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)
    if p.mse_mode in ('mse_nz', 'mse_omega'):
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')
    #2.5 Init a session according to the run_flag
    sess = tf.Session()
    saver = tf.train.Saver()  # restore variables
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        print('*** In impute mode, loading "step2.ckpt"..')
        saver.restore(sess, './step2/step2.ckpt')
        p.max_training_epochs = 0
        p.learning_rate = 0.0
        ## save_whole_imputation
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                              input_matrix, gene_ids, cell_ids, p, m)
        print('imputation finished')
        #toc_stop = time.time()
        #print("reading took {:.1f} seconds".format(toc_stop - tic_start))
        exit()
    else:
        raise Exception('run_flag spelled wrong')
    # define tensor_board writer
    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)
    # prep mini-batch, and reporter vectors
    num_batch = int(math.floor(len(train_idx) // p.batch_size))  # floor
    epoch_log = []
    mse_nz_batch_vec, mse_nz_valid_vec = [], []  #, mse_nz_train_vec
    mse_batch_vec, mse_valid_vec = [], []  # mse = MSE(X, h)
    #msej_batch_vec, msej_valid_vec = [], []  # msej = MSE(X, h), for genej, nz_cells
    print('RAM usage after building the model is: {} M'.format(usage()))
    epoch = 0

    #2.6 pre-training epoch (0): evaluate the results before training steps
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run(
        [mse, mse_nz], feed_dict={X: sample_train, pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run(
        [mse, mse_nz], feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_train=", round(mse_train, 3), "mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)

    #2.7 training epochs (1-)
    for epoch in range(1, p.max_training_epochs + 1):
        # time.clock() is the CPU timer (removed in Python 3.8;
        # time.process_time() is the modern equivalent)
        tic_cpu, tic_wall = time.clock(), time.time()
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)
        #2.7.1 training model on mini-batches
        for i in range(num_batch):
            # x_batch
            indices = np.arange(p.batch_size * i, p.batch_size * (i + 1))
            ridx_batch = ridx_full[indices]
            # x_batch = df1_train.ix[ridx_batch, :]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch, pIn_holder: p.pIn, pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.clock(), time.time()

        #2.7.2 log the results of display steps
        if (epoch == 1) or (epoch % p.display_step == 0):
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i + 1, epoch * (i + 1)))
            print('RAM usage: {:0.1f} M'.format(usage()))
            # debug
            # print('d_w1', sess.run(d_w1[1, 0:4]))  # verified when GradDescent used
            # training mse and mse_nz of the last batch
            mse_batch, mse_nz_batch, h_batch = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0})
            # validation mse and mse_nz of the sample validation set (1000)
            mse_valid, mse_nz_valid, Y_valid = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
            toc_log = time.time()
            print('mse_nz_batch:{}; mse_nz_valid: {}'.format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:{}; mse_valid: {}'.format(mse_batch, mse_valid))
            print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))
            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)
            epoch_log.append(epoch)

        #2.7.3 save snapshot step
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()
            #1. save imputation results: if the input matrix is large
            # (m > p.large_size), only save the imputation results of a small
            # sample set (sample_input)
            print("> Impute and save.. ")
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/sample_imputation.{}.hd5".format(p.stage, p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
            #2. save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)
            #3. save the training and test curve
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                #learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,
                                   p.stage, skip=math.floor(epoch / 5 / p.display_step))
            #4. save bottleneck representation
            code_bottleneck_input = sess.run(
                a_bottleneck,
                feed_dict={X: sample_input, pIn_holder: 1, pHidden_holder: 1})
            np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input)
            #save_weights()
            save_weights(sess, p.stage, en_de_layers=p.l)
            #visualize_weights()
            visualize_weights(sess, p.stage, en_de_layers=p.l)
            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            # os.system(
            #     '''for file in {0}/*npy
            #     do python -u ...'''.format(p.stage)
            # )
            print('min_mse_valid till now: {}'.format(min_mse_valid))

    sess.close()
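# The epoch loop above shuffles once per epoch and then walks the permutation
# in contiguous slices; a standalone sketch with toy sizes (6 cells, batch 2):
#
#   import numpy as np
#   ridx_full = np.random.choice(6, 6, replace=False)   # e.g. [3 0 5 1 4 2]
#   for i in range(6 // 2):
#       ridx_batch = ridx_full[np.arange(2 * i, 2 * (i + 1))]
#       # each cell appears in exactly one mini-batch per epoch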
"en_de_layers): print('save weights in npy') for l1 in range(1, en_de_layers+1): encoder_weight_name = 'e_w'+str(l1)", "1, pHidden_holder: 1 } ) df_out_batch = pd.DataFrame( data=y_out_batch, columns=gene_ids, index=cell_ids[range(start_idx, end_idx)] )", "p.sample_size) print('pIn:', p.pIn) print('pHidden:', p.pHidden) print('max_training_epochs:', p.max_training_epochs) print('display_step', p.display_step) print('snapshot_step', p.snapshot_step) elif p.mode", "= scimpute.mse_omega(Y, X) mse1_nz = round(mse1_nz, 7) print('MSE1_NZ between Imputation and Input: ',", "scimpute.scatterplot2(G_j, X_j, range='same', title=str(str(j) + '\\n(Ground_truth vs Input) '), xlabel='Ground Truth', ylabel='Input', dir=gene_dir", "pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20], columns=gene_ids[:4]) print(\"input_df:\\n\", _, \"\\n\") m, n = input_matrix.shape # m:", "= scimpute.df_transformation(X.transpose(), transformation=p.transformation_input).transpose() if p.fname_input == p.fname_ground_truth: G = X else: G =", "change input activations if model changed # define input/output a_bottleneck = e_a3 elif", "DATA Parameters ------------ p: parameters from global_params.py and example.py Return ----------- X: input", "SD', title='', range=(std_min, std_max), dir=p.tag) scimpute.hist_df( std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground", "= 3e-5 # step2: 3e-5 for 3-7L, 3e-6 for 9L elif mode ==", "p = SourceFileLoader(param_name, cwd + '/' + param_file).load_module() p.fname_input = infile p.mode =", "= np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec))) _ = pd.DataFrame(data=_, index=epoch_log, columns=['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid'] ).set_index('Epoch') _.to_csv(\"./{}/mse_nz.csv\".format(stage))", "if p.run_flag == 'load_saved': print('*** In TL Mode') saver.restore(sess, \"./step1/step1.ckpt\") elif p.run_flag ==", "(epoch == p.max_training_epochs): tic_log2 = time.time() #1.save imputation results #if the input matrix", "pHidden_holder: 1}) np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input) #save_weights() save_weights(sess, p.stage, en_de_layers=p.l) #visualize_weights() visualize_weights(sess, p.stage, en_de_layers=p.l)", "help='mode options: pre-training | late | translate | impute | analysis') parser.add_argument('-infile', help='file", "max(G.values.max(), X.values.max(), Y.values.max()) min_expression = min(G.values.min(), X.values.min(), Y.values.min()) print('\\n max expression:', max_expression) print('\\n", "example.py Return ----------- X: input data matrix; genes in columns (same below) Y:", "= 'e_b'+str(l1) visualize_weight(sess, stage, encoder_weight, encoder_bias) decoder_bias = 'd_b'+str(l1) decoder_weight = 'd_w'+str(l1) visualize_weight(sess,", "files only # sample_size is 1000 in default; if sample_size is less than", "sets by randomly sampling. 
try: p.sample_size sample_size = p.sample_size except: sample_size = int(9e4)", "tf.name_scope('Decoder_L1'): d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1,", "cell_ids_test, fmt='%s') print('RAM usage after splitting input data is: {} M'.format(usage())) # todo:", "Y #std_ratio_yx_df = pd.DataFrame(data= y_std_df.values / x_std_df.values, index=X.columns, columns=['sd_ratio']) #std_ratio_yg_df = pd.DataFrame(data= y_std_df.values", "index=cell_ids[range(start_idx, end_idx)] ) latent_code = sess.run( a_bottleneck, feed_dict={ X: x_out_batch, pIn_holder: 1, pHidden_holder:", "p.snapshot_step) elif p.mode == 'analysis': print('fname_imputation:', p.fname_imputation) print('transformation_imputation', p.transformation_imputation) print('fname_ground_truth: ', p.fname_ground_truth) print('transformation_ground_truth',", "of correlations between cells in imputation and ground truth # when ground truth", "data imputation and saving output: ', '{} M'.format(usage())) scimpute.save_hd5(Y_input_df, \"{}/imputation.{}.hd5\".format(p.stage, p.stage)) scimpute.save_hd5(latent_code_df, \"{}/latent_code.{}.hd5\".format(p.stage,", "# step1/rand_init for pre-training on reference p.stage = 'step1' p.run_flag = 'rand_init' p.learning_rate", "# interval of saving session, imputation p.m = 1000 p.n = 300 p.sample_size", "Delete later if p.test_flag: print('in test mode') input_matrix = input_matrix[:p.m, :p.n] gene_ids =", "h, feed_dict={ X: x_out_batch, pIn_holder: 1, pHidden_holder: 1 } ) df_out_batch = pd.DataFrame(", "# sample_training and sample_valid rand_idx = np.random.choice(range(m), min(sample_size, m)) sample_input = input_matrix[rand_idx, :].todense()", "stage, encoder_weight, encoder_bias) decoder_bias = 'd_b'+str(l1) decoder_weight = 'd_w'+str(l1) visualize_weight(sess, stage, decoder_weight, decoder_bias)", "1.0, pIn_holder: 1.0} ) # validation mse and mse_nz of the sample validation", "handle: for i_ in range(n_out_batches+1): start_idx = i_*p.sample_size end_idx = min((i_+1)*p.sample_size, m) print('saving:',", "import psutil import time from scipy.sparse import csr_matrix import gc import matplotlib matplotlib.use('Agg')", "--> keep this section in the main optimizer = tf.train.AdamOptimizer(p.learning_rate) if p.mse_mode in", "changed # define input/output a_bottleneck = e_a2 elif p.L == 3: # change", "num-cells set to {}\\n'.format(p.n, p.m), 'sample size set to {}'.format(p.sample_size)) return p #", "= load_results(p) # calculate MSEs mse1_nz, mse1, mse2_nz, mse2 = calculate_MSEs(X, Y, G)", "tf.reset_default_graph() # define placeholders and variables X = tf.placeholder(tf.float32, [None, n], name='X_input') #", "p.n_hidden_4, p.sd) # # e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder) # #", "| late | translate | impute | analysis') p.mode = 'invalid' return p", "stage: ''' print('> plotting learning curves') scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, title=\"Learning Curve MSE_NZ.{}\".format(stage), ylabel='MSE_NZ", "print('MSE2 between Imputation and Ground_truth: ', mse2) return mse1_nz, mse1, mse2_nz, mse2 def", "visualize_all_genes(X, Y, G, p): ''' generate plots using all genes Parameters ------------ X:", "results of a small sample set (sample_input) print(\"> Impute and save.. 
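# What the sd_ratio columns capture, on toy per-gene SD vectors (values are
# illustrative only): a ratio below 1 means the imputation compressed that
# gene's variation relative to the reference.
#
#   y_sd = [0.8, 0.2]       # imputation
#   x_sd = [1.0, 0.0]       # input; the second gene has zero SD
#   ratio = [0.8, None]     # the x != 0 guard above avoids division by zero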
\") if", "range=(std_min, std_max), dir=p.tag) std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv') std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv') def visualize_all_genes(X, Y, G, p): ''' generate plots", "Imputation SD vs Input SD', title='', range=(std_min, std_max), dir=p.tag) scimpute.hist_df( std_ratio_yg_df, xlab='Ratio of", "= None scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info, title=p.name_imputation, dir=p.tag) scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info, title=p.name_input, dir=p.tag) scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info, title=p.name_ground_truth,", "Init a session accoding to the run_flag sess = tf.Session() # restore variables", "p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with tf.name_scope('Encoder_L2'): e_w2, e_b2", "impute | analysis') p.mode = 'invalid' return p if p.test_flag: p.max_training_epochs = 10", "d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd) d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)", "rid of input_df gene_ids = input_df.columns cell_ids = input_df.index print('RAM usage before deleting", "Y, G, p): '''calculate and visualize standard deviation in each gene write SDs", "p.transformation_ground_truth,'\\n', G.ix[0:20, 0:3]) print('Y.shape', Y.shape) print('X.shape', X.shape) print('G.shape', G.shape) return X, Y, G", "#visualize_weights() visualize_weights(sess, p.stage, en_de_layers=p.l) toc_log2 = time.time() log2_time = round(toc_log2 - tic_log2, 1)", "= scimpute.nz_std(X, Y) x_std_df, g_std_df = scimpute.nz_std(X, G) # purpose: compare G with", "(epochs) if (epoch == 1) or (epoch % p.display_step == 0): tic_log =", "std_max), dir=p.tag) std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv') std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv') def visualize_all_genes(X, Y, G, p): ''' generate plots using", "learning p.stage = 'step2' # step1/step2 (not others) p.run_flag = 'load_saved' # rand_init/load_saved", "trainer --> keep this section in the main optimizer = tf.train.AdamOptimizer(p.learning_rate) if p.mse_mode", "print('log time for each epoch: {}\\n'.format(round(toc_log - tic_log, 1))) mse_batch_vec.append(mse_batch) mse_valid_vec.append(mse_valid) mse_nz_batch_vec.append(mse_nz_batch) mse_nz_valid_vec.append(mse_nz_valid)", "/ x_std_df.values, index=X.columns, columns=['sd_ratio']) #std_ratio_yg_df = pd.DataFrame(data= y_std_df.values / g_std_df.values, index=X.columns, columns=['sd_ratio']) std_ratio_yx_data", "matrices print('\\n> Generating heatmaps of data matrices') range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values, X.values])", "p.gene_pair_list print(\">n> Scatterplots of selected gene pairs\") scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir) scimpute.gene_pair_plot(X, list=List,", "fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids): '''Calculate /and save/ the snapshot", "[genes, cells] in df_trans() print('pandas input_df mem usage: ') input_df.info(memory_usage='deep') # Test or", "Return ----------- ''' print('>READING DATA..') print('RAM usage before reading data: {} M'.format(usage())) if", "tf.sign(X) # 0 if 0, 1 if > 0; not possibly < 0", "mse, reg_term def load_params(mode, infile): '''load the 'global_params.py' file ''' cwd = os.getcwd()", "p.mse_mode == 'mse': #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step)) learning_curve_mse(epoch_log, 
mse_batch_vec, mse_valid_vec, p.stage, skip=math.floor(epoch", "save sample imputation Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids) latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),", "tf.placeholder(tf.float32, name='p.pHidden')#keep_prob for dropout #2.2 define layers and variables # input p, X,", "with tf.name_scope('Decoder_L3'): d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd) d_a3 = scimpute.dense_layer('decoder3', e_a3,", "p = load_params(argms.mode, argms.infile) if p.mode =='invalid': exit(0) ##2. refresh folder log_dir =", "of a small sample set (sample_input) print(\"> Impute and save.. \") if m", "cell_ids[rand_idx] del rand_idx gc.collect() np.random.seed() else: sample_input = input_matrix.todense() sample_train = input_train.todense() sample_valid", "imputation') scimpute.hist_2matrix_corr( G.values, Y.values, title=\"Correlation for each gene\\n(Ground_truth vs Imputation)\\n{}\\n{}\". format(p.name_ground_truth, p.name_imputation), dir=p.tag,", "late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state = 3) toc_stop = time.time() time_finish =", "eval(w_name) b = eval(b_name) w_arr = sess.run(w) b_arr = sess.run(b) b_arr = b_arr.reshape(len(b_arr),", "from importlib.machinery import SourceFileLoader import math import psutil import time from scipy.sparse import", "than the number of cells (m), # we reconstruct the training and validation", "Y: imputed data matrix G: ground truth Return ----------- 4 MSEs ''' print('\\n>", "9L:3000 p.display_step = 1 # interval on learning curve p.snapshot_step = 5 #", "% save_path) #3.save the training and test curve if p.mse_mode in ('mse_nz', 'mse_omega'):", "h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids): '''Calculate /and save/ the snapshot results", "after splitting input data is: {} M'.format(usage())) # todo: for backward support for", "log_dir + \"/{}.ckpt\".format(p.stage)) print(\"Model saved in: %s\" % save_path) #3.save the training and", "columns=['sd_ratio']) std_ratio_yx_data = [(y/x if x!=0 else None) for y, x in zip(y_std_df.values,", "dataset Parameters: ----------- ''' Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})", "CPU seconds; {} Wall seconds'.format( epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall,", "p.stage) print('init:', p.run_flag) print('test_mode:', p.test_flag) print('total number of layers: {}'.format(p.L)) for l_tmp in", "range='same', title=str(str(j) + '\\n(Ground Truth vs Input) '), xlabel='Ground Truth', ylabel='Input', dir=gene_dir )", "exit(0) ##2. refresh folder log_dir = './{}'.format(p.stage) scimpute.refresh_logfolder(log_dir) tic_start = time.time() #3. 
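# The batched branch of save_whole_imputation() above streams the imputed
# matrix to disk so only one p.sample_size-row slice is dense in RAM at a
# time, and writes the CSV header only for the first chunk. A minimal,
# self-contained sketch of that pattern on toy data; demo_chunked_csv and
# its arguments are illustrative, not part of the original script:
def demo_chunked_csv(out_path='demo_imputation.csv', m=10, n=4, chunk=3):
    ids = ['cell{}'.format(i) for i in range(m)]
    cols = ['gene{}'.format(j) for j in range(n)]
    data = np.arange(m * n, dtype=float).reshape(m, n)
    with open(out_path, 'w') as handle:
        for i_ in range(m // chunk + 1):
            start_idx, end_idx = i_ * chunk, min((i_ + 1) * chunk, m)
            if start_idx >= end_idx:
                break
            df = pd.DataFrame(data=data[start_idx:end_idx],
                              index=ids[start_idx:end_idx], columns=cols)
            if i_ == 0:
                df.to_csv(handle, float_format='%.6f')  # header written once
            else:
                df.to_csv(handle, header=None)          # append rows only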
def visualize_weight(sess, stage, w_name, b_name):
    w = eval(w_name)
    b = eval(b_name)
    w_arr = sess.run(w)
    b_arr = sess.run(b)
    b_arr = b_arr.reshape(len(b_arr), 1)
    b_arr_T = b_arr.T
    scimpute.visualize_weights_biases(w_arr, b_arr_T,
                                      '{},{}.{}'.format(w_name, b_name, stage),
                                      dir=stage)


def visualize_weights(sess, stage, en_de_layers):
    for l1 in range(1, en_de_layers + 1):
        encoder_weight = 'e_w' + str(l1)
        encoder_bias = 'e_b' + str(l1)
        visualize_weight(sess, stage, encoder_weight, encoder_bias)
        decoder_bias = 'd_b' + str(l1)
        decoder_weight = 'd_w' + str(l1)
        visualize_weight(sess, stage, decoder_weight, decoder_bias)


def save_weights(sess, stage, en_de_layers):
    print('save weights in npy')
    for l1 in range(1, en_de_layers + 1):
        encoder_weight_name = 'e_w' + str(l1)
        encoder_bias_name = 'e_b' + str(l1)
        decoder_bias_name = 'd_b' + str(l1)
        decoder_weight_name = 'd_w' + str(l1)
        np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage),
                sess.run(eval(encoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage),
                sess.run(eval(decoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage),
                sess.run(eval(encoder_bias_name)))
        np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage),
                sess.run(eval(decoder_bias_name)))


def usage():
    process = psutil.Process(os.getpid())
    ram = process.memory_info()[0] / float(2 ** 20)
    return ram

# sys.path.append('./bin')
# print('sys.path', sys.path)
#print('python version:', sys.version)
#print('tf.__version__', tf.__version__)
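# save_weights()/visualize_weights() above resolve tensors by eval() on names
# like 'e_w1', which only works because build_late() (below) declares those
# names global. A dictionary lookup is a safer equivalent; the sketch below
# is illustrative only, and the params dict is hypothetical, not part of the
# original script:
def save_weights_from_dict(sess, stage, params):
    # params: {'e_w1': <tf.Variable>, 'e_b1': <tf.Variable>, ...}
    for name, var in params.items():
        np.save('{}/{}.{}'.format(stage, name, stage), sess.run(var))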
def late_main(p, log_dir, rand_state=3):
    ##0. read data and extract gene IDs and cell IDs
    input_matrix, gene_ids, cell_ids = read_data(p)
    m, n = input_matrix.shape  # m: n_cells; n: n_genes

    ##1. split data and save indexes
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
    print('RAM usage after splitting input data is: {} M'.format(usage()))

    # todo: for backward support for older parameter files only.
    # sample_size is 1000 by default; if sample_size is less than the number
    # of cells (m), we reconstruct the training and validation sets by randomly sampling.
    try:
        p.sample_size
        sample_size = p.sample_size
    except:
        sample_size = int(9e4)
    if sample_size < m:
        np.random.seed(1)
        # sample_training and sample_valid
        rand_idx = np.random.choice(range(len(cell_ids_train)),
                                    min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(range(len(cell_ids_valid)),
                                    min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
        #?? the following sample_input is a matrix sampled randomly, and should it
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        del rand_idx
        gc.collect()
        np.random.seed()
    else:
        sample_input = input_matrix.todense()
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input_cell_ids = cell_ids
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
    print('len of sample_train: {}, sample_valid: {}, sample_input: {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)))

    #2. build the model
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n], name='X_input')  # input
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn')  # keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')  # keep_prob for dropout
    #2.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n; return: a_bottleneck, h (d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state)
    #2.3 define loss
    # input: X, h, p; return: mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)
    #2.4 construct the trainer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)
    if p.mse_mode in ('mse_omega', 'mse_nz'):
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')
    #2.5 Init a session according to the run_flag
    sess = tf.Session()
    saver = tf.train.Saver()  # restore variables
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        print('*** In impute mode loading "step2.ckpt"..')
        saver.restore(sess, './step2/step2.ckpt')
        p.max_training_epochs = 0
        p.learning_rate = 0.0
    else:
        raise Exception('run_flag err')
    # define tensor_board writer
    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)
    # prep mini-batch, and reporter vectors
    num_batch = int(math.floor(len(train_idx) // p.batch_size))  # floor
    epoch_log = []
    mse_nz_batch_vec, mse_nz_valid_vec = [], []
    mse_batch_vec, mse_valid_vec = [], []  # mse = MSE(X, h)
    #msej_batch_vec, msej_valid_vec = [], []  # msej = MSE(X, h), for genej, nz_cells
    print('RAM usage after building the model is: {} M'.format(usage()))

    epoch = 0
    #2.6. pre-training epoch (0)
    #save imputation results before training steps
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run([mse, mse_nz],
                                       feed_dict={X: sample_train, pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run([mse, mse_nz],
                                       feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=", round(mse_nz_valid, 3))
    print("mse_train=", round(mse_train, 3), "mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)

    #2.7. training epochs (1-)
    for epoch in range(1, p.max_training_epochs + 1):
        tic_cpu, tic_wall = time.clock(), time.time()
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)
        #2.7.1 training
        for i in range(num_batch):
            ridx_batch = ridx_full[p.batch_size * i: p.batch_size * (i + 1)]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch, pIn_holder: p.pIn, pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.clock(), time.time()
        #2.7.2 save the results of epoch 1 and all display steps (epochs)
        if (epoch == 1) or (epoch % p.display_step == 0):
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i + 1, epoch * (i + 1)))
            print('RAM usage: {:0.1f} M'.format(usage()))
            # debug
            # print('d_w1', sess.run(d_w1[1, 0:4]))  # verified when GradDescent used
            # training mse and mse_nz of the last batch
            mse_batch, mse_nz_batch, h_batch = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0})
            # validation mse and mse_nz of the sample validation set
            mse_valid, mse_nz_valid = sess.run(
                [mse, mse_nz],
                feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
            toc_log = time.time()
            print('mse_nz_batch: {}; mse_omega_valid: {}'.format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
            print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))
            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)
            epoch_log.append(epoch)
        #2.7.3 save snapshot step
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()
            #1.save imputation results
            #if the input matrix is large (m > p.large_size), only save the
            #imputation results of a small sample set (sample_input)
            print("> Impute and save.. ")
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/sample_imputation.{}.hd5".format(p.stage, p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
            #2.save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)
            #3.save the training and test curve
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                #learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                #learning_curve_mse(skip=math.floor(epoch / 5 / p.display_step))
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,
                                   p.stage, skip=math.floor(epoch / 5 / p.display_step))
            #4.save the bottleneck representation
            code_bottleneck_input = sess.run(a_bottleneck,
                                             feed_dict={X: sample_input, pIn_holder: 1, pHidden_holder: 1})
            np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input)
            #save_weights()
            save_weights(sess, p.stage, en_de_layers=p.l)
            #visualize_weights()
            visualize_weights(sess, p.stage, en_de_layers=p.l)
            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            # os.system(
            #     '''for file in {0}/*npy
            #     do python -u weight_clustmap.py $file {0}
            #     done'''.format(p.stage)
            # )
            print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
            print('snapshot_step: {}s'.format(log2_time))

    if p.mode == 'impute':
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                              input_matrix, gene_ids, cell_ids, p, m)
        print('imputation finished')
        #toc_stop = time.time()
        #print("reading took {:.1f} seconds".format(toc_stop - tic_start))

    batch_writer.close()
    valid_writer.close()
    sess.close()
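# Each epoch in late_main() draws a fresh permutation of the training rows
# (np.random.choice with replace=False) and slices it into contiguous
# mini-batches, so every cell is visited at most once per epoch in random
# order; the remainder rows (len % batch_size) are skipped that epoch.
# A minimal NumPy sketch of the scheme with toy sizes, not part of the
# original script:
def demo_minibatch_order(n_rows=10, batch_size=4):
    ridx_full = np.random.choice(n_rows, n_rows, replace=False)  # permutation
    batches = []
    for i in range(n_rows // batch_size):
        batches.append(ridx_full[batch_size * i: batch_size * (i + 1)])
    return batches  # e.g. two batches of 4 rows; 2 leftover rows skipped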
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3):
    #5.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n
    # return: a_bottleneck, h (d_a1)
    tf.set_random_seed(rand_state)  # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3, \
        d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
    if p.L == 7:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # with tf.name_scope('Encoder_L4'):
        #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        # with tf.name_scope('Decoder_L4'):
        #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        #     d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a2
    elif p.L == 3:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))
    h = d_a1
    return a_bottleneck, h


def build_metrics(X, h, coef):
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if 0, 1 if > 0; not possibly < 0 in our data
        mse_nz = tf.reduce_mean(
            tf.multiply(
                tf.pow(X - h, 2),
                omega
            )
        )
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
        mse = tf.reduce_mean(tf.pow(X - h, 2))  # mse = MSE(X, h)
    return mse_nz, mse, reg_term
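# build_metrics() masks the squared error with omega = tf.sign(X), so zero
# entries of the input contribute nothing to the numerator of mse_nz (note
# that reduce_mean still divides by the total entry count, not the non-zero
# count), while mse averages the unmasked error. A small NumPy check of the
# same arithmetic; illustrative only, not part of the original script:
def demo_mse_nz():
    X_arr = np.array([[0.0, 2.0], [1.0, 0.0]])
    h_arr = np.array([[0.5, 1.0], [1.0, 0.3]])
    omega = np.sign(X_arr)  # 1 where expression observed, 0 at zeros
    mse_nz = np.mean(np.multiply((X_arr - h_arr) ** 2, omega))
    mse = np.mean((X_arr - h_arr) ** 2)
    return mse_nz, mse  # (0.25, 0.335): zeros are penalized by mse but not mse_nz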
refresh folder", "on mse') trainer = optimizer.minimize(mse + reg_term) else: raise Exception('mse_mode spelled wrong') #2.5", "else: print('The mode you entered cannot be recognized.') print('Valid mode options: pre-training |", "ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag) # PCA and tSNE plots print('\\n> Generating PCA and", "late_main(p, log_dir, rand_state=3): ##0. read data and extract gene IDs and cell IDs", "an input matrix at the 'impute' mode. If the number of cells is", "whole data imputation and saving output: ', '{} M'.format(usage())) scimpute.save_hd5(Y_input_df, \"{}/imputation.{}.hd5\".format(p.stage, p.stage)) scimpute.save_hd5(latent_code_df,", "options: pre-training | late | translate | impute | analysis') parser.add_argument('-infile', help='file path", "3e-5 for 3-7L, 3e-6 for 9L elif mode == 'late': # step2/rand_init for", "Exception('run_flag err') # define tensor_board writer batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph) valid_writer", "+ \"/{}.ckpt\".format(p.stage)) print(\"Model saved in: %s\" % save_path) #3.save the training and test", "= sess.run(h, feed_dict={X: input_matrix.todense(), pIn_holder: 1, pHidden_holder: 1}) # save sample imputation Y_input_df", "# verified when GradDescent used # training mse and mse_nz of the last", "Y, G def calculate_MSEs(X, Y, G): '''calculate MSEs MSE between imputation and input", "#def save_whole_imputation: def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder,pHidden_holder, input_matrix, gene_ids, cell_ids, p, m):", "#print('python version:', sys.version) #print('tf.__version__', tf.__version__) def late_main(p, log_dir, rand_state=3): ##0. read data and", "n = input_matrix.shape # m: n_cells; n: n_genes print('input_matrix: {} cells, {} genes\\n'.format(m,", "tic_wall, 2) )) print('num-mini-batch per epoch: {}, till now: {}'.format(i+1, epoch*(i+1))) print('RAM usage:", "= G.ix[0:p.m, 0:p.n] X = X.ix[0:p.m, 0:p.n] # INPUT SUMMARY print('\\nIn this code,", "elif mode == 'analysis': p.tag = 'Eval' p.stage = 'Eval' else: print('The mode", "tf.train.AdamOptimizer(p.learning_rate) if p.mse_mode in ('mse_omega', 'mse_nz'): print('training on mse_nz') trainer = optimizer.minimize(mse_nz +", "n, p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with tf.name_scope('Encoder_L2'): e_w2,", "range='same', title=str(str(j) + '\\n(Ground_truth vs Imputation) '), xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir ) scimpute.scatterplot2(G_j,", "cell\\n(Ground_truth vs Imputation)\\n{}\\n{}\". 
format(p.name_ground_truth, p.name_imputation), dir=p.tag, mode='row-wise', nz_mode='first' ) # heatmaps of data", "1 and all display steps (epochs) if (epoch == 1) or (epoch %", "----------- None ''' gene_pair_dir = p.tag+'/pairs' List = p.gene_pair_list print(\">n> Scatterplots of selected", "= time.clock(), time.time() #2.7.2 save the results of epoch 1 and all display", "plots') if p.cluster_file is not None: cluster_info = scimpute.read_data_into_cell_row(p.cluster_file) # cluster_info = cluster_info.astype('str')", "+ reg_term) elif p.mse_mode == 'mse': print('training on mse') trainer = optimizer.minimize(mse +", "and variables # input p, X, pIn_holder, pHidden_holder, n # return a_bottleneck, h(d_a1)", "= input_df.ix[:p.m, :p.n] gc.collect() # To sparse input_matrix = csr_matrix(input_df) # todo: directly", "7: # change with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1,", "plot histograms of SDs Parameters ------------ X: input data matrix; genes in columns", "''' gene_pair_dir = p.tag+'/pairs' List = p.gene_pair_list print(\">n> Scatterplots of selected gene pairs\")", "std_max), dir=p.tag) scimpute.hist_df( x_std_df, xlab='Standard Deviation', title='Input({})'.format(p.name_input), range=(std_min, std_max), dir=p.tag) scimpute.hist_df( g_std_df, xlab='Standard", "(input):', p.fname_input, p.ori_input, p.transformation_input,'\\n', X.ix[0:20, 0:3]) print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,'\\n', G.ix[0:20,", "', '{} M'.format(usage())) else: df_out_batch.to_csv(handle, header=None) latent_code_df.to_csv(handle2, header=None) handle2.close() else: # if m", "p.fname_input = infile p.mode = mode if mode == 'pre-training': # step1/rand_init for", "del rand_idx gc.collect() np.random.seed() else: sample_input = input_matrix.todense() sample_train = input_train.todense() sample_valid =", "cell IDs input_matrix, gene_ids, cell_ids = read_data(p) ##1. 
#!/usr/bin/python
# late.py: train an autoencoder on a gene-by-cell expression matrix, impute
# missing (zero) values, and analyze the imputation results.
# Modes: pre-training | late | translate | impute | analysis
import sys
import os
import math
import time
import gc
import argparse
import numpy as np
import pandas as pd
import psutil
import tensorflow as tf
from importlib.machinery import SourceFileLoader
from scipy.sparse import csr_matrix
import scimpute


def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1):
    '''Save mse curves to csv files
    Parameters:
    -----------
    epoch_log: list of epochs at which MSE was evaluated
    mse_batch_vec: training MSE of the last mini-batch per logged epoch
    mse_valid_vec: validation MSE per logged epoch
    stage: step1 or step2
    skip: number of leading points to skip when plotting
    '''
    print('> plotting learning curves')
    scimpute.learning_curve(epoch_log, mse_batch_vec, mse_valid_vec,
                            title="Learning Curve MSE.{}".format(stage),
                            ylabel='MSE (X vs Y)',
                            dir=stage,
                            skip=skip)
    _ = np.asarray(list(zip(epoch_log, mse_batch_vec, mse_valid_vec)))
    _ = pd.DataFrame(data=_, index=epoch_log,
                     columns=['Epoch', 'MSE_batch', 'MSE_valid']
                     ).set_index('Epoch')
    _.to_csv("./{}/mse.csv".format(stage))


def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1):
    '''Save non-zero mse curves to csv files
    Parameters:
    -----------
    epoch_log: list of epochs at which MSE_NZ was evaluated
    mse_nz_batch_vec: training MSE_NZ of the last mini-batch per logged epoch
    mse_nz_valid_vec: validation MSE_NZ per logged epoch
    stage: step1 or step2
    skip: number of leading points to skip when plotting
    '''
    print('> plotting learning curves')
    scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                            title="Learning Curve MSE_NZ.{}".format(stage),
                            ylabel='MSE_NZ (X vs Y, nz)',
                            dir=stage,
                            skip=skip)
    _ = np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec)))
    _ = pd.DataFrame(data=_, index=epoch_log,
                     columns=['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid']
                     ).set_index('Epoch')
    _.to_csv("./{}/mse_nz.csv".format(stage))
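# --------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline above): re-plotting the
# mse_nz.csv written by learning_curve_mse_nz offline. The './step2' stage
# directory is an assumption; matplotlib is assumed to be installed.
def _demo_replot_learning_curve(csv_path='./step2/mse_nz.csv'):
    import matplotlib
    matplotlib.use('Agg')  # headless backend, safe on servers
    import matplotlib.pyplot as plt
    df = pd.read_csv(csv_path, index_col='Epoch')  # columns: MSE_NZ_batch, MSE_NZ_valid
    df.plot()                                      # one line per MSE column
    plt.xlabel('Epoch')
    plt.ylabel('MSE_NZ')
    plt.savefig('learning_curve_replot.png')
# --------------------------------------------------------------------------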
def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
    '''Calculate /and save/ the snapshot results of the current model on the
    whole dataset or on a sample of it
    Parameters:
    -----------
    input_data: dense matrix of cells x genes
    '''
    Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
    # save sample imputation
    Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
    return Y_input_df


def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                          input_matrix, gene_ids, cell_ids, p, m):
    '''Impute the whole input matrix and write the imputation (and the
    bottleneck code) to disk; large matrices are processed in mini-batches
    streamed into csv files.'''
    if m > p.large_size:
        n_out_batches = m // p.sample_size
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
            for i_ in range(n_out_batches+1):
                start_idx = i_*p.sample_size
                end_idx = min((i_+1)*p.sample_size, m)
                print('saving:', start_idx, end_idx)
                x_out_batch = input_matrix[start_idx:end_idx, :].todense()
                y_out_batch = sess.run(
                    h,
                    feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1}
                )
                df_out_batch = pd.DataFrame(
                    data=y_out_batch,
                    columns=gene_ids,
                    index=cell_ids[range(start_idx, end_idx)]
                )
                latent_code = sess.run(
                    a_bottleneck,
                    feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1}
                )
                latent_code_df = pd.DataFrame(
                    data=latent_code,
                    index=cell_ids[range(start_idx, end_idx)]
                )
                if i_ == 0:
                    df_out_batch.to_csv(handle, float_format='%.6f')
                    latent_code_df.to_csv(handle2, float_format='%.6f')
                    print('RAM usage during mini-batch imputation and saving output: ',
                          '{} M'.format(usage()))
                else:
                    df_out_batch.to_csv(handle, header=None)
                    latent_code_df.to_csv(handle2, header=None)
        handle2.close()
    else:
        # if the input matrix is small, impute and save in one pass
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                                        pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids)
        print('RAM usage during whole data imputation and saving output: ',
              '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
        scimpute.save_hd5(latent_code_df, "{}/latent_code.{}.hd5".format(p.stage, p.stage))
_.to_csv(\"./{}/mse.csv\".format(stage)) #def learning_curve_mse_nz(skip=1):", "p.transformation_imputation,'\\n', Y.ix[0:20, 0:3]) print('X (input):', p.fname_input, p.ori_input, p.transformation_input,'\\n', X.ix[0:20, 0:3]) print('G (ground truth):',", "p.tag+'/pairs_discrete' # List = p.gene_pair_list scimpute.gene_pair_plot(Y, list=List, tag='(Imputation Discrete) ', dir=gene_pair_dir) print(\"\\n> Discrete", "xlab='Expression', title='Input({})'.format(p.name_input), dir=p.tag, range=[min_expression, max_expression]) scimpute.hist_df( G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth), dir=p.tag, range=[min_expression, max_expression])", "input_df.info(memory_usage='deep') # Test or not if p.test_flag: print('in test mode') input_df = input_df.ix[:p.m,", "# define input/output a_bottleneck = e_a1 else: raise Exception(\"{} L not defined, only", "p.n_hidden_4, p.n_hidden_3, p.sd) # # d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder) with", "dir=p.tag) scimpute.hist_df( g_std_df, xlab='Standard Deviation', title='Ground Truth({})'.format(p.name_input), range=(std_min, std_max), dir=p.tag) scimpute.hist_df( std_ratio_yx_df, xlab='Ratio", "matrix G: ground truth Return ----------- 4 MSEs ''' print('\\n> MSE Calculation') max_y,", "scimpute.hist_df( Y, xlab='Expression', title='Imputation({})'.format(p.name_imputation), dir=p.tag, range=[min_expression, max_expression]) scimpute.hist_df( X, xlab='Expression', title='Input({})'.format(p.name_input), dir=p.tag, range=[min_expression,", "save the #imputation results of a small sample set (sample_input) print(\"> Impute and", "p.sd) d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 =", "max_expression]) scimpute.hist_df( X, xlab='Expression', title='Input({})'.format(p.name_input), dir=p.tag, range=[min_expression, max_expression]) scimpute.hist_df( G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth),", "of SDs Parameters ------------ X: input data matrix; genes in columns (same below)", "dir=p.tag, range=[min_expression, max_expression]) scimpute.hist_df( X, xlab='Expression', title='Input({})'.format(p.name_input), dir=p.tag, range=[min_expression, max_expression]) scimpute.hist_df( G, xlab='Expression',", "Y: imputed data matrix G: ground truth ''' # print('>READING DATA..') # X", "print('sys.path', sys.path) #print('python version:', sys.version) #print('tf.__version__', tf.__version__) def late_main(p, log_dir, rand_state=3): ##0. read", "sample_input_cell_ids) scimpute.save_hd5(Y_input_df, \"{}/sample_imputation.{}.hd5\".format(p.stage, p.stage)) else: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(),", "tf.name_scope('Encoder_L2'): e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd) e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2,", "List = p.gene_pair_list print(\">n> Scatterplots of selected gene pairs\") scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)", "reconstruct the training and validation sets by randomly sampling. 
def late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state=3):
    ##1. split data and save indexes
    #input: p, input_matrix, cell_ids
    #return: cell_ids_train, cell_ids_valid, cell_ids_test
    m, n = input_matrix.shape
    input_train, input_valid, input_test, train_idx, valid_idx, test_idx = \
        scimpute.split__csr_matrix(input_matrix, a=p.a, b=p.b, c=p.c)
    cell_ids_train = cell_ids[train_idx]
    cell_ids_valid = cell_ids[valid_idx]
    cell_ids_test = cell_ids[test_idx]
    np.savetxt('{}/train.{}_index.txt'.format(p.stage, p.stage), cell_ids_train, fmt='%s')
    np.savetxt('{}/valid.{}_index.txt'.format(p.stage, p.stage), cell_ids_valid, fmt='%s')
    np.savetxt('{}/test.{}_index.txt'.format(p.stage, p.stage), cell_ids_test, fmt='%s')
    print('RAM usage after splitting input data: {} M'.format(usage()))

    # reconstruct the training and validation sets by randomly sampling, so
    # that evaluation during training stays cheap on large matrices
    try:
        p.sample_size
        sample_size = p.sample_size
    except AttributeError:
        sample_size = int(9e4)
    if sample_size < m:
        np.random.seed(1)
        rand_idx = np.random.choice(
            range(len(cell_ids_train)), min(sample_size, len(cell_ids_train)))
        sample_train = input_train[rand_idx, :].todense()
        sample_train_cell_ids = cell_ids_train[rand_idx]
        rand_idx = np.random.choice(
            range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid)))
        sample_valid = input_valid[rand_idx, :].todense()
        sample_valid_cell_ids = cell_ids_valid[rand_idx]
        #?? the following sample_input is a matrix sampled randomly, and should
        # it be a matrix containing sample_train and sample_valid?
        rand_idx = np.random.choice(range(m), min(sample_size, m))
        sample_input = input_matrix[rand_idx, :].todense()
        sample_input_cell_ids = cell_ids[rand_idx]
        del rand_idx
        gc.collect()
        np.random.seed()
    else:
        sample_input = input_matrix.todense()
        sample_train = input_train.todense()
        sample_valid = input_valid.todense()
        sample_input_cell_ids = cell_ids
        sample_train_cell_ids = cell_ids_train
        sample_valid_cell_ids = cell_ids_valid
    print('len of sample_train {}, sample_valid {}, sample_input {}'.format(
        len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids)
    ))

    ##2. model training and validation
    #2.1 init --> keep this in the main
    tf.reset_default_graph()
    # define placeholders and variables
    X = tf.placeholder(tf.float32, [None, n], name='X_input')
    pIn_holder = tf.placeholder(tf.float32, name='p.pIn')  # keep_prob for dropout
    pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')  # keep_prob for dropout
    #2.2 define layers and variables
    # input: p, X, pIn_holder, pHidden_holder, n
    # return: a_bottleneck, h(d_a1)
    a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3)
    #2.3 define loss
    # input: X, h, p
    # return: mse_nz, mse, reg_term
    mse_nz, mse, reg_term = build_metrics(X, h, p.reg_coef)
    #2.4 define optimizer --> keep this section in the main
    optimizer = tf.train.AdamOptimizer(p.learning_rate)
    if p.mse_mode in ('mse_omega', 'mse_nz'):
        print('training on mse_nz')
        trainer = optimizer.minimize(mse_nz + reg_term)
    elif p.mse_mode == 'mse':
        print('training on mse')
        trainer = optimizer.minimize(mse + reg_term)
    else:
        raise Exception('mse_mode spelled wrong')
    #2.5 init a session according to the run_flag
    sess = tf.Session()
    # restore variables
    saver = tf.train.Saver()
    if p.run_flag == 'load_saved':
        print('*** In TL Mode')
        saver.restore(sess, "./step1/step1.ckpt")
    elif p.run_flag == 'rand_init':
        print('*** In Rand Init Mode')
        init = tf.global_variables_initializer()
        sess.run(init)
    elif p.run_flag == 'impute':
        print('*** In impute mode loading "step2.ckpt"..')
        saver.restore(sess, './step2/step2.ckpt')
        p.max_training_epochs = 0
        p.learning_rate = 0.0
        ## save_whole_imputation
        save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                              input_matrix, gene_ids, cell_ids, p, m)
        print('imputation finished')
        sess.close()
        return
    else:
        raise Exception('run_flag spelled wrong')
    print('RAM usage after building the model is: {} M'.format(usage()))

    batch_writer = tf.summary.FileWriter(log_dir + '/batch', sess.graph)
    valid_writer = tf.summary.FileWriter(log_dir + '/valid', sess.graph)
    # prep mini-batch, and reporter vectors
    num_batch = int(math.floor(len(train_idx) // p.batch_size))  # floor
    epoch_log = []
    mse_batch_vec, mse_valid_vec = [], []  # mse = MSE(X, h)
    mse_nz_batch_vec, mse_nz_valid_vec = [], []
    #msej_batch_vec, msej_valid_vec = [], []  # msej = MSE(X, h), for genej, nz_cells
    epoch = 0

    #2.6. pre-training epoch (0)
    #save imputation results before training steps
    print("Evaluation: epoch{}".format(epoch))
    epoch_log.append(epoch)
    mse_train, mse_nz_train = sess.run(
        [mse, mse_nz],
        feed_dict={X: sample_train, pHidden_holder: 1.0, pIn_holder: 1.0})
    mse_valid, mse_nz_valid = sess.run(
        [mse, mse_nz],
        feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0})
    print("mse_nz_train=", round(mse_nz_train, 3), "mse_nz_valid=", round(mse_nz_valid, 3))
    print("mse_train=", round(mse_train, 3), "mse_valid=", round(mse_valid, 3))
    mse_batch_vec.append(mse_train)
    mse_valid_vec.append(mse_valid)
    mse_nz_batch_vec.append(mse_nz_train)
    mse_nz_valid_vec.append(mse_nz_valid)

    #2.7. training epochs (1-)
    for epoch in range(1, p.max_training_epochs+1):
        tic_cpu, tic_wall = time.clock(), time.time()
        ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False)
        #2.7.1 training model on mini-batches
        for i in range(num_batch):
            # x_batch
            indices = np.arange(p.batch_size * i, p.batch_size*(i+1))
            ridx_batch = ridx_full[indices]
            x_batch = input_train[ridx_batch, :].todense()
            sess.run(trainer, feed_dict={X: x_batch,
                                         pIn_holder: p.pIn, pHidden_holder: p.pHidden})
        toc_cpu, toc_wall = time.clock(), time.time()
        #2.7.2 save the results of epoch 1 and all display steps (epochs)
        if (epoch == 1) or (epoch % p.display_step == 0):
            tic_log = time.time()
            print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format(
                epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall - tic_wall, 2)
            ))
            print('num-mini-batch per epoch: {}, till now: {}'.format(i+1, epoch*(i+1)))
            print('RAM usage: {:0.1f} M'.format(usage()))
            # debug
            # print('d_w1', sess.run(d_w1[1, 0:4]))
            # training mse and mse_nz of the last batch
            mse_batch, mse_nz_batch, h_batch = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: x_batch, pHidden_holder: 1.0, pIn_holder: 1.0})
            # validation mse and mse_nz of the sample validation set (1000)
            mse_valid, mse_nz_valid, Y_valid = sess.run(
                [mse, mse_nz, h],
                feed_dict={X: sample_valid, pHidden_holder: 1.0, pIn_holder: 1.0}
            )
            toc_log = time.time()
            print('mse_nz_batch:{}; mse_nz_valid: {}'.format(mse_nz_batch, mse_nz_valid))
            print('mse_batch:', mse_batch, '; mse_valid:', mse_valid)
            print('log time for each epoch: {}\n'.format(round(toc_log - tic_log, 1)))
            mse_batch_vec.append(mse_batch)
            mse_valid_vec.append(mse_valid)
            mse_nz_batch_vec.append(mse_nz_batch)
            mse_nz_valid_vec.append(mse_nz_valid)
            epoch_log.append(epoch)
        #2.7.3 save snapshot step
        if (epoch % p.snapshot_step == 0) or (epoch == p.max_training_epochs):
            tic_log2 = time.time()
            #1.save imputation results
            #if the input matrix is large (m > p.large_size), only save the
            #imputation results of a small sample set (sample_input)
            print("> Impute and save.. ")
            if m > p.large_size:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             sample_input, gene_ids, sample_input_cell_ids)
                scimpute.save_hd5(Y_input_df,
                                  "{}/sample_imputation.{}.hd5".format(p.stage, p.stage))
            else:
                Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,
                                             input_matrix.todense(), gene_ids, cell_ids)
                scimpute.save_hd5(Y_input_df,
                                  "{}/imputation.{}.hd5".format(p.stage, p.stage))
            #2.save model
            print('> Saving model..')
            save_path = saver.save(sess, log_dir + "/{}.ckpt".format(p.stage))
            print("Model saved in: %s" % save_path)
            #3.save the training and test curve
            if p.mse_mode in ('mse_nz', 'mse_omega'):
                learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                                      p.stage, skip=math.floor(epoch / 5 / p.display_step))
            elif p.mse_mode == 'mse':
                learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec,
                                   p.stage, skip=math.floor(epoch / 5 / p.display_step))
            #4.save bottleneck representation
            print("> save bottleneck_representation")
            code_bottleneck_input = sess.run(a_bottleneck,
                                             feed_dict={X: sample_input,
                                                        pIn_holder: 1, pHidden_holder: 1})
            np.save('{}/code_bottleneck_input.{}'.format(p.stage, p.stage),
                    code_bottleneck_input)
            save_weights(sess, p.stage, en_de_layers=p.l)
            visualize_weights(sess, p.stage, en_de_layers=p.l)
            toc_log2 = time.time()
            log2_time = round(toc_log2 - tic_log2, 1)
            min_mse_valid = min(mse_nz_valid_vec)
            # os.system(
            #     '''for file in {0}/*npy
            #     do python -u weight_clustmap.py $file {0}
            #     done'''.format(p.stage)
            # )
            print('min_mse_nz_valid till now: {}'.format(min_mse_valid))
            print('snapshot_step: {}s'.format(log2_time))
    batch_writer.close()
    valid_writer.close()
    sess.close()
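# --------------------------------------------------------------------------
# Illustrative sketch: the epoch-level shuffling used in late_main, shown
# standalone with tiny, hypothetical sizes. Each epoch draws a permutation of
# the training indices (sampling without replacement) and slices it into
# fixed-size mini-batches; the tail that does not fill a batch is dropped.
def _demo_minibatch_indices(n_train=10, batch_size=3):
    ridx_full = np.random.choice(n_train, n_train, replace=False)  # a permutation
    num_batch = n_train // batch_size                              # floor
    for i in range(num_batch):
        ridx_batch = ridx_full[np.arange(batch_size * i, batch_size * (i + 1))]
        print('batch', i, '->', ridx_batch)
# --------------------------------------------------------------------------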
def build_late(X, pHidden_holder, pIn_holder, p, n, rand_state=3):
    # define layers and variables of the autoencoder
    # input: p, X, pIn_holder, pHidden_holder, n
    # return: a_bottleneck, h (which is d_a1)
    tf.set_random_seed(rand_state)  # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
    global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
    if p.L == 7:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # # with tf.name_scope('Encoder_L4'):
        # #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        # #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        # # with tf.name_scope('Decoder_L4'):
        # #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        # #     d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # define input/output
        a_bottleneck = e_a3
    elif p.L == 5:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # define input/output
        a_bottleneck = e_a2
    elif p.L == 3:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
            # todo: change input activations if model changed
        # define input/output
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))
    h = d_a1
    return a_bottleneck, h
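# --------------------------------------------------------------------------
# Illustrative sketch: the symmetric weight shapes build_late creates for the
# 3-, 5- and 7-layer configurations. The gene count and hidden sizes below are
# hypothetical; the bottleneck is the innermost encoder activation.
def _demo_layer_shapes(n=1000, n_hidden_1=400, n_hidden_2=200, n_hidden_3=100):
    widths_per_L = {3: [n, n_hidden_1],
                    5: [n, n_hidden_1, n_hidden_2],
                    7: [n, n_hidden_1, n_hidden_2, n_hidden_3]}
    for L, widths in sorted(widths_per_L.items()):
        encoder = list(zip(widths[:-1], widths[1:]))      # e_w1, e_w2, ...
        decoder = [(b, a) for a, b in reversed(encoder)]  # mirrored d_w*, ...
        print('L={}: bottleneck width {}; encoder {}; decoder {}'.format(
            L, widths[-1], encoder, decoder))
# --------------------------------------------------------------------------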
def build_metrics(X, h, coef):
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if 0, 1 if > 0; X is never < 0 in our data
        mse_nz = tf.reduce_mean(tf.multiply(tf.pow(X - h, 2), omega))
        mse = tf.reduce_mean(tf.pow(X - h, 2))
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
    return mse_nz, mse, reg_term
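# --------------------------------------------------------------------------
# Illustrative sketch (NumPy, not TensorFlow): the non-zero-masked loss that
# build_metrics defines. omega = sign(X) zeroes out the error at entries where
# the input is zero, so dropouts do not pull the reconstruction toward zero;
# note that reduce_mean still averages over *all* entries.
def _demo_masked_mse():
    X = np.array([[0.0, 2.0], [1.0, 0.0]])  # input (zeros = unobserved)
    h = np.array([[0.5, 1.0], [1.0, 3.0]])  # reconstruction
    omega = np.sign(X)                      # 1 where X > 0, else 0
    mse_nz = np.mean((X - h) ** 2 * omega)  # masked squared error, mean over all entries
    mse = np.mean((X - h) ** 2)             # plain MSE, for comparison
    print('mse_nz =', mse_nz, '; mse =', mse)  # 0.25 vs 2.5625
# --------------------------------------------------------------------------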
def load_params(mode, infile):
    '''load parameters from the 'global_params.py' file
    '''
    cwd = os.getcwd()
    param_file = 'global_params.py'
    param_name = param_file.rstrip('.py')
    p = SourceFileLoader(param_name, cwd + '/' + param_file).load_module()
    p.fname_input = infile
    p.mode = mode
    if mode == 'pre-training':
        # step1/rand_init for pre-training on reference
        p.stage = 'step1'
        p.run_flag = 'rand_init'
    elif mode == 'late':
        # step2/rand_init for one-step training
        p.stage = 'step2'
        p.run_flag = 'rand_init'
        p.learning_rate = 3e-4  # step1: 3e-4 for 3-7L
    elif mode == 'translate':
        # step2/load_saved from step1, for transfer learning
        p.stage = 'step2'
        p.run_flag = 'load_saved'
    elif mode == 'impute':
        # just impute and output
        p.stage = 'impute'
        p.run_flag = 'impute'
        p.learning_rate = 0.0
    elif mode == 'analysis':
        p.tag = 'Eval'
        p.stage = 'Eval'
    else:
        print('The mode you entered cannot be recognized.')
        print('Valid mode options: pre-training | late | translate | impute | analysis')
        p.mode = 'invalid'
        return p

    if p.test_flag:
        p.max_training_epochs = 10  # 3L:100, 5L:1000, 7L:1000, 9L:3000
        p.display_step = 1  # interval on learning curve

    # sample_size is 1000 in default; if sample_size is less than the number
    # of cells, a random subset is used for evaluation during training
    try:
        p.sample_size
    except AttributeError:
        p.sample_size = 1000
    print('sample size set to {}'.format(p.sample_size))
    return p


# to do: modify to display based on mode
def display_params(p):
    # DISPLAY PARAMETERS
    print('\nmode:', p.mode)
    print('\nData:')
    print('fname_input:', p.fname_input)
    print('name_input:', p.name_input)
    print('ori_input:', p.ori_input)
    print('transformation_input:', p.transformation_input)
    if (p.mode == 'pre-training') or (p.mode == 'late') or (p.mode == 'translate'):
        print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c))
        print('\nParameters:')
        print('mse_mode:', p.mse_mode)
        print('stage:', p.stage)
        print('init:', p.run_flag)
        print('test_mode:', p.test_flag)
        print('total number of layers: {}'.format(p.L))
        for l_tmp in range(1, p.l+1):
            print('n_hidden_{}: {}'.format(l_tmp, getattr(p, 'n_hidden_' + str(l_tmp))))
        print('learning_rate:', p.learning_rate)
        print('reg_coef:', p.reg_coef)
        print('batch_size:', p.batch_size)
        print('sample_size: ', p.sample_size)
        print('pIn:', p.pIn)
        print('pHidden:', p.pHidden)
        print('max_training_epochs:', p.max_training_epochs)
        print('display_step', p.display_step)
        print('snapshot_step', p.snapshot_step)
    elif p.mode == 'analysis':
        print('fname_imputation:', p.fname_imputation)
        print('transformation_imputation', p.transformation_imputation)
        print('fname_ground_truth: ', p.fname_ground_truth)
        print('transformation_ground_truth', p.transformation_ground_truth)
        print('gene_pair_list: ', p.gene_pair_list)
    print('\n')


def read_data(p):
    '''READ DATA
    Parameters
    ------------
    p: parameters from global_params.py and example.py
    Return
    -----------
    input_matrix: csr matrix of cells x genes
    gene_ids, cell_ids
    '''
    print('>READING DATA..')
    print('RAM usage before reading data: {} M'.format(usage()))
    if p.fname_input.endswith('h5'):
        # for 10x genomics h5 files
        input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input)
        # gene_be_matrix.matrix = input_obj.matrix.log1p()
        input_matrix = input_obj.matrix
        gene_ids = input_obj.gene_ids
        cell_ids = input_obj.barcodes
        print('RAM usage after reading sparse matrix: {} M'.format(usage()))
        gc.collect()
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_matrix = scimpute.sparse_matrix_transformation(input_matrix, p.transformation_input)
        del(input_obj)
        gc.collect()
        print('RAM usage after data transformation: {} M'.format(usage()))
        # Test or not: m*n subset (1000 * 300). Delete later
        if p.test_flag:
            print('in test mode')
            input_matrix = input_matrix[:p.m, :p.n]
            gene_ids = gene_ids[:p.n]
            cell_ids = cell_ids[:p.m]
            gc.collect()
    else:
        # For smaller files (hd5, csv, csv.gz)
        input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
        print('RAM usage after reading input_df: {} M'.format(usage()))
        # Data Transformation
        print('> DATA TRANSFORMATION..')
        input_df = scimpute.df_transformation(
            input_df.transpose(), transformation=p.transformation_input
        ).transpose()  # [genes, cells] in df_transformation
        print('pandas memory usage: ')
        input_df.info(memory_usage='deep')
        # Test or not
        if p.test_flag:
            print('in test mode')
            input_df = input_df.ix[:p.m, :p.n]
        # To sparse
        input_matrix = csr_matrix(input_df)  # todo: directly read into csr and skip input_df
        gene_ids = input_df.columns
        cell_ids = input_df.index
        print('RAM usage before deleting input_df: {} M'.format(usage()))
        del(input_df)
        gc.collect()  # working on mac
        print('RAM usage after deleting input_df: {} M'.format(usage()))
    # Summary of data
    print("name_input:", p.name_input)
    _ = pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20],
                     columns=gene_ids[:4])
    print("input_df:\n", _, "\n")
    m, n = input_matrix.shape  # m: n_cells; n: n_genes
    print('input_matrix: {} cells, {} genes\n'.format(m, n))
    return input_matrix, gene_ids, cell_ids
def load_results(p):
    '''READ DATA
    Parameters
    ------------
    p: parameters from global_params.py and example.py
    Return
    -----------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    '''
    X, gene_ids, cell_ids = read_data(p)
    X = pd.DataFrame(data=X.todense(), index=cell_ids, columns=gene_ids)
    Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
    Y = scimpute.df_transformation(Y.transpose(),
                                   transformation=p.transformation_imputation).transpose()
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)
        G = scimpute.df_transformation(G.transpose(),
                                       transformation=p.transformation_ground_truth).transpose()
    # subset/sort X, G to match Y
    # todo: support sparse matrix
    X = X.loc[Y.index, Y.columns]
    G = G.loc[Y.index, Y.columns]
    # TEST MODE OR NOT
    if p.test_flag:
        print('in test mode')
        Y = Y.ix[0:p.m, 0:p.n]
        G = G.ix[0:p.m, 0:p.n]
        X = X.ix[0:p.m, 0:p.n]
    # INPUT SUMMARY
    print('\nIn this code, matrices should have already been transformed into cell_row')
    print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation,
          '\n', Y.ix[0:20, 0:3])
    print('X (input):', p.fname_input, p.ori_input, p.transformation_input,
          '\n', X.ix[0:20, 0:3])
    print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth,
          p.transformation_ground_truth, '\n', G.ix[0:20, 0:3])
    print('Y.shape', Y.shape)
    print('X.shape', X.shape)
    print('G.shape', G.shape)
    return X, Y, G


def calculate_MSEs(X, Y, G):
    '''calculate MSEs
    MSE between imputation and input
    MSE between imputation and ground truth
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    Return
    -----------
    4 MSEs
    '''
    print('\n> MSE Calculation')
    max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
    print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))
    max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
    print('Max in G is {}, Min in G is {}'.format(max_g, min_g))
    mse1_nz = scimpute.mse_omega(Y, X)
    mse1_nz = round(mse1_nz, 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)
    mse1 = scimpute.mse(Y, X)
    mse1 = round(mse1, 7)
    print('MSE1 between Imputation and Input: ', mse1)
    mse2_nz = scimpute.mse_omega(Y, G)
    mse2_nz = round(mse2_nz, 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)
    mse2 = scimpute.mse(Y, G)
    mse2 = round(mse2, 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)
    return mse1_nz, mse1, mse2_nz, mse2


def analyze_variation_in_genes(X, Y, G, p):
    '''calculate and visualize standard deviation in each gene
    write SDs to files
    plot histograms of SDs
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    print('\n calculating standard deviation in each gene for input and imputed matrix')
    x_std_df, y_std_df = scimpute.nz_std(X, Y)
    x_std_df, g_std_df = scimpute.nz_std(X, G)  # purpose: compare G with Y
    #std_ratio_yx_df = pd.DataFrame(data=y_std_df.values / x_std_df.values, index=X.columns, columns=['sd_ratio'])
    #std_ratio_yg_df = pd.DataFrame(data=y_std_df.values / g_std_df.values, index=X.columns, columns=['sd_ratio'])
    std_ratio_yx_data = [(y/x if x != 0 else None)
                         for y, x in zip(y_std_df.values, x_std_df.values)]
    std_ratio_yx_df = pd.DataFrame(data=std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])
    std_ratio_yg_data = [(y/x if x != 0 else None)
                         for y, x in zip(y_std_df.values, g_std_df.values)]
    std_ratio_yg_df = pd.DataFrame(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])
    std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min())
    std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max())
    print('generating histograms of standard deviations')
    scimpute.hist_df(y_std_df, xlab='Standard Deviation',
                     title='Imputation({})'.format(p.name_imputation),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(x_std_df, xlab='Standard Deviation',
                     title='Input({})'.format(p.name_input),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(g_std_df, xlab='Standard Deviation',
                     title='Ground Truth({})'.format(p.name_input),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv')
    std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')
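# --------------------------------------------------------------------------
# Illustrative sketch (an assumption about scimpute.nz_std's semantics): the
# per-gene standard deviation computed only over cells where the input is
# non-zero, for both the input and a second matrix aligned to it.
def _demo_nz_std():
    X = pd.DataFrame([[0.0, 2.0], [1.0, 4.0], [3.0, 0.0]], columns=['g1', 'g2'])
    Y = pd.DataFrame([[0.5, 2.5], [1.5, 3.5], [2.5, 1.0]], columns=['g1', 'g2'])
    for gene in X.columns:
        nz = X[gene] > 0  # cells observed in the input
        print(gene,
              'input sd:', round(X.loc[nz, gene].std(), 3),
              'imputed sd:', round(Y.loc[nz, gene].std(), 3))
# --------------------------------------------------------------------------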
def visualize_all_genes(X, Y, G, p):
    '''generate plots using all genes
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    # histograms of gene expression
    max_expression = max(G.values.max(), X.values.max(), Y.values.max())
    min_expression = min(G.values.min(), X.values.min(), Y.values.min())
    print('\n max expression:', max_expression)
    print('\n min expression:', min_expression)
    scimpute.hist_df(Y, xlab='Expression', title='Imputation({})'.format(p.name_imputation),
                     dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(X, xlab='Expression', title='Input({})'.format(p.name_input),
                     dir=p.tag, range=[min_expression, max_expression])
    scimpute.hist_df(G, xlab='Expression', title='Ground Truth({})'.format(p.name_ground_truth),
                     dir=p.tag, range=[min_expression, max_expression])
    # histograms of correlations between genes in imputation and ground truth,
    # and of correlations between cells in imputation and ground truth;
    # when ground truth is not provided, input is used as ground truth
    print('\n> Correlations between ground truth and imputation')
    print('generating histogram for correlations of genes between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each gene\n(Ground_truth vs Imputation)\n{}\n{}".format(
            p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='column-wise', nz_mode='first'  # or ignore
    )
    print('generating histogram for correlations of cells between ground truth and imputation')
    scimpute.hist_2matrix_corr(
        G.values, Y.values,
        title="Correlation for each cell\n(Ground_truth vs Imputation)\n{}\n{}".format(
            p.name_ground_truth, p.name_imputation),
        dir=p.tag, mode='row-wise', nz_mode='first'
    )
    # heatmaps of data matrices
    print('\n> Generating heatmaps of data matrices')
    range_max, range_min = scimpute.max_min_element_in_arrs([Y.values, G.values, X.values])
    print('\nrange:', range_max, ' ', range_min)
    scimpute.heatmap_vis(Y.values, title='Imputation ({})'.format(p.name_imputation),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(X.values, title='Input ({})'.format(p.name_input),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    scimpute.heatmap_vis(G.values, title='Ground_truth ({})'.format(p.name_ground_truth),
                         xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag)
    # PCA and tSNE plots
    print('\n> Generating PCA and tSNE plots')
    if p.cluster_file is not None:
        cluster_info = scimpute.read_data_into_cell_row(p.cluster_file)
        # cluster_info = cluster_info.astype('str')
    else:
        cluster_info = None
    scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info, title=p.name_imputation, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info, title=p.name_input, dir=p.tag)
    scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info, title=p.name_ground_truth, dir=p.tag)


def visualize_selected_genes(X, Y, G, p):
    '''generate plots for genes specified by the user
    Parameters
    ------------
    X: input data matrix; genes in columns (same below)
    Y: imputed data matrix
    G: ground truth
    p: parameters
    Return
    -----------
    None
    '''
    gene_pair_dir = p.tag+'/pairs'
    List = p.gene_pair_list
    print("\n> Scatterplots of selected gene pairs")
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(X, list=List, tag='(Input)', dir=gene_pair_dir)
    scimpute.gene_pair_plot(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir)

    print("\n> Scatterplots for selected genes")
    print("ground truth vs imputation, ground truth vs input")
    gene_dir = p.tag+'/genes'
    # generate a list of genes from the gene pair list
    gene_list = [gene for pair in List for gene in pair]
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
            X_j = X.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue
        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground_truth vs Imputation) '),
                              xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir)
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Input) '),
                              xlabel='Ground Truth', ylabel='Input', dir=gene_dir)

    # Discretize gene expression values and re-generate pairwise plots
    Y = scimpute.df_exp_discretize_log10(Y)
    print('\n> Discrete gene pair relationship in imputation')
    gene_pair_dir = p.tag+'/pairs_discrete'
    scimpute.gene_pair_plot(Y, list=List, tag='(Imputation Discrete) ', dir=gene_pair_dir)

    print("\n> Discrete imputation vs ground truth for selected genes")
    gene_dir = p.tag+'/genes_discrete'
    for j in gene_list:
        try:
            print('for ', j)
            Y_j = Y.ix[:, j]
            G_j = G.ix[:, j]
            X_j = X.ix[:, j]
        except KeyError:
            print('KeyError: gene ID does not exist')
            continue
        scimpute.scatterplot2(G_j, Y_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Imputation) '),
                              xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir)
        scimpute.scatterplot2(G_j, X_j, range='same',
                              title=str(str(j) + '\n(Ground Truth vs Input) '),
                              xlabel='Ground Truth', ylabel='Input', dir=gene_dir)


def result_analysis_main(p):
    '''analyzing imputation output
    Parameters
    ------------
    p: parameters from global_params.py and example.py
    Return
    -----------
    None
    '''
    # load imputation results and input data
    X, Y, G = load_results(p)
    # calculate MSEs
    mse1_nz, mse1, mse2_nz, mse2 = calculate_MSEs(X, Y, G)
    # calculate and visualize variation in genes
    analyze_variation_in_genes(X, Y, G, p)
    # visualize results using all genes
    visualize_all_genes(X, Y, G, p)
    # visualize selected genes
    visualize_selected_genes(X, Y, G, p)


def parse_args(argv):
    parser = argparse.ArgumentParser(description='Help information')
    parser.add_argument('-mode', help='mode options: pre-training | late | translate | impute | analysis')
    parser.add_argument('-infile', help='file path of input data')
    return parser.parse_args(argv)
\") if m > p.large_size: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder,", "toc_wall = time.clock(), time.time() #2.7.2 save the results of epoch 1 and all", "'\\n(Ground Truth vs Input) '), xlabel='Ground Truth', ylabel='Input', dir=gene_dir ) # Discretize gene", "''' print('> plotting learning curves') scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, title=\"Learning Curve MSE_NZ.{}\".format(stage), ylabel='MSE_NZ (X", "d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd) # # d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4,", ") # Discretize gene expression values # and re-generate pairwise plots Y =", "= [(y/x if x!=0 else None) for y, x in zip(y_std_df.values, x_std_df.values)] std_ratio_yx_df", "xlab='Ratio of Imputation SD vs Input SD', title='', range=(std_min, std_max), dir=p.tag) scimpute.hist_df( std_ratio_yg_df,", "print('RAM usage after deleting input_df: {} M'.format(usage())) # Summary of data print(\"name_input:\", p.name_input)", "cells in imputation and ground truth # when ground truth is not provide,", "p.mode =='invalid': exit(0) ##2. refresh folder log_dir = './{}'.format(p.stage) scimpute.refresh_logfolder(log_dir) tic_start = time.time()", "< m: np.random.seed(1) rand_idx = np.random.choice( range(len(cell_ids_train)), min(sample_size, len(cell_ids_train))) sample_train = input_train[rand_idx, :].todense()", "> p.large_size: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, sample_input, gene_ids, sample_input_cell_ids) scimpute.save_hd5(Y_input_df,", "files Parameters: ----------- skip: epoch_log: mse_nz_batch_vec: mse_nz_valid_vec: stage: ''' print('> plotting learning curves')", "print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format( epoch, round(toc_cpu - tic_cpu,", "e_b1, pIn_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 =", "scimpute.save_hd5(Y_input_df, \"{}/imputation.{}.hd5\".format(p.stage, p.stage)) scimpute.save_hd5(latent_code_df, \"{}/latent_code.{}.hd5\".format(p.stage, p.stage)) def visualize_weight(sess, stage, w_name, b_name): w =", "print('batch_size:', p.batch_size) print('sample_zie: ', p.sample_size) print('pIn:', p.pIn) print('pHidden:', p.pHidden) print('max_training_epochs:', p.max_training_epochs) print('display_step', p.display_step)", "vs Ground Truth SD', title='', range=(std_min, std_max), dir=p.tag) std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv') std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv') def visualize_all_genes(X, Y,", "= input_valid.todense() sample_input_cell_ids = cell_ids sample_train_cell_ids = cell_ids_train sample_valid_cell_ids = cell_ids_valid print('len of", "scimpute.gene_pair_plot(Y, list=List, tag='(Imputation)', dir=gene_pair_dir) scimpute.gene_pair_plot(X, list=List, tag='(Input)', dir=gene_pair_dir) scimpute.gene_pair_plot(G, list=List, tag='(Ground_truth)', dir=gene_pair_dir) print(\"\\n>", "#def learning_curve_mse_nz(skip=1): def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1): '''Save mse curves to csv", "imputed matrix') x_std_df, y_std_df = scimpute.nz_std(X, Y) x_std_df, g_std_df = scimpute.nz_std(X, G) #", "print('mse_mode:', p.mse_mode) print('stage:', p.stage) print('init:', p.run_flag) print('test_mode:', p.test_flag) print('total number of layers: {}'.format(p.L))", "G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose() 
# subset/sort X, G to match Y # todo:", "print('pandas input_df mem usage: ') input_df.info(memory_usage='deep') # Test or not if p.test_flag: print('in", "n_cells; n: n_genes print('input_matrix: {} cells, {} genes\\n'.format(m, n)) return input_matrix, gene_ids, cell_ids", "with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd) e_a1 =", "##1. split data and save indexes #input p, input_matrix, cell_ids #return cell_ids_train, cell_ids_valid,", "after reading sparse matrix: {} M'.format(usage())) gc.collect() # Data Transformation print('> DATA TRANSFORMATION..')", "time.time() time_finish = round((toc_stop - tic_start), 2) print(\"Imputation Finished!\") print(\"Wall Time Used: {}", "5, 7 implemented\".format(p.L)) h = d_a1 return a_bottleneck, h def build_metrics(X, h, coef):", "elif mode == 'translate': # step2/load_saved from step1, for transfer learning p.stage =", "visualize_weights(sess, stage, en_de_layers): for l1 in range(1, en_de_layers+1): encoder_weight = 'e_w'+str(l1) encoder_bias =", "1}) np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input) #save_weights() save_weights(sess, p.stage, en_de_layers=p.l) #visualize_weights() visualize_weights(sess, p.stage, en_de_layers=p.l) toc_log2", "code_bottleneck_input) #save_weights() save_weights(sess, p.stage, en_de_layers=p.l) #visualize_weights() visualize_weights(sess, p.stage, en_de_layers=p.l) toc_log2 = time.time() log2_time", "x!=0 else None) for y, x in zip(y_std_df.values, g_std_df.values)] std_ratio_yg_df = pd.DataFrame(data= std_ratio_yg_data,", "else: G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose() # subset/sort X, G to match Y #", "Y.columns] # TEST MODE OR NOT if p.test_flag: print('in test mode') Y =", "0.0 ## save_whole_imputation save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder, input_matrix, gene_ids, cell_ids, p,", "G.ix[0:20, 0:3]) print('Y.shape', Y.shape) print('X.shape', X.shape) print('G.shape', G.shape) return X, Y, G def", "cell_ids_train, cell_ids_valid, cell_ids_test m, n = input_matrix.shape input_train, input_valid, input_test, train_idx, valid_idx, test_idx", "else: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(), gene_ids, cell_ids) scimpute.save_hd5(Y_input_df, \"{}/imputation.{}.hd5\".format(p.stage,", "0:4])) # verified when GradDescent used # training mse and mse_nz of the", "scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd) d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder) # todo:", "mse = tf.reduce_mean(tf.pow(X-h, 2)) reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef tf.summary.scalar('mse_nz__Y_vs_X', mse_nz) mse", "between genes in imputation and ground truth # and of correlations between cells", "of cells (m), # we reconstruct the training and validation sets by randomly", "print('X (input):', p.fname_input, p.ori_input, p.transformation_input,'\\n', X.ix[0:20, 0:3]) print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,'\\n',", "stage, skip=1): '''Save mse curves to csv files Parameters: ----------- skip: epoch_log: mse_nz_batch_vec:", "xlabel='Ground Truth', ylabel='Imputation', dir=gene_dir ) scimpute.scatterplot2(G_j, X_j, range='same', title=str(str(j) + '\\n(Ground_truth vs Input)", "set to {}, num-cells set to {}\\n'.format(p.n, p.m), 'sample size set to {}'.format(p.sample_size))", 
"if sample_size < m: np.random.seed(1) rand_idx = np.random.choice( range(len(cell_ids_train)), min(sample_size, len(cell_ids_train))) sample_train =", "range=(std_min, std_max), dir=p.tag) scimpute.hist_df( g_std_df, xlab='Standard Deviation', title='Ground Truth({})'.format(p.name_input), range=(std_min, std_max), dir=p.tag) scimpute.hist_df(", "p.ori_input) # gene_be_matrix.matrix = input_obj.matrix.log1p() input_matrix = input_obj.matrix gene_ids = input_obj.gene_ids cell_ids =", "steps print(\"Evaluation: epoch{}\".format(epoch)) epoch_log.append(epoch) mse_train, mse_nz_train = sess.run([mse, mse_nz], feed_dict={X: sample_train,pHidden_holder: 1.0, pIn_holder:", "is{}'.format(max_g, min_g)) mse1_nz = scimpute.mse_omega(Y, X) mse1_nz = round(mse1_nz, 7) print('MSE1_NZ between Imputation", "e_w3, e_b3, e_a3 global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3", "#2.2 define layers and variables # input p, X, pIn_holder, pHidden_holder, n #", "dir=p.tag) scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info, title=p.name_input, dir=p.tag) scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info, title=p.name_ground_truth, dir=p.tag) def visualize_selected_genes(X, Y, G,", "cluster_info=cluster_info, title=p.name_ground_truth, dir=p.tag) def visualize_selected_genes(X, Y, G, p): ''' generate plots for genes", "stage: step1 or step2 ''' print('> plotting learning curves') scimpute.learning_curve(epoch_log, mse_batch_vec, mse_valid_vec, title=\"Learning", "global_params.py and example.py Return ----------- X: input data matrix; genes in columns (same", "= open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w') with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle: for i_ in", "gene_ids, cell_ids = read_data(p) X = pd.DataFrame(data=X.todense(), index=cell_ids, columns=gene_ids) Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)", ":].todense() sample_train_cell_ids = cell_ids_train[rand_idx] rand_idx = np.random.choice( range(len(cell_ids_valid)), min(sample_size, len(cell_ids_valid))) sample_valid = input_valid[rand_idx,", "np.save('{}/code_neck_valid.{}.npy'.format(p.stage, p.stage), code_bottleneck_input) #save_weights() save_weights(sess, p.stage, en_de_layers=p.l) #visualize_weights() visualize_weights(sess, p.stage, en_de_layers=p.l) toc_log2 =", "print('\\nParameters:') print('mse_mode:', p.mse_mode) print('stage:', p.stage) print('init:', p.run_flag) print('test_mode:', p.test_flag) print('total number of layers:", "1 } ) df_out_batch = pd.DataFrame( data=y_out_batch, columns=gene_ids, index=cell_ids[range(start_idx, end_idx)] ) latent_code =", "= 'load_saved' # rand_init/load_saved p.learning_rate = 3e-5 # step2: 3e-5 for 3-7L, 3e-6", "scimpute.df_transformation( input_df.transpose(), transformation=p.transformation_input ).transpose() # [genes, cells] in df_trans() print('pandas input_df mem usage:", "a matrix containing # sample_training and sample_valid rand_idx = np.random.choice(range(m), min(sample_size, m)) sample_input", "print('\\nmode:', p.mode) print('\\nData:') print('fname_input:', p.fname_input) print('name_input:', p.name_input) print('ori_input:', p.ori_input) print('transformation_input:', p.transformation_input) if (p.mode", "7) print('MSE1_NZ between Imputation and Input: ', mse1_nz) mse1 = scimpute.mse(Y, X) mse1", "##0. 
def build_late(X, pIn_holder, pHidden_holder, p, n, rand_state=3):
    '''Build the autoencoder.
    Input: p, X, pIn_holder, pHidden_holder, n.
    Return: a_bottleneck, h (= d_a1).'''
    tf.set_random_seed(rand_state)  # seed
    global e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3
    global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3
    if p.L == 7:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Encoder_L3'):
            e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd)
            e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder)
        # with tf.name_scope('Encoder_L4'):
        #     e_w4, e_b4 = scimpute.weight_bias_variable('encoder4', p.n_hidden_3, p.n_hidden_4, p.sd)
        #     e_a4 = scimpute.dense_layer('encoder4', e_a3, e_w4, e_b4, pHidden_holder)
        #     d_w4, d_b4 = scimpute.weight_bias_variable('decoder4', p.n_hidden_4, p.n_hidden_3, p.sd)
        #     d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder)
        with tf.name_scope('Decoder_L3'):
            d_w3, d_b3 = scimpute.weight_bias_variable('decoder3', p.n_hidden_3, p.n_hidden_2, p.sd)
            d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        a_bottleneck = e_a3
    elif p.L == 5:
        # change with layer
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Encoder_L2'):
            e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd)
            e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder)
        with tf.name_scope('Decoder_L2'):
            d_w2, d_b2 = scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd)
            d_a2 = scimpute.dense_layer('decoder2', e_a2, d_w2, d_b2, pHidden_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', d_a2, d_w1, d_b1, pHidden_holder)
        a_bottleneck = e_a2
    elif p.L == 3:
        with tf.name_scope('Encoder_L1'):
            e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd)
            e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder)
        with tf.name_scope('Decoder_L1'):
            d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)
            d_a1 = scimpute.dense_layer('decoder1', e_a1, d_w1, d_b1, pHidden_holder)
        # todo: change input activations if model changed
        a_bottleneck = e_a1
    else:
        raise Exception("{} L not defined, only 3, 5, 7 implemented".format(p.L))
    h = d_a1
    return a_bottleneck, h


def build_metrics(X, h, coef):
    with tf.name_scope("Metrics"):
        omega = tf.sign(X)  # 0 if X == 0, 1 if X > 0; X is never < 0 in our data
        mse_nz = tf.reduce_mean(tf.multiply(tf.pow(X - h, 2), omega))
        mse = tf.reduce_mean(tf.pow(X - h, 2))  # for report
        reg_term = tf.reduce_mean(tf.pow(h, 2)) * coef
        tf.summary.scalar('mse_nz__Y_vs_X', mse_nz)
        tf.summary.scalar('mse__Y_vs_X', mse)
    return mse_nz, mse, reg_term
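# Added numeric sketch (toy values, not from the original data): how the omega
# mask in build_metrics() restricts the reconstruction error to non-zero
# entries, so zeros (candidate dropouts) are not penalized during training.
def _demo_mse_nz():
    import numpy as np
    X = np.array([[1.0, 0.0],
                  [2.0, 0.0]])  # input with zeros (possible dropouts)
    h = np.array([[0.5, 0.3],
                  [2.0, 0.1]])  # reconstruction
    omega = np.sign(X)  # 1 where X > 0, 0 where X == 0
    mse_nz = np.mean(((X - h) ** 2) * omega)  # (0.25 + 0 + 0 + 0) / 4 = 0.0625
    mse = np.mean((X - h) ** 2)               # (0.25 + 0.09 + 0 + 0.01) / 4 = 0.0875
    print('mse_nz:', mse_nz, 'mse:', mse)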
def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1):
    '''Save MSE learning curves as a plot and a csv file.
    Parameters: epoch_log, mse_batch_vec, mse_valid_vec,
    stage (step1 or step2), skip.'''
    print('> plotting learning curves')
    scimpute.learning_curve(epoch_log, mse_batch_vec, mse_valid_vec,
                            title="Learning Curve MSE.{}".format(stage),
                            ylabel='MSE (X vs Y)', dir=stage, skip=skip)
    _ = np.asarray(list(zip(epoch_log, mse_batch_vec, mse_valid_vec)))
    _ = pd.DataFrame(data=_, index=epoch_log,
                     columns=['Epoch', 'MSE_batch', 'MSE_valid']).set_index('Epoch')
    _.to_csv("./{}/mse.csv".format(stage))


def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1):
    '''Save MSE_NZ learning curves as a plot and a csv file.
    Parameters: epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip.'''
    print('> plotting learning curves')
    scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec,
                            title="Learning Curve MSE_NZ.{}".format(stage),
                            ylabel='MSE_NZ (X vs Y, nz)', dir=stage, skip=skip)
    _ = np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec)))
    _ = pd.DataFrame(data=_, index=epoch_log,
                     columns=['Epoch', 'MSE_NZ_batch', 'MSE_NZ_valid']).set_index('Epoch')
    _.to_csv("./{}/mse_nz.csv".format(stage))


def fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_data, gene_ids, cell_ids):
    '''Calculate the snapshot imputation of the current model on input_data;
    dropout is disabled by setting both keep-probabilities to 1.'''
    Y_input_arr = sess.run(h, feed_dict={X: input_data, pIn_holder: 1, pHidden_holder: 1})
    Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
    return Y_input_df


def save_whole_imputation(sess, X, h, a_bottleneck, pIn_holder, pHidden_holder,
                          input_matrix, gene_ids, cell_ids, p, m):
    '''Calculate and save imputation results for the whole matrix in 'impute'
    mode. If the number of cells is larger than a threshold (large_size: 1e5),
    impute on small blocks of rows to avoid high memory cost.'''
    if m > p.large_size:
        # impute on small data blocks to avoid high memory cost
        n_out_batches = m // p.sample_size
        print('num_out_batches:', n_out_batches)
        handle2 = open('./{}/latent_code.{}.csv'.format(p.stage, p.stage), 'w')
        with open('./{}/imputation.{}.csv'.format(p.stage, p.stage), 'w') as handle:
            for i_ in range(n_out_batches + 1):
                start_idx = i_ * p.sample_size
                end_idx = min((i_ + 1) * p.sample_size, m)
                print('saving:', start_idx, end_idx)
                x_out_batch = input_matrix[start_idx:end_idx, :].todense()
                y_out_batch = sess.run(
                    h, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                df_out_batch = pd.DataFrame(
                    data=y_out_batch, columns=gene_ids,
                    index=cell_ids[range(start_idx, end_idx)])
                latent_code = sess.run(
                    a_bottleneck, feed_dict={X: x_out_batch, pIn_holder: 1, pHidden_holder: 1})
                latent_code_df = pd.DataFrame(
                    data=latent_code, index=cell_ids[range(start_idx, end_idx)])
                if i_ == 0:
                    df_out_batch.to_csv(handle, float_format='%.6f')
                    latent_code_df.to_csv(handle2, float_format='%.6f')
                    print('RAM usage during mini-batch imputation and saving output: ',
                          '{} M'.format(usage()))
                else:
                    df_out_batch.to_csv(handle, header=None)
                    latent_code_df.to_csv(handle2, header=None)
        handle2.close()
    else:
        # m, the number of cells, is less than large_size (1e5)
        Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(),
                                             pIn_holder: 1, pHidden_holder: 1})
        Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids)
        latent_code = sess.run(a_bottleneck, feed_dict={X: input_matrix.todense(),
                                                        pIn_holder: 1, pHidden_holder: 1})
        latent_code_df = pd.DataFrame(data=latent_code, index=cell_ids)
        print('RAM usage before saving output: ', '{} M'.format(usage()))
        scimpute.save_hd5(Y_input_df, "{}/imputation.{}.hd5".format(p.stage, p.stage))
        scimpute.save_hd5(latent_code_df, "{}/latent_code.{}.hd5".format(p.stage, p.stage))


def visualize_weight(sess, stage, w_name, b_name):
    w = eval(w_name)  # weight/bias tensors are module-level globals set in build_late
    b = eval(b_name)
    w_arr = sess.run(w)
    b_arr = sess.run(b)
    b_arr = b_arr.reshape(len(b_arr), 1)
    b_arr_T = b_arr.T
    scimpute.visualize_weights_biases(w_arr, b_arr_T,
                                      '{},{}.{}'.format(w_name, b_name, stage), dir=stage)


def visualize_weights(sess, stage, en_de_layers):
    for l1 in range(1, en_de_layers + 1):
        encoder_weight = 'e_w' + str(l1)
        encoder_bias = 'e_b' + str(l1)
        visualize_weight(sess, stage, encoder_weight, encoder_bias)
        decoder_weight = 'd_w' + str(l1)
        decoder_bias = 'd_b' + str(l1)
        visualize_weight(sess, stage, decoder_weight, decoder_bias)


def save_weights(sess, stage, en_de_layers):
    print('save weights in npy')
    for l1 in range(1, en_de_layers + 1):
        encoder_weight_name = 'e_w' + str(l1)
        encoder_bias_name = 'e_b' + str(l1)
        decoder_weight_name = 'd_w' + str(l1)
        decoder_bias_name = 'd_b' + str(l1)
        np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage),
                sess.run(eval(encoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, decoder_weight_name, stage),
                sess.run(eval(decoder_weight_name)))
        np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage),
                sess.run(eval(encoder_bias_name)))
        np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage),
                sess.run(eval(decoder_bias_name)))
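# Added illustration (toy sizes are assumptions): the block boundaries that
# save_whole_imputation() iterates over when the matrix is large. With m = 10
# cells and sample_size = 4 the loop writes rows [0:4], [4:8], [8:10]; when m
# divides evenly, the final block is empty and writes nothing.
def _demo_output_blocks():
    m, sample_size = 10, 4  # toy sizes
    n_out_batches = m // sample_size  # 2; the loop runs n_out_batches + 1 times
    for i_ in range(n_out_batches + 1):
        start_idx = i_ * sample_size
        end_idx = min((i_ + 1) * sample_size, m)
        print('block', i_, '-> rows', start_idx, 'to', end_idx)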
\") if m >", "e_a2, d_w2, d_b2, pHidden_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n, p.sd)", "= max(G.values.max(), X.values.max(), Y.values.max()) min_expression = min(G.values.min(), X.values.min(), Y.values.min()) print('\\n max expression:', max_expression)", "param_file = 'global_params.py' param_name = param_file.rstrip('.py') p = SourceFileLoader(param_name, cwd + '/' +", "np.random.choice( range(len(cell_ids_train)), min(sample_size, len(cell_ids_train))) sample_train = input_train[rand_idx, :].todense() sample_train_cell_ids = cell_ids_train[rand_idx] rand_idx =", "= scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation) if p.fname_input == p.fname_ground_truth: G = X else: G =", "between Imputation and Input: ', mse1) mse2_nz = scimpute.mse_omega(Y, G) mse2_nz = round(mse2_nz,", "and use name 'p' #print(\"Usage: python late.py -mode <late> -infile <xx.hd5>\") argms =", "{} took: {} CPU seconds; {} Wall seconds'.format( epoch, round(toc_cpu - tic_cpu, 2),", "{}'.format(p.sample_size)) return p # to do: modify to display based on mode #", "on mse_nz') trainer = optimizer.minimize(mse_nz + reg_term) elif p.mse_mode == 'mse': print('training on", "round(mse1_nz, 7) print('MSE1_NZ between Imputation and Input: ', mse1_nz) mse1 = scimpute.mse(Y, X)", "start_idx, end_idx) x_out_batch = input_matrix[start_idx:end_idx, :].todense() y_out_batch = sess.run( h, feed_dict={ X: x_out_batch,", "model changed # define input/output a_bottleneck = e_a1 else: raise Exception(\"{} L not", "scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input) # gene_be_matrix.matrix = input_obj.matrix.log1p() input_matrix = input_obj.matrix gene_ids = input_obj.gene_ids", "already been transformed into cell_row') print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation,'\\n', Y.ix[0:20, 0:3]) print('X", "log_dir = './{}'.format(p.stage) scimpute.refresh_logfolder(log_dir) tic_start = time.time() #3. 
load data input_matrix, gene_ids, cell_ids", "impute and output p.stage = 'impute' p.run_flag = 'impute' p.learning_rate = 0.0 elif", "# [genes, cells] in df_trans() print('pandas input_df mem usage: ') input_df.info(memory_usage='deep') # Test", "p.stage)) else: Y_input_df = fast_imputation(sess, h, X, pIn_holder, pHidden_holder, input_matrix.todense(), gene_ids, cell_ids) scimpute.save_hd5(Y_input_df,", "this code, matrices should have already been transformed into cell_row') print('Y (imputation):', p.fname_imputation,", "p.ori_input) print('RAM usage after reading input_df: {} M'.format(usage())) # Data Transformation print('> DATA", "mse curves to csv files Parameters: ----------- skip: epoch_log: mse_nz_batch_vec: mse_nz_valid_vec: stage: '''", "based on mode # def display_params(p): # PRINT PARAMETERS print('\\nmode:', p.mode) print('\\nData:') print('fname_input:',", "range_max, ' ', range_min) scimpute.heatmap_vis(Y.values, title='Imputation ({})'.format(p.name_imputation), xlab='Genes', ylab='Cells', vmax=range_max, vmin=range_min, dir=p.tag) scimpute.heatmap_vis(X.values,", "pHidden_holder) # todo: change input activations if model changed # define input/output a_bottleneck", "1}) # save sample imputation Y_input_df = pd.DataFrame(data=Y_input_arr, columns=gene_ids, index=cell_ids) return Y_input_df #def", "(m > p.large_size), only save the #imputation results of a small sample set", "5: # change with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1,", "print('\\n') def read_data(p): '''READ DATA Parameters ------------ p: Return ----------- ''' print('>READING DATA..')", "9L elif mode == 'impute': # step2/load_saved/learning_rate=0, just impute and output p.stage =", "p.sd) d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 =", "d_a3 if p.L == 7: # change with layer with tf.name_scope('Encoder_L1'): e_w1, e_b1", "= round(ram, 1) return ram # sys.path.append('./bin') # print('sys.path', sys.path) #print('python version:', sys.version)", "= scimpute.max_min_element_in_arrs([Y.values, G.values, X.values]) print('\\nrange:', range_max, ' ', range_min) scimpute.heatmap_vis(Y.values, title='Imputation ({})'.format(p.name_imputation), xlab='Genes',", "input p, X, pIn_holder, pHidden_holder, n # return a_bottleneck, h(d_a1) tf.set_random_seed(rand_state) # seed", "= scimpute.weight_bias_variable('decoder2', p.n_hidden_2, p.n_hidden_1, p.sd) d_a2 = scimpute.dense_layer('decoder2', d_a3, d_w2, d_b2, pHidden_holder) with", "does not exist') continue scimpute.scatterplot2(G_j, Y_j, range='same', title=str(str(j) + '\\n(Ground Truth vs Imputation)", "of gene expression max_expression = max(G.values.max(), X.values.max(), Y.values.max()) min_expression = min(G.values.min(), X.values.min(), Y.values.min())", "0: df_out_batch.to_csv(handle, float_format='%.6f') latent_code_df.to_csv(handle2, float_format='%.6f') print('RAM usage during mini-batch imputation and saving output:", "1.0} ) toc_log = time.time() print('mse_nz_batch:{}; mse_omage_valid: {}'. 
format(mse_nz_batch, mse_nz_valid)) print('mse_batch:', mse_batch, ';", "p.fname_input, p.ori_input, p.transformation_input,'\\n', X.ix[0:20, 0:3]) print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,'\\n', G.ix[0:20, 0:3])", "M'.format(usage())) scimpute.save_hd5(Y_input_df, \"{}/imputation.{}.hd5\".format(p.stage, p.stage)) scimpute.save_hd5(latent_code_df, \"{}/latent_code.{}.hd5\".format(p.stage, p.stage)) def visualize_weight(sess, stage, w_name, b_name): w", "# msej = MSE(X, h), for genej, nz_cells print('RAM usage after building the", "# input pIn_holder = tf.placeholder(tf.float32, name='p.pIn') #keep_prob for dropout pHidden_holder = tf.placeholder(tf.float32, name='p.pHidden')#keep_prob", "#return cell_ids_train, cell_ids_valid, cell_ids_test m, n = input_matrix.shape input_train, input_valid, input_test, train_idx, valid_idx,", "scimpute.pca_tsne(df_cell_row=Y, cluster_info=cluster_info, title=p.name_imputation, dir=p.tag) scimpute.pca_tsne(df_cell_row=X, cluster_info=cluster_info, title=p.name_input, dir=p.tag) scimpute.pca_tsne(df_cell_row=G, cluster_info=cluster_info, title=p.name_ground_truth, dir=p.tag) def", "p.l+1): print(\"n_hidden{}: {}\".format(l_tmp, eval('p.n_hidden_'+str(l_tmp)))) print('learning_rate:', p.learning_rate) print('reg_coef:', p.reg_coef) print('batch_size:', p.batch_size) print('sample_zie: ', p.sample_size)", "a_bottleneck = e_a3 elif p.L == 5: # change with layer with tf.name_scope('Encoder_L1'):", "not exist') continue scimpute.scatterplot2(G_j, Y_j, range='same', title=str(str(j) + '\\n(Ground_truth vs Imputation) '), xlabel='Ground", "e_w1, e_b1, e_a1, e_w2, e_b2, e_a2, e_w3, e_b3, e_a3 global d_w1, d_b1, d_a1,", "= sess.run([mse, mse_nz], feed_dict={X: sample_train,pHidden_holder: 1.0, pIn_holder: 1.0}) mse_valid, mse_nz_valid = sess.run([mse, mse_nz],feed_dict={X:", "m)) sample_input = input_matrix[rand_idx, :].todense() sample_input_cell_ids = cell_ids[rand_idx] del rand_idx gc.collect() np.random.seed() else:", "# d_a4 = scimpute.dense_layer('decoder4', e_a4, d_w4, d_b4, pHidden_holder) with tf.name_scope('Decoder_L3'): d_w3, d_b3 =", "'load_saved' # rand_init/load_saved p.learning_rate = 3e-5 # step2: 3e-5 for 3-7L, 3e-6 for", "expression values # and re-generate pairwise plots Y = scimpute.df_exp_discretize_log10(Y) print('\\n> Discrete gene", "than large_size (1e5)) Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(), pIn_holder: 1, pHidden_holder: 1}) #", "define placeholders and variables X = tf.placeholder(tf.float32, [None, n], name='X_input') # input pIn_holder", "range(1, p.l+1): print(\"n_hidden{}: {}\".format(l_tmp, eval('p.n_hidden_'+str(l_tmp)))) print('learning_rate:', p.learning_rate) print('reg_coef:', p.reg_coef) print('batch_size:', p.batch_size) print('sample_zie: ',", "= input_obj.matrix.log1p() input_matrix = input_obj.matrix gene_ids = input_obj.gene_ids cell_ids = input_obj.barcodes print('RAM usage", "d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder) with tf.name_scope('Decoder_L2'): d_w2, d_b2 = scimpute.weight_bias_variable('decoder2',", "print('for ', j) Y_j = Y.ix[:, j] G_j = G.ix[:, j] X_j =", "for 3-7L, 3e-5 for 9L elif mode == 'translate': # step2/load_saved from step1,", "plots print('\\n> Generating PCA and tSNE plots') if p.cluster_file is not None: cluster_info", "p.mode == 'analysis': print('fname_imputation:', p.fname_imputation) print('transformation_imputation', p.transformation_imputation) print('fname_ground_truth: ', 
def read_data(p):
    '''Read the input data.
    Parameters: p (parameter module).
    Return: input_matrix (csr), gene_ids, cell_ids.'''
    print('>READING DATA..')
    print('RAM usage before reading data: {} M'.format(usage()))
    if p.fname_input.endswith('h5'):
        # for large 10x Genomics h5 files
        input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input)
        input_matrix = input_obj.matrix
        gene_ids = input_obj.gene_ids
        cell_ids = input_obj.barcodes
        print('RAM usage after reading sparse matrix: {} M'.format(usage()))
        gc.collect()
        # data transformation
        print('> DATA TRANSFORMATION..')
        input_matrix = scimpute.sparse_matrix_transformation(input_matrix, p.transformation_input)
        del input_obj
        gc.collect()
        print('RAM usage after {} transformation: {} M'.format(p.transformation_input, usage()))
        # test mode: m*n subset (1000 * 300); delete later
        if p.test_flag:
            print('in test mode')
            input_matrix = input_matrix[:p.m, :p.n]
            gene_ids = gene_ids[:p.n]
            cell_ids = cell_ids[:p.m]
            gc.collect()
    else:
        # for smaller files (hd5, csv, csv.gz)
        input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input)
        print('RAM usage after reading input_df: {} M'.format(usage()))
        # data transformation
        print('> DATA TRANSFORMATION..')
        input_df = scimpute.df_transformation(
            input_df.transpose(), transformation=p.transformation_input
        ).transpose()  # df_transformation works on [genes, cells]
        print('pandas input_df mem usage: ')
        input_df.info(memory_usage='deep')
        # test mode or not
        if p.test_flag:
            print('in test mode')
            input_df = input_df.iloc[:p.m, :p.n]
            gc.collect()
        # to sparse
        input_matrix = csr_matrix(input_df)  # todo: read directly into csr and skip input_df
        gene_ids = input_df.columns
        cell_ids = input_df.index
        print('RAM usage before deleting input_df: {} M'.format(usage()))
        del input_df
        gc.collect()
        print('RAM usage after deleting input_df: {} M'.format(usage()))
    # summary of data
    print("name_input:", p.name_input)
    _ = pd.DataFrame(data=input_matrix[:20, :4].todense(), index=cell_ids[:20], columns=gene_ids[:4])
    print("input_df:\n", _, "\n")
    m, n = input_matrix.shape  # m: n_cells; n: n_genes
    print('input_matrix: {} cells, {} genes\n'.format(m, n))
    return input_matrix, gene_ids, cell_ids


def load_results(p):
    '''Load imputation results, input data, and ground truth.
    Parameters: p (from global_params.py and example.py).
    Return: X (input), Y (imputation), G (ground truth); genes in columns.'''
    X, gene_ids, cell_ids = read_data(p)
    X = pd.DataFrame(data=X.todense(), index=cell_ids, columns=gene_ids)
    Y = scimpute.read_data_into_cell_row(p.fname_imputation, p.ori_imputation)
    if p.fname_input == p.fname_ground_truth:
        G = X
    else:
        G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth)
    # data transformation
    Y = scimpute.df_transformation(Y.transpose(), transformation=p.transformation_imputation).transpose()
    if p.fname_input != p.fname_ground_truth:
        G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose()
    # subset/sort X, G to match Y
    # todo: support sparse matrix
    X = X.loc[Y.index, Y.columns]
    G = G.loc[Y.index, Y.columns]
    # test mode or not
    if p.test_flag:
        print('in test mode')
        Y = Y.iloc[0:p.m, 0:p.n]
        G = G.iloc[0:p.m, 0:p.n]
        X = X.iloc[0:p.m, 0:p.n]
    # input summary
    print('\nIn this code, matrices should already have been transformed into cell_row')
    print('Y (imputation):', p.fname_imputation, p.ori_imputation,
          p.transformation_imputation, '\n', Y.iloc[0:20, 0:3])
    print('X (input):', p.fname_input, p.ori_input,
          p.transformation_input, '\n', X.iloc[0:20, 0:3])
    print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth,
          p.transformation_ground_truth, '\n', G.iloc[0:20, 0:3])
    print('Y.shape', Y.shape)
    print('X.shape', X.shape)
    print('G.shape', G.shape)
    return X, Y, G
def calculate_MSEs(X, Y, G):
    '''Calculate MSEs: between imputation and input, and between imputation
    and ground truth (overall, and on non-zero entries only).'''
    print('\n> MSE Calculation')
    max_y, min_y = scimpute.max_min_element_in_arrs([Y.values])
    print('Max in Y is {}, Min in Y is {}'.format(max_y, min_y))
    max_g, min_g = scimpute.max_min_element_in_arrs([G.values])
    print('Max in G is {}, Min in G is {}'.format(max_g, min_g))
    mse1_nz = scimpute.mse_omega(Y, X)
    mse1_nz = round(mse1_nz, 7)
    print('MSE1_NZ between Imputation and Input: ', mse1_nz)
    mse1 = scimpute.mse(Y, X)
    mse1 = round(mse1, 7)
    print('MSE1 between Imputation and Input: ', mse1)
    mse2_nz = scimpute.mse_omega(Y, G)
    mse2_nz = round(mse2_nz, 7)
    print('MSE2_NZ between Imputation and Ground_truth: ', mse2_nz)
    mse2 = scimpute.mse(Y, G)
    mse2 = round(mse2, 7)
    print('MSE2 between Imputation and Ground_truth: ', mse2)
    return mse1_nz, mse1, mse2_nz, mse2


def analyze_variation_in_genes(X, Y, G, p):
    '''Calculate and visualize the standard deviation of each gene:
    write SDs to files and plot histograms of SDs.
    Parameters: X (input), Y (imputation), G (ground truth), p. Return: None.'''
    print('\ncalculating standard deviation in each gene for input and imputed matrix')
    x_std_df, y_std_df = scimpute.nz_std(X, Y)
    x_std_df, g_std_df = scimpute.nz_std(X, G)  # purpose: compare G with Y
    std_ratio_yx_data = [(y / x if x != 0 else None)
                         for y, x in zip(y_std_df.values, x_std_df.values)]
    std_ratio_yx_df = pd.DataFrame(data=std_ratio_yx_data, index=X.columns, columns=['sd_ratio'])
    std_ratio_yg_data = [(y / x if x != 0 else None)
                         for y, x in zip(y_std_df.values, g_std_df.values)]
    std_ratio_yg_df = pd.DataFrame(data=std_ratio_yg_data, index=X.columns, columns=['sd_ratio'])
    std_min = min(y_std_df.min(), x_std_df.min(), g_std_df.min())
    std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max())
    print('generating histograms of standard deviations')
    scimpute.hist_df(y_std_df, xlab='Standard Deviation',
                     title='Imputation({})'.format(p.name_imputation),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(x_std_df, xlab='Standard Deviation',
                     title='Input({})'.format(p.name_input),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(g_std_df, xlab='Standard Deviation',
                     title='Ground Truth({})'.format(p.name_ground_truth),
                     range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    scimpute.hist_df(std_ratio_yg_df, xlab='Ratio of Imputation SD vs Ground Truth SD',
                     title='', range=(std_min, std_max), dir=p.tag)
    std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv')
    std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')
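# Added toy example (made-up SD values): the guarded ratio used in
# analyze_variation_in_genes() above yields None instead of raising a
# ZeroDivisionError for genes whose denominator SD is zero.
def _demo_sd_ratio():
    import pandas as pd
    y_sd = pd.Series([2.0, 1.0, 0.5], index=['gene1', 'gene2', 'gene3'])  # imputation SDs
    x_sd = pd.Series([1.0, 0.0, 0.5], index=['gene1', 'gene2', 'gene3'])  # input SDs
    ratio = [(y / x if x != 0 else None) for y, x in zip(y_sd.values, x_sd.values)]
    ratio_df = pd.DataFrame(data=ratio, index=y_sd.index, columns=['sd_ratio'])
    print(ratio_df)  # gene1 -> 2.0, gene2 -> NaN (from None), gene3 -> 1.0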
(1e5)) Y_input_arr = sess.run(h,", "'''READ DATA Parameters ------------ p: Return ----------- ''' print('>READING DATA..') print('RAM usage before", "decoder_weight, decoder_bias) def save_weights(sess, stage, en_de_layers): print('save weights in npy') for l1 in", "spelled wrong') #2.5 Init a session accoding to the run_flag sess = tf.Session()", "import scimpute def learning_curve_mse(epoch_log, mse_batch_vec, mse_valid_vec, stage, skip=1): '''Save mse curves to csv", "print(\"n_hidden{}: {}\".format(l_tmp, eval('p.n_hidden_'+str(l_tmp)))) print('learning_rate:', p.learning_rate) print('reg_coef:', p.reg_coef) print('batch_size:', p.batch_size) print('sample_zie: ', p.sample_size) print('pIn:',", "print('training on mse') trainer = optimizer.minimize(mse + reg_term) else: raise Exception('mse_mode spelled wrong')", "3) toc_stop = time.time() time_finish = round((toc_stop - tic_start), 2) print(\"Imputation Finished!\") print(\"Wall", "input_obj.gene_ids cell_ids = input_obj.barcodes print('RAM usage after reading sparse matrix: {} M'.format(usage())) gc.collect()", "p.n_hidden_3, p.n_hidden_2, p.sd) d_a3 = scimpute.dense_layer('decoder3', e_a3, d_w3, d_b3, pHidden_holder) with tf.name_scope('Decoder_L2'): d_w2,", "working on mac print('RAM usage after deleting input_df: {} M'.format(usage())) # Summary of", "== 'analysis': print('fname_imputation:', p.fname_imputation) print('transformation_imputation', p.transformation_imputation) print('fname_ground_truth: ', p.fname_ground_truth) print('transformation_ground_truth', p.transformation_ground_truth) print('gene_pair_list: ',", "G = X else: G = scimpute.read_data_into_cell_row(p.fname_ground_truth, p.ori_ground_truth) # print('> DATA TRANSFORMATION..') Y", "optimizer.minimize(mse_nz + reg_term) elif p.mse_mode == 'mse': print('training on mse') trainer = optimizer.minimize(mse", "0 in our data mse_nz = tf.reduce_mean( tf.multiply( tf.pow(X-h, 2), omega ) )", "reference p.stage = 'step1' p.run_flag = 'rand_init' p.learning_rate = 3e-4 # step1: 3e-4", "import tensorflow as tf from importlib.machinery import SourceFileLoader import math import psutil import", "std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max()) print('generating histograms of standard deviations') scimpute.hist_df( y_std_df, xlab='Standard", "write SDs to files plot histograms of SDs Parameters ------------ X: input data", "'mse_omega'): #learning_curve_mse_nz(skip=math.floor(epoch / 5 / p.display_step)) learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, p.stage, skip=math.floor(epoch / 5", "range=(std_min, std_max), dir=p.tag) scimpute.hist_df( std_ratio_yx_df, xlab='Ratio of Imputation SD vs Input SD', title='',", "== 'late') or (p.mode == 'translate'): print('data split: [{}/{}/{}]'.format(p.a, p.b, p.c)) print('\\nParameters:') print('mse_mode:',", "# ) print('min_mse_nz_valid till now: {}'.format(min_mse_valid)) print('snapshot_step: {}s'.format(log2_time)) batch_writer.close() valid_writer.close() sess.close() def build_late(X,", "G.ix[:, j] X_j = X.ix[:, j] except KeyError: print('KeyError: gene ID does not", "pHidden_holder, sample_input, gene_ids, sample_input_cell_ids) scimpute.save_hd5(Y_input_df, \"{}/sample_imputation.{}.hd5\".format(p.stage, p.stage)) else: Y_input_df = fast_imputation(sess, h, X,", "scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with tf.name_scope('Encoder_L2'):", "save bottleneck_representation\") code_bottleneck_input = 
sess.run(a_bottleneck, feed_dict={ X: sample_input, pIn_holder: 1, pHidden_holder: 1}) np.save('{}/code_neck_valid.{}.npy'.format(p.stage,", "mse curves to csv files Parameters: ----------- skip: epoch_log: mse_batch_vec: mse_valid_vec: stage: step1", "rand_idx = np.random.choice(range(m), min(sample_size, m)) sample_input = input_matrix[rand_idx, :].todense() sample_input_cell_ids = cell_ids[rand_idx] del", "= time.time() print('#Epoch {} took: {} CPU seconds; {} Wall seconds'.format( epoch, round(toc_cpu", "'MSE_batch', 'MSE_valid'] ).set_index('Epoch') _.to_csv(\"./{}/mse.csv\".format(stage)) #def learning_curve_mse_nz(skip=1): def learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, stage, skip=1): '''Save", "mse_nz_batch_vec.append(mse_nz_batch) mse_nz_valid_vec.append(mse_nz_valid) epoch_log.append(epoch) #2.7.3 save snapshot step if (epoch % p.snapshot_step == 0)", "tf.multiply( tf.pow(X-h, 2), omega ) ) mse = tf.reduce_mean(tf.pow(X-h, 2)) reg_term = tf.reduce_mean(tf.pow(h,", "small data blocks to avoid high memory cost n_out_batches = m//p.sample_size print('num_out_batches:', n_out_batches)", "input p, X, pIn_holder, pHidden_holder, n # return a_bottleneck, h(d_a1) a_bottleneck, h =", "b_arr.reshape(len(b_arr), 1) b_arr_T = b_arr.T scimpute.visualize_weights_biases(w_arr, b_arr_T, '{},{}.{}'.format(w_name, b_name, stage), dir=stage) def visualize_weights(sess,", "g_std_df.values, index=X.columns, columns=['sd_ratio']) std_ratio_yx_data = [(y/x if x!=0 else None) for y, x", "= e_a1 else: raise Exception(\"{} L not defined, only 3, 5, 7 implemented\".format(p.L))", "Y, nz)', dir=stage, skip=skip ) _ = np.asarray(list(zip(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec))) _ = pd.DataFrame(data=_,", "Ground_truth: ', mse2_nz) mse2 = scimpute.mse(Y, G) mse2 = round(mse2, 7) print('MSE2 between", "/ p.display_step)) learning_curve_mse_nz(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, p.stage, skip=math.floor(epoch / 5 / p.display_step)) elif p.mse_mode", "of Imputation SD vs Ground Truth SD', title='', range=(std_min, std_max), dir=p.tag) std_ratio_yx_df.to_csv('sd_ratio_imputed_vs_input.csv') std_ratio_yg_df.to_csv('sd_ratio_imputed_vs_groundtruth.csv')", "= pd.DataFrame( data=latent_code, index=cell_ids[range(start_idx, end_idx)] ) if i_ == 0: df_out_batch.to_csv(handle, float_format='%.6f') latent_code_df.to_csv(handle2,", "is less than large_size (1e5)) Y_input_arr = sess.run(h, feed_dict={X: input_matrix.todense(), pIn_holder: 1, pHidden_holder:", "call late late_main(input_matrix, gene_ids, cell_ids, p, log_dir, rand_state = 3) toc_stop = time.time()", "in zip(y_std_df.values, g_std_df.values)] std_ratio_yg_df = pd.DataFrame(data= std_ratio_yg_data, index=X.columns, columns=['sd_ratio']) std_min = min(y_std_df.min(), x_std_df.min(),", "input_matrix, gene_ids, cell_ids = read_data(p) ##1. 
split data and save indexes #input p,", "# X = scimpute.df_transformation(X.transpose(), transformation=p.transformation_input).transpose() if p.fname_input == p.fname_ground_truth: G = X else:", "y_std_df.values / g_std_df.values, index=X.columns, columns=['sd_ratio']) std_ratio_yx_data = [(y/x if x!=0 else None) for", "analysis') p.mode = 'invalid' return p if p.test_flag: p.max_training_epochs = 10 # 3L:100,", "scimpute.dense_layer('encoder1', X, e_w1, e_b1, pIn_holder) with tf.name_scope('Decoder_L1'): d_w1, d_b1 = scimpute.weight_bias_variable('decoder1', p.n_hidden_1, n,", "entered cannot be recognized.') print('Valid mode options: pre-training | late | translate |", "on learning curve p.snapshot_step = 5 # interval of saving session, imputation p.m", "'''calculate MSEs MSE between imputation and input MSE between imputation and ground truth", "e_b2, pHidden_holder) with tf.name_scope('Encoder_L3'): e_w3, e_b3 = scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd) e_a3 =", "'\\n(Ground_truth vs Input) '), xlabel='Ground Truth', ylabel='Input', dir=gene_dir ) def result_analysis_main(p): '''analyzing imputation", "scimpute.learning_curve(epoch_log, mse_nz_batch_vec, mse_nz_valid_vec, title=\"Learning Curve MSE_NZ.{}\".format(stage), ylabel='MSE_NZ (X vs Y, nz)', dir=stage, skip=skip", "X.ix[0:20, 0:3]) print('G (ground truth):', p.fname_ground_truth, p.ori_ground_truth, p.transformation_ground_truth,'\\n', G.ix[0:20, 0:3]) print('Y.shape', Y.shape) print('X.shape',", "of epoch 1 and all display steps (epochs) if (epoch == 1) or", "= round(toc_log2 - tic_log2, 1) min_mse_valid = min(mse_nz_valid_vec) # os.system( # '''for file", "np.save('{}/{}.{}'.format(stage, encoder_bias_name, stage), sess.run(eval(encoder_bias_name))) np.save('{}/{}.{}'.format(stage, decoder_bias_name, stage), sess.run(eval(decoder_bias_name))) def usage(): process = psutil.Process(os.getpid())", "the number of cells (m), # we reconstruct the training and validation sets", "transformed into cell_row') print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation,'\\n', Y.ix[0:20, 0:3]) print('X (input):', p.fname_input,", "n_genes print('input_matrix: {} cells, {} genes\\n'.format(m, n)) return input_matrix, gene_ids, cell_ids def load_results(p):", "if p.fname_input == p.fname_ground_truth: G = X else: G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose() #", "truth vs input\") gene_dir = p.tag+'/genes' # genetate a list of genes using", "x_std_df.min(), g_std_df.min()) std_max = max(y_std_df.max(), x_std_df.max(), g_std_df.max()) print('generating histograms of standard deviations') scimpute.hist_df(", "tic_log, 1))) mse_batch_vec.append(mse_batch) mse_valid_vec.append(mse_valid) mse_nz_batch_vec.append(mse_nz_batch) mse_nz_valid_vec.append(mse_nz_valid) epoch_log.append(epoch) #2.7.3 save snapshot step if (epoch", "tf.summary.scalar('mse_nz__Y_vs_X', mse_nz) mse = tf.reduce_mean(tf.pow(X - h, 2)) # for report tf.summary.scalar('mse__Y_vs_X', mse)", "Input: ', mse1_nz) mse1 = scimpute.mse(Y, X) mse1 = round(mse1, 7) print('MSE1 between", "sess.run(h, feed_dict={X: input_matrix.todense(), pIn_holder: 1, pHidden_holder: 1}) # save sample imputation Y_input_df =", "mse_nz_train = sess.run([mse, mse_nz], feed_dict={X: sample_train,pHidden_holder: 1.0, pIn_holder: 1.0}) mse_valid, mse_nz_valid = sess.run([mse,", "e_w2, e_b2 = scimpute.weight_bias_variable('encoder2', p.n_hidden_1, p.n_hidden_2, p.sd) e_a2 = 
scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2,", "pIn_holder, pHidden_holder, n # return a_bottleneck, h(d_a1) a_bottleneck, h = build_late(X, pHidden_holder, pIn_holder,", "= 300 p.sample_size = int(240) print('in test mode\\n', 'num-genes set to {}, num-cells", "= X else: G = scimpute.df_transformation(G.transpose(), transformation=p.transformation_ground_truth).transpose() # subset/sort X, G to match", "layer with tf.name_scope('Encoder_L1'): e_w1, e_b1 = scimpute.weight_bias_variable('encoder1', n, p.n_hidden_1, p.sd) e_a1 = scimpute.dense_layer('encoder1',", "(not others) p.run_flag = 'load_saved' # rand_init/load_saved p.learning_rate = 3e-5 # step2: 3e-5", "e_a2 = scimpute.dense_layer('encoder2', e_a1, e_w2, e_b2, pHidden_holder) with tf.name_scope('Encoder_L3'): e_w3, e_b3 = scimpute.weight_bias_variable('encoder3',", "sampling. try: p.sample_size sample_size = p.sample_size except: sample_size = int(9e4) if sample_size <", "Return ----------- 4 MSEs ''' print('\\n> MSE Calculation') max_y, min_y = scimpute.max_min_element_in_arrs([Y.values]) print('Max", "input data is: {} M'.format(usage())) # todo: for backward support for older parameter", "to avoid high memory cost n_out_batches = m//p.sample_size print('num_out_batches:', n_out_batches) handle2 = open('./{}/latent_code.{}.csv'.format(p.stage,", "= time.clock(), time.time() ridx_full = np.random.choice(len(train_idx), len(train_idx), replace=False) #2.7.1 training model on mini-batches", "and variables X = tf.placeholder(tf.float32, [None, n], name='X_input') # input pIn_holder = tf.placeholder(tf.float32,", "print('RAM usage after {} transformation: {} M'.format(p.transformation_input, usage())) # Test or not: m*n", "as handle: for i_ in range(n_out_batches+1): start_idx = i_*p.sample_size end_idx = min((i_+1)*p.sample_size, m)", "G is {}, Min in G is{}'.format(max_g, min_g)) mse1_nz = scimpute.mse_omega(Y, X) mse1_nz", "= tf.reduce_mean( tf.multiply( tf.pow(X-h, 2), omega ) ) mse = tf.reduce_mean(tf.pow(X-h, 2)) reg_term", "index=X.columns, columns=['sd_ratio']) std_ratio_yg_data = [(y/x if x!=0 else None) for y, x in", "print('len of sample_train: {}, sample_valid: {}, sample_input {}'.format( len(sample_train_cell_ids), len(sample_valid_cell_ids), len(sample_input_cell_ids) )) ##2.", "p.fname_imputation) print('transformation_imputation', p.transformation_imputation) print('fname_ground_truth: ', p.fname_ground_truth) print('transformation_ground_truth', p.transformation_ground_truth) print('gene_pair_list: ', p.gene_pair_list) print('\\n') def", "0:3]) print('Y.shape', Y.shape) print('X.shape', X.shape) print('G.shape', G.shape) return X, Y, G def calculate_MSEs(X,", "SDs Parameters ------------ X: input data matrix; genes in columns (same below) Y:", "pIn_holder: 1.0} ) toc_log = time.time() print('mse_nz_batch:{}; mse_omage_valid: {}'. 
format(mse_nz_batch, mse_nz_valid)) print('mse_batch:', mse_batch,", "= scimpute.weight_bias_variable('encoder3', p.n_hidden_2, p.n_hidden_3, p.sd) e_a3 = scimpute.dense_layer('encoder3', e_a2, e_w3, e_b3, pHidden_holder) #", "cannot be recognized.') print('Valid mode options: pre-training | late | translate | impute", "plots for genes specified by the user Parameters ------------ X: input data matrix;", "e_a2, e_w3, e_b3, e_a3 global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3,", "X_j = X.ix[:, j] except KeyError: print('KeyError: gene ID does not exist') continue", "for 10x genomics large h5 files input_obj = scimpute.read_sparse_matrix_from_h5(p.fname_input, p.genome_input, p.ori_input) # gene_be_matrix.matrix", "Y.values, title=\"Correlation for each gene\\n(Ground_truth vs Imputation)\\n{}\\n{}\". format(p.name_ground_truth, p.name_imputation), dir=p.tag, mode='column-wise', nz_mode='first' #", "input data X, Y, G = load_results(p) # calculate MSEs mse1_nz, mse1, mse2_nz,", "sample set (sample_input) print(\"> Impute and save.. \") if m > p.large_size: Y_input_df", "mse_batch_vec, mse_valid_vec = [], [] # mse = MSE(X, h) #msej_batch_vec, msej_valid_vec =", "mse_batch_vec.append(mse_batch) mse_valid_vec.append(mse_valid) mse_nz_batch_vec.append(mse_nz_batch) mse_nz_valid_vec.append(mse_nz_valid) epoch_log.append(epoch) #2.7.3 save snapshot step if (epoch % p.snapshot_step", "p.m = 1000 p.n = 300 p.sample_size = int(240) print('in test mode\\n', 'num-genes", "global d_w1, d_b1, d_a1, d_w2, d_b2, d_a2, d_w3, d_b3, d_a3 if p.L ==", "= 'step1' p.run_flag = 'rand_init' p.learning_rate = 3e-4 # step1: 3e-4 for 3-7L,", "{} CPU seconds; {} Wall seconds'.format( epoch, round(toc_cpu - tic_cpu, 2), round(toc_wall -", "matrices should have already been transformed into cell_row') print('Y (imputation):', p.fname_imputation, p.ori_imputation, p.transformation_imputation,'\\n',", "=='invalid': exit(0) ##2. refresh folder log_dir = './{}'.format(p.stage) scimpute.refresh_logfolder(log_dir) tic_start = time.time() #3.", "np.random.seed() else: sample_input = input_matrix.todense() sample_train = input_train.todense() sample_valid = input_valid.todense() sample_input_cell_ids =", "tf.pow(X-h, 2), omega ) ) mse = tf.reduce_mean(tf.pow(X-h, 2)) reg_term = tf.reduce_mean(tf.pow(h, 2))", "mse_nz],feed_dict={X: sample_valid,pHidden_holder: 1.0, pIn_holder: 1.0}) print(\"mse_nz_train=\", round(mse_nz_train, 3), \"mse_nz_valid=\",round(mse_nz_valid, 3)) print(\"mse_train=\", round(mse_train, 3),\"mse_valid=\",", "each cell\\n(Ground_truth vs Imputation)\\n{}\\n{}\". 
format(p.name_ground_truth, p.name_imputation), dir=p.tag, mode='row-wise', nz_mode='first' ) # heatmaps of", "1.0, pIn_holder: 1.0}) print(\"mse_nz_train=\", round(mse_nz_train, 3), \"mse_nz_valid=\",round(mse_nz_valid, 3)) print(\"mse_train=\", round(mse_train, 3),\"mse_valid=\", round(mse_valid, 3))", "p.stage)) scimpute.save_hd5(latent_code_df, \"{}/latent_code.{}.hd5\".format(p.stage, p.stage)) def visualize_weight(sess, stage, w_name, b_name): w = eval(w_name) b", "'impute' p.learning_rate = 0.0 elif mode == 'analysis': p.tag = 'Eval' p.stage =", "mode') input_matrix = input_matrix[:p.m, :p.n] gene_ids = gene_ids[:p.n] cell_ids = cell_ids[:p.m] gc.collect() else:", "= round(mse1, 7) print('MSE1 between Imputation and Input: ', mse1) mse2_nz = scimpute.mse_omega(Y,", "a_bottleneck = e_a2 elif p.L == 3: # change with layer with tf.name_scope('Encoder_L1'):", "analyze_variation_in_genes(X, Y, G, p): '''calculate and visualize standard deviation in each gene write", "else: # For smaller files (hd5, csv, csv.gz) input_df = scimpute.read_data_into_cell_row(p.fname_input, p.ori_input) print('RAM", "in range(num_batch): # x_batch indices = np.arange(p.batch_size * i, p.batch_size*(i+1)) ridx_batch = ridx_full[indices]", "| impute | analysis') p.mode = 'invalid' return p if p.test_flag: p.max_training_epochs =", "correlations of genes between ground truth and imputation') scimpute.hist_2matrix_corr( G.values, Y.values, title=\"Correlation for", "= build_late(X, pHidden_holder, pIn_holder, p, n, rand_state = 3) #2.3 define loss #", "Parameters ------------ p: parameters from global_params.py and example.py Return ----------- X: input data", "std_ratio_yx_data, index=X.columns, columns=['sd_ratio']) std_ratio_yg_data = [(y/x if x!=0 else None) for y, x", "save imputation results for an input matrix at the 'impute' mode. If the", "= 'e_b'+str(l1) decoder_bias_name = 'd_b'+str(l1) decoder_weight_name = 'd_w'+str(l1) np.save('{}/{}.{}'.format(stage, encoder_weight_name, stage), sess.run(eval(encoder_weight_name))) np.save('{}/{}.{}'.format(stage,", "mse_nz_batch_vec.append(mse_nz_train) mse_nz_valid_vec.append(mse_nz_valid) #2.7. training epochs (1-) for epoch in range(1, p.max_training_epochs+1): tic_cpu, tic_wall", "before deleting input_df: {} M'.format(usage())) del(input_df) gc.collect() # working on mac print('RAM usage", "{} M'.format(usage())) if p.fname_input.endswith('h5'): # for 10x genomics large h5 files input_obj =", "range='same', title=str(str(j) + '\\n(Ground_truth vs Input) '), xlabel='Ground Truth', ylabel='Input', dir=gene_dir ) def", "argparse import tensorflow as tf from importlib.machinery import SourceFileLoader import math import psutil", "#std_ratio_yx_df = pd.DataFrame(data= y_std_df.values / x_std_df.values, index=X.columns, columns=['sd_ratio']) #std_ratio_yg_df = pd.DataFrame(data= y_std_df.values /" ]
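The MSE_NZ loss sketched in the comments above scores only the entries that are non-zero in the input, so scRNA-seq dropout zeros do not pull the reconstruction toward zero. Below is a minimal NumPy sketch of the three loss terms, independent of the TensorFlow graph; the function name late_losses and the coef default are illustrative, and it assumes X holds non-negative expression values.

import numpy as np

def late_losses(X, h, coef=1e-4):
    """MSE over non-zero entries of X, plain MSE, and the output penalty."""
    omega = np.sign(X)                 # 1 where X > 0, 0 at dropouts (assumes X >= 0)
    mse_nz = np.mean(((X - h) ** 2) * omega)
    mse = np.mean((X - h) ** 2)
    reg_term = np.mean(h ** 2) * coef  # coef value is illustrative
    return mse_nz, mse, reg_term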
"""IRC message."""
import re
from typing import Optional

from irc.messages.base import IRCBaseMessage

# Regex for matching the individual parts of an IRC message
private_message_regex = re.compile("^:([^!]+)!(.*?) (PRIVMSG|NOTICE) ([^ ]+) :(.*)")


class IRCMessage(IRCBaseMessage):
    """An IRC private message."""

    def __init__(  # pylint: disable=too-many-arguments
            self,
            raw_message: str,
            author: str,
            hostname: str,
            is_notice: bool,
            target: str,
            message: str
    ) -> None:
        super().__init__(raw_message)

        self.__author = author
        self.__hostname = hostname
        self.__is_notice = is_notice
        self.__target = target
        self.__message = message

    @property
    def author(self) -> str:
        """The author of the message."""
        return self.__author

    @property
    def hostname(self) -> str:
        """The hostname of the message's author."""
        return self.__hostname

    @property
    def is_notice(self) -> bool:
        """Whether or not the message is a NOTICE."""
        return self.__is_notice

    @property
    def target(self) -> str:
        """The target of the message."""
        return self.__target

    @property
    def message(self) -> str:
        """The message itself."""
        return self.__message

    def __str__(self) -> str:
        """String representation of the message."""
        if self.__is_notice:
            return "NOTICE {} : {}".format(self.__author, self.__message)
        return "PRIVMSG {} : {}".format(self.__author, self.__message)

    @staticmethod
    def parse(line: str) -> Optional["IRCMessage"]:
        """Parse a message."""
        match = private_message_regex.match(line)
        if not match:
            return None

        # message_type avoids shadowing the built-in name `type`.
        author, hostname, message_type, target, message = match.groups()
        is_notice = message_type == "NOTICE"

        return IRCMessage(line, author, hostname, is_notice, target, message)
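A quick round-trip check of the parser; the nick, host, and channel in the raw line below are made-up examples, not values from any real network.

raw = ":alice!alice@example.com PRIVMSG #python :hello there"
msg = IRCMessage.parse(raw)
assert msg is not None
assert (msg.author, msg.hostname) == ("alice", "alice@example.com")
assert (msg.target, msg.message) == ("#python", "hello there")
assert not msg.is_notice
print(msg)  # PRIVMSG alice : hello there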
#!/usr/bin/env python3
from calendar import day_name, weekday

month, day, year = map(int, input().split())
print(day_name[weekday(year, month, day)].upper())
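Note the argument order: input arrives as month, day, year, while calendar.weekday takes year, month, day (returning 0 for Monday). A sample run, assuming the snippet above is saved as day_of_week.py (hypothetical name):

# $ echo "08 05 2015" | python3 day_of_week.py
# WEDNESDAY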
from polyphony import testbench


class C:
    def __init__(self, x):
        self.x = x * x

    def calc(self, x):
        for i in range(x):
            self.x += 1
        return self.x


def method06(x):
    return C(x).calc(x)


@testbench
def test():
    assert 2 == method06(1)
    assert 6 == method06(2)
    assert 12 == method06(3)


test()
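C(x) initialises self.x to x*x, and calc(x) then increments it x times, so method06(x) computes x*x + x = x*(x+1); that is exactly what the testbench asserts (1*2 = 2, 2*3 = 6, 3*4 = 12). A plain-Python sanity check of the closed form, runnable without the Polyphony toolchain:

# Closed-form check: method06(x) == x * (x + 1) for a few inputs.
for n in (1, 2, 3, 10):
    assert method06(n) == n * (n + 1)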
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2019/1/21 10:05 PM
# @Author  : w8ay
# @File    : nmap.py
import nmap

from lib.data import logger


def nmapscan(host, ports):
    # Takes the ports discovered by masscan and fingerprints them with nmap.
    # Safe to call from multiple threads: each call builds its own scanner.
    nm = nmap.PortScanner()
    argument = "-sV -sS -Pn --host-timeout 1m -p{}".format(','.join(ports))
    try:
        ret = nm.scan(host, arguments=argument)
    except nmap.PortScannerError:
        logger.debug("Nmap PortScannerError host:{}".format(host))
        return None
    except Exception:
        return None

    # debug
    elapsed = ret["nmap"]["scanstats"]["elapsed"]
    command_line = ret["nmap"]["command_line"]
    logger.debug("[nmap] succeeded, elapsed:%s command_line:%s" % (elapsed, command_line))

    if host in ret["scan"]:
        try:
            result = ret["scan"][host]["tcp"]
        except KeyError:
            return None
        return result
    return None
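A hypothetical call for illustration; the loopback host and port list stand in for real masscan output, and the return value is python-nmap's per-port TCP table:

# Hypothetical inputs; only scan hosts you are authorized to test.
tcp_info = nmapscan("127.0.0.1", ["22", "80", "443"])
if tcp_info:
    for port, info in tcp_info.items():
        # Each entry carries nmap's service fingerprint for that port.
        print(port, info.get("state"), info.get("name"), info.get("product"))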
# Convergence plots for the ADMM / AA-ADMM / AA-DR experiments
# (reconstructed excerpt; the colours follow MATLAB's default palette).
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

font1 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 20}
font2 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 16}
labels = ['', '', '', '']

colors = []
colors.append([0 / 255, 113 / 255, 188 / 255])   # 1
colors.append([216 / 255, 82 / 255, 24 / 255])   # 2
colors.append([236 / 255, 176 / 255, 31 / 255])  # 3
colors.append([125 / 255, 46 / 255, 141 / 255])  # 4
colors.append([118 / 255, 171 / 255, 47 / 255])  # 5
colors.append([76 / 255, 189 / 255, 237 / 255])  # 6
colors.append([161 / 255, 19 / 255, 46 / 255])   # 7


def plot_errmore(data, is_iter, id, is_dr, min_err):
    len1 = np.shape(data)[0]
    if is_iter:
        x = np.linspace(0, len1, len1)
    else:
        x = data[:, 0] - data[0, 0]
    # y = data[:, 1] / data[0, 1]
    y = data[:, 2]  # / data[0, 2]
    if id == 0:
        label1 = 'ADMM'
    else:
        if is_dr:
            label1 = 'AA DR m' + str(id)
        else:
            label1 = 'AA ADMM m' + str(id)
    l1, = plt.plot(x, y, label=label1, color=colors[id], linewidth=2.5)
    max_t = max(x)
    return (l1, max_t)


def plot_err3(data, is_iter, id, is_dr, cid, min_err, ytype, resetype):
    len1 = np.shape(data)[0]
    if is_iter:
        x = np.linspace(0, len1, len1)
    else:
        x = data[:, 0] - data[0, 0]
    if ytype == 'e':
        y = data[:, 1] - min_err
    else:
        y = data[:, 2]
    if id == 0:
        label1 = 'ADMM'
    else:
        if is_dr:
            label1 = 'AA DR' + resetype
        else:
            label1 = 'AA ADMM' + resetype
    l1, = plt.plot(x, y, label=label1, color=colors[cid], linewidth=2.5)
    max_t = max(x)
    return (l1, max_t)


def plot_reset(data, is_iter):
    len1 = np.shape(data)[0]
    if is_iter:
        x = np.linspace(0, len1, len1)
    else:
        x = data[:, 0] - data[0, 0]
    y = data[:, 2]
    reset = data[:, 3]
    nx = []
    ny = []
    for i in range(1, len(reset)):
        if reset[i] > reset[i - 1]:
            nx.append(x[i])
            ny.append(y[i])
    plt.scatter(nx, ny, color='blue', alpha=0.6, s=20)
    # max_t = max(x)


def find_minerr(path):
    min_errs = []
    name = path + 'mid0_mu10_m' + str(1) + '.txt'
    # print(name)
    res = np.loadtxt(name)
    min_errs.append(min(res[:, 1]))
    for m in range(1, 7):
        res = np.loadtxt(path + 'mid1_mu10_m' + str(m) + '.txt')
        min_errs.append(min(res[:, 1]))
        res = np.loadtxt(path + 'mid2_mu10_m' + str(m) + '.txt')
        min_errs.append(min(res[:, 1]))
    min_err = min(min_errs)
    return min_err


# The original script ends with a series of `if 0:` / `if 1:` experiment
# blocks that sweep the penalty mu, the Anderson memory m, and the input
# file id. Each block loads mid{method}_mu{mu}_m{m}.txt (or per-file
# *_mid{method}_outer_*.txt) results, draws ADMM / AA-ADMM / AA-DR curves
# with restart points marked via plot_reset, labels the x-axis "#Iters" or
# "Time(s)"/"Time(ms)" and the y-axis "Combined residual", "Energy", or
# "f(x)+g(z)", and saves each figure with plt.savefig(..., transparent=True).
46 / 255, 141 / 255])", "0, min_err) ls.append(l1) maxts.append(maxt) for i in range(1, 7): res = np.loadtxt(path +", "# AA-ADMM (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err,ytype, ' ')", "# plt.title('file' + str(fid)) plt.tight_layout() save_name = savepath + str(fid) + ytype +", "> 3: # res = np.loadtxt(path + 'mid3_m5_outer' + str(outer) + '.txt') #", "102, 103, 104, 191, 192, 193, 194, 195): # for outer in (100000,", "fid in range(1, 11): for outer in range(10, 11): path = \"D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f\" +", "+ str(outer) + '.txt') res3 = np.loadtxt(path + 'mid2_mu' + str(outer) + '_m'", "ls.append(l1) maxts.append(maxt) # AA-ADMM (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err,", "6 aa_dr_m = 6 # for outer in (0, 10, 20, 30, 40,", "str(outer) + \"_5.png\" print(save_name) plt.savefig(save_name, transparent=True, dpi=150) plt.clf() # diff m each iters", "3]) for i in range(1, 7): name = path + str(fid) + 'aaadmm'", "== 'r': l1, = plt.semilogy(x, y, label=label1, color=colors[cid], linewidth=2.5) else: l1, = plt.semilogy(x,", "plt.xlim(0, max(maxts)) plt.ylabel(\"Combined residual\", font1) plt.legend(handles=ls, loc='best', prop=font2) plt.title('file' + str(fid) + '", "id == 0: label1 = 'ADMM' else: if is_dr: label1 = 'DR m='", "0: ytype = 'e' for fid in range(1, 11): # fid = 'monkey'", "save_name = savepath + str(fid) + ytype + \"AA\" + str(aa_admm_m) + \"_DR\"", "[] ny = [] for i in range(1, len(reset)): if reset[i] > reset[i", "plt.xlim(0, max(maxts)) if ytype == 'e': plt.ylabel('Energy', font1) else: plt.ylabel('Combined residual', font1) plt.legend(handles=ls,", "60): # for outer in (0, 5, 10, 15, 20, 25): for fid", "2, 0) ls.append(l1) maxts.append(maxt) # for i in range(1, 7): # name =", "+ str(fid) + ' mu ' + str(outer)) plt.tight_layout() save_name = savepath +", "[] maxts = [] res = np.loadtxt(path + str(fid) + 'admm.txt') (l1, maxt)", "y = data[:, 2] # /data[0, 1] y = data[:, 2] #/ data[0,", "in range(1, len(reset)): if reset[i] > reset[i - 1]: nx.append(x[i]) ny.append(y[i]) plt.scatter(nx, ny,", "ytype, ' ') ls.append(l1) maxts.append(maxt) # plot_reset(res, is_iter) ## AA-DR res = np.loadtxt(path", "141 / 255]) # 4 colors.append([118 / 255, 171 / 255, 47 /", "str(outer) + \".png\" print(save_name) plt.savefig(save_name, transparent=True, dpi=150) plt.clf() # plt.show() # AA-DR if", "else: plt.xlabel(\"Time(s)\", font1) plt.xlim(0, max(maxts)) # plt.xlim(0, 50) if ytype == 'r': plt.ylabel(\"Combined", "ytype = 'e' for fid in range(1, 11): # fid = 'monkey' path", "if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0, 1500) else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0, max(maxts)) if ytype", "'.txt') (l1, maxt) = plot_err3(res, is_iter, 2, 1, 2, 0, ytype, '-PR') ls.append(l1)", "for eachiter in range(1, len(res1)): if np.isnan([res1[eachiter, 0]]): break # if res3[eachiter, 2]", "x = data[:, 0] - data[0, 0] # y = data[:, 2] #", "in range(0, 2): for nmid in range(1, 2): # outer = 0 #", "ny, color='blue', alpha=0.6, s=20) # max_t = max(x) # return (l1, max_t) ##", "mid = str(nmid) ls = [] maxts = [] maxis = [] name", "plt.xlim(0, 2000) else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0, max(maxts)) plt.ylabel(\"Combined residual\", font1) plt.legend(handles=ls, loc='best', prop=font2)", "= path + str(fid) + '_mid0_outer_0.txt' res1 = np.loadtxt(name) # print(res) name =", "plot_errmore(res, is_iter, 0, 0, min_err) ls.append(l1) maxts.append(maxt) for i in range(1, 7): res", "data[0, 0] # y = data[:, 2] # /data[0, 1] y = 
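# Column layout assumed by the helpers above (inferred from their indexing):
# col 0 = wall-clock time stamp, col 1 = energy f(x)+g(z), col 2 = combined
# residual (or, for plot_reset inputs, the cumulative reset counter).
# Illustrative sanity check on synthetic data -- not part of the original
# script; the arrays below are fabricated, and the block stays disabled like
# the experiment toggles that follow.
if 0:
    t = np.linspace(0.0, 50.0, 200)                # fake time stamps
    energy = 1.0 + np.exp(-0.05 * np.arange(200))  # fake decreasing energy
    residual = np.exp(-0.08 * np.arange(200))      # fake combined residual
    fake = np.column_stack([t, energy, residual])
    l1, maxt = plot_err3(fake, 0, 0, 0, 0, 0, 'r', ' ')    # 'ADMM' curve
    l2, maxt = plot_err3(fake, 0, 6, 1, 2, 0, 'r', '-PR')  # 'ours-PR' curve
    plt.legend(handles=[l1, l2], loc='best', prop=font2)
    plt.show()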
## diff m
if 0:
    for fid in range(1, 11):
        path = "D:/project/ADMMAA/BasisPursuit/res/"
        # os.system("mkdir D:\\project\\ADMMAA\\data\\no_noise_fig3\\file" + str(fid))
        savepath = "D:/project/ADMMAA/BasisPursuit/fig/"
        # savepath = 'D:/project/ADMMAA/data/diffpm/p0.5/fig/2efile' + str(fid) + '_'
        # for is_iter in (1, 0):
        is_iter = 1
        outer = 0
        for is_iter in range(0, 2):
            for nmid in range(1, 3):
                mid = str(nmid)
                ls = []
                maxts = []
                res = np.loadtxt(path + str(fid) + 'admm.txt')
                (l1, maxt) = plot_errmore(res, is_iter, 0, nmid % 3 == 2, 0)
                ls.append(l1)
                maxts.append(maxt)
                # min_err = min(res[:, 3])
                for i in range(1, 7):
                    name = path + str(fid) + 'aaadmm' + str(i) + '.txt'
                    res = np.loadtxt(name)
                    # min_err1 = min(res[:, 3])
                    # min_err = min(min_err, min_err1)
                    (l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, 0)
                    ls.append(l1)
                    maxts.append(maxt)
                # for i in range(1, 7):
                #     name = path + 'aadr' + str(i) + '.txt'
                #     res = np.loadtxt(name)
                if is_iter:
                    plt.xlabel("#Iters", font1)
                    plt.xlim(0, 1000)
                else:
                    plt.xlabel("Time(ms)", font1)
                    plt.xlim(0, max(maxts))
                plt.legend(handles=ls, loc='best', prop=font2)
                # plt.title('file' + str(fid) + ' outer ' + str(outer))
                plt.tight_layout()
                save_name = savepath + str(fid) + "emid_" + mid + str(is_iter) + "_outer_" + str(outer) + ".png"
                print(save_name)
                plt.savefig(save_name, transparent=True, dpi=150)
                plt.clf()
                # plt.show()

# AA-DR
if 1:
    ytype = 'r'
    for fid in range(1, 4):
        path = "D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/res/"
        # os.system("mkdir D:\\project\\ADMMAA\\data\\no_noise_fig3\\file" + str(fid))
        savepath = "D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/fig/"
        # for is_iter in (1, 0):
        is_iter = 1
        outer = 1
        nmid = 1
        aa_admm_m = 6
        aa_dr_m = 6
        # for outer in (0, 10, 20, 30, 40, 50, 60, 70, 80, 83):
        # for outer in (0, 1, 2, 3, 4, 100, 101, 102, 103, 104, 191, 192, 193, 194, 195):
        for outer in range(10, 11):
            for is_iter in range(0, 2):
                mid = str(nmid)
                ls = []
                maxts = []
                maxis = []
                name = path + str(fid) + '_mid0_outer_0.txt'
                res1 = np.loadtxt(name)
                # print(res)
                name = path + str(fid) + '_mid1_outer_0.txt'
                res2 = np.loadtxt(name)
                name = path + str(fid) + '_mid2_outer_0.txt'
                res3 = np.loadtxt(name)
                # ADMM
                min_err = 0
                (l1, maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res1)):
                    if np.isnan(res1[eachiter, 0]):
                        break
                    # if res3[eachiter, 2] == 0:
                    #     break
                    iter = iter + 1
                maxis.append(iter)
                # print(iter)
                # AA-ADMM
                (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res2)):
                    if np.isnan(res2[eachiter, 0]):
                        break
                    # if res2[eachiter, 2] == 0:
                    #     break
                    iter = iter + 1
                maxis.append(iter)
                # print(iter)
                ## AA-DR
                (l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 2, min_err, ytype, '-PR')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res3)):
                    if np.isnan(res3[eachiter, 0]):
                        break
                    iter = iter + 1
                maxis.append(iter)
                # plot_err3(data, is_iter, id, is_dr, cid, min_err, ytype, resetype)
                name = path + str(fid) + '_mid3_outer_0.txt'
                res3 = np.loadtxt(name)
                (l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 3, min_err, ytype, '-DRE')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res3)):
                    if np.isnan(res3[eachiter, 0]):
                        break
                    # if res3[eachiter, 2] == 0:
                    #     break
                    iter = iter + 1
                maxis.append(iter)
                if is_iter:
                    plt.xlabel("#Iters", font1)
                    plt.xlim(0, max(maxis))
                else:
                    plt.xlabel("Time(s)", font1)
                    plt.xlim(0, max(maxts))
                    # plt.xlim(0, 50)
                if ytype == 'r':
                    plt.ylabel("Combined residual", font1)
                else:
                    plt.ylabel("f(x)+g(z)", font1)
                # plt.ylabel("f(x)+g(z)", font1)
                plt.legend(handles=ls, loc='best', prop=font2)
                # plt.title('file' + str(fid))
                plt.tight_layout()
                save_name = savepath + str(fid) + ytype + "AA" + str(aa_admm_m) + "_DR" + str(aa_dr_m) + "_t" + str(is_iter) + "_outer_" + str(outer) + ".png"
                print(save_name)
                plt.savefig(save_name, transparent=True, dpi=600)
                plt.clf()
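# The four iter-counting loops above are identical up to the array they scan.
# A loop-free equivalent (first_valid_rows is a name introduced here, not in
# the original script): count the rows before the first NaN in column 0.
def first_valid_rows(res):
    nan_mask = np.isnan(res[1:, 0])   # same rows the loops above inspect
    if not nan_mask.any():
        return len(res) - 1           # no NaN: the loop ran to the end
    return int(np.argmax(nan_mask))   # rows preceding the first NaN
# e.g. maxis = [first_valid_rows(r) for r in (res1, res2, res3)]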
# AA-DR test mu
if 0:
    ytype = 'r'  # set explicitly (previously inherited from the enclosing scope)
    for fid in range(1, 5):
        path = "data/coma_data/res/f" + str(fid) + '_'
        # os.system("mkdir D:\\project\\ADMMAA\\data\\test_mu\\file" + str(fid))
        savepath = 'data/coma_data/fig/file' + str(fid) + '_'
        # for is_iter in (1, 0):
        is_iter = 1
        outer = 1
        nmid = 1
        aa_admm_m = 6
        aa_dr_m = 6
        for mu in (10, 100, 1000, 10000, 100000, 1000000):
            for is_iter in range(1, 2):
                mid = str(nmid)
                ls = []
                maxts = []
                # ADMM
                res = np.loadtxt(path + 'mid0_mu' + str(mu) + '.txt')
                # data, is_iter, id, is_dr, cid, min_err, ytype, resetype
                (l1, maxt) = plot_err3(res, is_iter, 0, 0, 0, 0, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                # AA-ADMM
                res = np.loadtxt(path + 'mid1_mu' + str(mu) + '.txt')
                (l1, maxt) = plot_err3(res, is_iter, 1, 0, 1, 0, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                # plot_reset(res, is_iter)
                ## AA-DR
                res = np.loadtxt(path + 'mid2_mu' + str(mu) + '.txt')
                (l1, maxt) = plot_err3(res, is_iter, 2, 1, 2, 0, ytype, '-PR')
                ls.append(l1)
                maxts.append(maxt)
                # plot_reset(res, is_iter)
                res = np.loadtxt(path + 'mid3_mu' + str(mu) + '.txt')
                (l1, maxt) = plot_err3(res, is_iter, 2, 1, 3, 0, ytype, '-DRE')
                ls.append(l1)
                maxts.append(maxt)
                if is_iter:
                    plt.xlabel("#Iters", font1)
                    plt.xlim(0, 100)
                else:
                    plt.xlabel("Time(ms)", font1)
                    plt.xlim(0, max(maxts))
                plt.legend(handles=ls, loc='best', prop=font2)
                plt.title('file' + str(fid) + ' outer ' + str(outer))
                plt.tight_layout()
                save_name = savepath + "mu" + str(mu) + ".png"
                print(save_name)
                plt.savefig(save_name, transparent=True, dpi=100)
                plt.clf()
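import glob

# Hedged generalization of find_minerr (find_minerr_glob is a name introduced
# here, not in the original script): scan every res file matching the naming
# scheme instead of hard-coding mid in {0, 1, 2} and m in 1..6.
def find_minerr_glob(path):
    min_errs = []
    for name in glob.glob(path + 'mid*_mu*_m*.txt'):
        res = np.loadtxt(name)
        min_errs.append(min(res[:, 1]))
    return min(min_errs)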
# AA-DR each iters
if 0:
    ytype = 'e'
    for fid in range(1, 11):
        # fid = 'monkey'
        path = "D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f" + str(fid) + '_'
        savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_'
        # for is_iter in (1, 0):
        is_iter = 1
        aa_admm_m = 6
        aa_dr_m = 6
        # for outer in (100000, 50000, 10000, 5000, 1000, 500, 100):
        # for outer in (1000, 5000, 10000, 50000, 100000):
        for outer in range(10, 11):
            for is_iter in range(0, 2):
                for nmid in range(1, 3):
                    mid = str(nmid)
                    ls = []
                    maxts = []
                    min_err = find_minerr(path)
                    print(min_err)
                    # min_err = 0
                    # print(path + 'mid0_outer' + str(outer) + '.txt')
                    res1 = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt')
                    # print(path + 'mid1_m' + str(aa_admm_m) + '_outer' + str(outer) + '.txt')
                    res2 = np.loadtxt(path + 'mid1_mu' + str(outer) + '_m' + str(aa_admm_m) + '.txt')
                    # print(path + 'res_mid2_m' + str(aa_dr_m) + '_outer' + str(outer) + '.txt')
                    res3 = np.loadtxt(path + 'mid2_mu' + str(outer) + '_m' + str(aa_dr_m) + '.txt')
                    # ADMM
                    (l1, maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, '')
                    ls.append(l1)
                    maxts.append(maxt)
                    # AA-ADMM
                    (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err, ytype, '')
                    ls.append(l1)
                    maxts.append(maxt)
                    ## AA-DR
                    (l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 2, min_err, ytype, '')
                    ls.append(l1)
                    maxts.append(maxt)
                    if is_iter:
                        plt.xlabel("#Iters", font1)
                        plt.xlim(0, 1500)
                    else:
                        plt.xlabel("Time(ms)", font1)
                        plt.xlim(0, max(maxts))
                    if ytype == 'e':
                        plt.ylabel('Energy', font1)
                    else:
                        plt.ylabel('Combined residual', font1)
                    plt.legend(handles=ls, loc='best', prop=font2)
                    plt.title('file' + str(fid) + ' mu ' + str(outer))
                    plt.tight_layout()
                    save_name = savepath + ytype + "AA" + str(aa_admm_m) + "_DR" + str(aa_dr_m) + "_t" + str(is_iter) + "_outer_" + str(outer) + "_5.png"
                    print(save_name)
                    plt.savefig(save_name, transparent=True, dpi=150)
                    plt.clf()

# diff m each iters
if 0:
    fid = 'monkey'
    # for outer in (0, 10, 20, 30, 40, 50, 60):
    # for outer in (0, 5, 10, 15, 20, 25):
    for fid in range(1, 11):
        for outer in range(10, 11):
            path = "D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f" + str(fid) + '_'
            savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_'
            # for is_iter in (1, 0):
            is_iter = 1
            for is_iter in range(0, 2):
                for nmid in range(1, 2):
                    # outer = 0
                    # nmid = 1
                    mid = str(nmid)
                    ls = []
                    maxts = []
                    # min_err = find_minerr(path, outer)
                    min_err = 0
                    # if nmid > 3:
                    #     res = np.loadtxt(path + 'mid3_m5_outer' + str(outer) + '.txt')
                    # else:
                    res = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt')
                    (l1, maxt) = plot_errmore(res, is_iter, 0, 0, min_err)
                    ls.append(l1)
                    maxts.append(maxt)
                    for i in range(1, 7):
                        res = np.loadtxt(path + 'mid' + mid + '_mu' + str(outer) + '_m' + str(i) + '.txt')
                        (l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, min_err)
                        ls.append(l1)
                        maxts.append(maxt)
                    if is_iter:
                        plt.xlabel("#Iters", font1)
                        plt.xlim(0, 2000)
                    else:
                        plt.xlabel("Time(ms)", font1)
                        plt.xlim(0, max(maxts))
                    plt.ylabel("Combined residual", font1)
                    plt.legend(handles=ls, loc='best', prop=font2)
                    plt.title('file' + str(fid) + ' mu ' + str(outer))
                    plt.tight_layout()
                    save_name = savepath + "mid_" + mid + str(is_iter) + "_outer_" + str(outer) + ".png"
                    print(save_name)
                    plt.savefig(save_name, transparent=True, dpi=600)
                    plt.clf()
                    # plt.show()
len1)", "\"emid_\" + mid + str(is_iter) + \"_outer_\" + str(outer) + \".png\" print(save_name) plt.savefig(save_name,", "'-DRE') ls.append(l1) maxts.append(maxt) if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0, 100) else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0,", "= np.linspace(0, len1, len1) else: x = data[:, 0] - data[0, 0] y", "2] nx = [] ny = [] for i in range(1, len(reset)): if", "70, 80, 83): # for outer in (0, 10, 20, 30, 40, 50,", "len1, len1) else: x = data[:, 0] - data[0, 0] y = data[:,", "nmid = 1 mid = str(nmid) ls = [] maxts = [] res", "min_err # AA-DR each iters if 0: ytype = 'e' for fid in", "+ \"_outer_\" + str(outer) + \".png\" print(save_name) plt.savefig(save_name, transparent=True, dpi=150) plt.clf() # plt.show()", "in range(1, 11): # fid = 'monkey' path = \"D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f\" + str(fid) +", "= [] maxts = [] maxis = [] name = path + str(fid)", "residual\", font1) plt.legend(handles=ls, loc='best', prop=font2) plt.title('file' + str(fid) + ' mu ' +", "maxt) = plot_err3(res, is_iter, 0, 0, 0, 0, ytype, ' ') ls.append(l1) maxts.append(maxt)", "str(mu) + '.txt') # ADMM # data, is_iter, id, is_dr, cid, min_err, ytype,", "'_mid1_outer_0.txt' res2 = np.loadtxt(name) name = path + str(fid) + '_mid2_outer_0.txt' res3 =", "aa_admm_m = 6 aa_dr_m = 6 # for outer in (0, 10, 20,", "{'family': 'Times New Roman', 'weight': 'normal', 'size': 20} font2 = {'family': 'Times New", "New Roman', 'weight': 'normal', 'size': 16} labels = ['', '', '', ''] colors", "label=label1, color=colors[id], linewidth=2) max_t = max(x) return (l1, max_t) def plot_err3(data, is_iter, id,", "ny.append(y[i]) plt.scatter(nx, ny, color='blue', alpha=0.6, s=20) # max_t = max(x) # return (l1,", "# return (l1, max_t) ## diff m if 0: for fid in range(1,", "plt.ylabel(\"f(x)+g(z)\", font1) # plt.ylabel(\"f(x)+g(z)\", font1) plt.legend(handles=ls, loc='best', prop=font2) # plt.title('file' + str(fid)) plt.tight_layout()", "if ytype == 'e': y = data[:, 1] - min_err else: y =", "1] reset = data[:, 2] nx = [] ny = [] for i", "is_iter) + \"_outer_\" + str(outer) + \"_5.png\" print(save_name) plt.savefig(save_name, transparent=True, dpi=150) plt.clf() #", "if id == 0: label1 = 'ADMM' else: if is_dr: label1 = 'ours'", "in (1, 0): is_iter = 1 outer = 1 nmid = 1 aa_admm_m", "maxts = [] res = np.loadtxt(path + 'mid0_mu' + str(mu) + '.txt') #", "= {'family': 'Times New Roman', 'weight': 'normal', 'size': 20} font2 = {'family': 'Times", "= 1 aa_admm_m = 6 aa_dr_m = 6 for mu in (10, 100,", "2] #/ data[0, 2] if id == 0: label1 = 'ADMM' else: if", "in range(1, len(res3)): if np.isnan([res3[eachiter, 0]]): break # if res3[eachiter, 2] == 0:", "is_dr, min_err): len1 = np.shape(data)[0] if is_iter: x = np.linspace(0, len1, len1) else:", "if 0: fid = 'monkey' # for outer in (0, 10, 20, 30,", "path + str(fid) + '_mid2_outer_0.txt' res3 = np.loadtxt(name) # ADMM min_err=0 (l1, maxt)", "plt.ylabel('Energy', font1) else: plt.ylabel('Combined residual', font1) plt.legend(handles=ls, loc='best', prop=font2) plt.title('file' + str(fid) +", "'-PR') ls.append(l1) maxts.append(maxt) iter = 0 for eachiter in range(1, len(res3)): if np.isnan([res3[eachiter,", "0: fid = 'monkey' # for outer in (0, 10, 20, 30, 40,", "i in range(1, 7): # name = path + 'aadr' + str(i) +", "min_errs.append(min(res[:, 1])) min_err = min(min_errs) return min_err # AA-DR each iters if 0:", "resetype): len1 = np.shape(data)[0] if is_iter: x = np.linspace(0, len1, len1) else: x", "+ str(i) + '.txt' # res = np.loadtxt(name) # if is_iter: 
plt.xlabel(\"#Iters\", font1)", "0, 0, 0, min_err, ytype, '') ls.append(l1) maxts.append(maxt) # AA-ADMM (l1, maxt) =", "path = \"D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f\" + str(fid) + '_' savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) +", "mid = str(nmid) ls = [] maxts = [] min_err = find_minerr(path) print(min_err)", "font1) # plt.ylabel(\"f(x)+g(z)\", font1) plt.legend(handles=ls, loc='best', prop=font2) # plt.title('file' + str(fid)) plt.tight_layout() save_name", "(1000, 5000, 10000, 50000, 100000): for outer in range(10, 11): for is_iter in", "31 / 255]) # 3 colors.append([125 / 255, 46 / 255, 141 /", "= np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt') (l1, maxt) = plot_errmore(res, is_iter,", "= 0 nmid = 1 aa_admm_m = 6 aa_dr_m = 6 for mu", "= \"data/coma_data/res/f\" + str(fid) + '_' # os.system(\"mkdir D:\\\\project\\\\ADMMAA\\\\data\\\\test_mu\\\\file\" + str(fid)) savepath =", "cid, min_err, ytype, resetype): len1 = np.shape(data)[0] if is_iter: x = np.linspace(0, len1,", "= \"D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/fig/\" # for is_iter in (1, 0): is_iter = 1 outer =", "(l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, 0) ls.append(l1)", "is_iter in (1, 0): is_iter = 1 outer = 1 nmid = 1", "# max_t = max(x) # return (l1, max_t) ## diff m if 0:", "255]) # 7 def plot_errmore(data, is_iter, id, is_dr, min_err): len1 = np.shape(data)[0] if", "+ ytype + \"AA\" + str(aa_admm_m) + \"_DR\" + str(aa_dr_m) + \"_t\" +", "nmid > 3: # res = np.loadtxt(path + 'mid3_m5_outer' + str(outer) + '.txt')", "diff m if 0: for fid in range(1, 11): path = \"D:/project/ADMMAA/BasisPursuit/res/\" #", "= plot_err3(res3, is_iter, aa_dr_m, 1, 3, min_err, ytype, '-DRE') ls.append(l1) maxts.append(maxt) iter =", "label1 = 'ours' + resetype else: label1 = 'AA ADMM' + resetype if", "= plot_errmore(res, is_iter, 0, 0, min_err) ls.append(l1) maxts.append(maxt) for i in range(1, 7):", "176 / 255, 31 / 255]) # 3 colors.append([125 / 255, 46 /", "3 == 2, min_err) ls.append(l1) maxts.append(maxt) if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0, 2000) else:", "plt.xlim(0, max(maxis)) else: plt.xlabel(\"Time(s)\", font1) plt.xlim(0, max(maxts)) # plt.xlim(0, 50) if ytype ==", "res = np.loadtxt(path + 'mid3_mu' + str(mu) + '.txt') (l1, maxt) = plot_err3(res,", "for i in range(1, 7): res = np.loadtxt(path + 'mid' + mid +", "in range(1, 4): path = \"D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/res/\" # os.system(\"mkdir D:\\\\project\\\\ADMMAA\\\\data\\\\no_noise_fig3\\\\file\" + str(fid)) savepath =", "for fid in range(1, 11): path = \"D:/project/ADMMAA/BasisPursuit/res/\" # os.system(\"mkdir D:\\\\project\\\\ADMMAA\\\\data\\\\no_noise_fig3\\\\file\" + str(fid))", "import os from IPython.core.pylabtools import figsize font1 = {'family': 'Times New Roman', 'weight':", "'mid2_mu10_m' + str(m) + '.txt') min_errs.append(min(res[:, 1])) min_err = min(min_errs) return min_err #", "20, 30, 40, 50, 60): # for outer in (0, 5, 10, 15,", "['', '', '', ''] colors = [] colors.append([0 / 255, 113 / 255,", "4, 100, 101, 102, 103, 104, 191, 192, 193, 194, 195): # for", "str(outer) + '_m1.txt') (l1, maxt) = plot_errmore(res, is_iter, 0, 0, min_err) ls.append(l1) maxts.append(maxt)", "0, 0, 0, min_err, ytype, ' ') ls.append(l1) maxts.append(maxt) iter = 0 for", "path = \"D:/project/ADMMAA/BasisPursuit/res/\" # os.system(\"mkdir D:\\\\project\\\\ADMMAA\\\\data\\\\no_noise_fig3\\\\file\" + str(fid)) savepath = 
\"D:/project/ADMMAA/BasisPursuit/fig/\" #'D:/project/ADMMAA/data/diffpm/p0.5/fig/2efile' +", "1, min_err,ytype, ' ') ls.append(l1) maxts.append(maxt) iter = 0 for eachiter in range(1,", "len1) else: x = data[:, 0] - data[0, 0] # y = data[:,", "= 'ours' + resetype else: label1 = 'AA ADMM' + resetype if ytype", "min_errs.append(min(res[:, 1])) for m in range(1, 7): res = np.loadtxt(path + 'mid1_mu10_m' +", "0, nmid % 3 == 2, 0) ls.append(l1) maxts.append(maxt) # min_err = min(res[:,", "res = np.loadtxt(name) # if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0, 1000) else: plt.xlabel(\"Time(ms)\", font1)", "'-DRE') ls.append(l1) maxts.append(maxt) iter = 0 for eachiter in range(1, len(res3)): if np.isnan([res3[eachiter,", "2): for nmid in range(1, 2): # outer = 0 # # nmid", "path + 'mid0_mu10_m' + str(1) + '.txt' # print(name) res = np.loadtxt(name) min_errs.append(min(res[:,", "= path + str(fid) + 'aaadmm' + str(i) + '.txt' res = np.loadtxt(name)", "0, 0, min_err) ls.append(l1) maxts.append(maxt) for i in range(1, 7): res = np.loadtxt(path", "min_errs.append(min(res[:, 1])) res = np.loadtxt(path + 'mid2_mu10_m' + str(m) + '.txt') min_errs.append(min(res[:, 1]))", "nmid % 3 == 2, 0) ls.append(l1) maxts.append(maxt) # min_err = min(res[:, 3])", "data[:, 1] / data[0, 1] reset = data[:, 2] nx = [] ny", "plt.tight_layout() save_name = savepath + ytype + \"AA\" + str(aa_admm_m) + \"_DR\" +", "name = path + str(fid) + 'aaadmm' + str(i) + '.txt' res =", "print(save_name) plt.savefig(save_name, transparent=True, dpi=600) plt.clf() # AA-DR test mu if 0: for fid", "plt.xlim(0, max(maxts)) plt.legend(handles=ls, loc='best', prop=font2) plt.title('file' + str(fid) + ' outer ' +", "# ADMM (l1, maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, '')", "2): mid = str(nmid) ls = [] maxts = [] maxis = []", "= data[:, 0] - data[0, 0] y = data[:, 1] / data[0, 1]", "np.loadtxt(path + 'mid1_mu10_m' + str(m) + '.txt') min_errs.append(min(res[:, 1])) res = np.loadtxt(path +", "in (100000, 50000, 10000, 5000, 1000, 500, 100): # for outer in (1000,", "1])) for m in range(1, 7): res = np.loadtxt(path + 'mid1_mu10_m' + str(m)", "max(maxts)) plt.legend(handles=ls, loc='best', prop=font2) # plt.title('file' + str(fid) + ' outer ' +", "in range(1, 2): mid = str(nmid) ls = [] maxts = [] res", "diff m each iters if 0: fid = 'monkey' # for outer in", "outer = 0 nmid = 1 aa_admm_m = 6 aa_dr_m = 6 for", "0 # # nmid = 1 mid = str(nmid) ls = [] maxts", "plot_reset(res, is_iter) res = np.loadtxt(path + 'mid3_mu' + str(mu) + '.txt') (l1, maxt)", "255]) # 6 colors.append([255 / 255, 128 / 255, 0 / 255]) #", "1: ytype = 'r' for fid in range(1, 4): path = \"D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/res/\" #", "for is_iter in range(0, 2): mid = str(nmid) ls = [] maxts =", "linewidth=2) max_t = max(x) return (l1, max_t) def plot_err3(data, is_iter, id, is_dr, cid,", "= savepath + str(fid) + \"emid_\" + mid + str(is_iter) + \"_outer_\" +", "+ str(fid) + '_' savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_' # for", "str(aa_admm_m) + \"_DR\" + str(aa_dr_m) + \"_t\" + str( is_iter) + \"_outer_\" +", "plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, '') ls.append(l1) maxts.append(maxt) # AA-ADMM (l1,", "maxis.append(iter) # print(iter) # AA-ADMM (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1,", "if res3[eachiter, 2] == 0: # break iter = iter + 1 maxis.append(iter)", "np.loadtxt(path + 'mid3_m5_outer' + str(outer) + '.txt') # else: res = np.loadtxt(path +", "'e': 
plt.ylabel('Energy', font1) else: plt.ylabel('Combined residual', font1) plt.legend(handles=ls, loc='best', prop=font2) plt.title('file' + str(fid)", "+ str(fid) + 'aaadmm' + str(i) + '.txt' res = np.loadtxt(name) # min_err1", "1])) min_err = min(min_errs) return min_err # AA-DR each iters if 0: ytype", "= 'ADMM' else: if is_dr: label1 = 'DR m=' + str(id) else: label1", "aa_admm_m = 6 aa_dr_m = 6 for mu in (10, 100, 1000, 10000,", "= [] maxts = [] res = np.loadtxt(path + str(fid) + 'admm.txt') (l1,", "+ resetype if ytype == 'r': l1, = plt.semilogy(x, y, label=label1, color=colors[cid], linewidth=2.5)", "ADMM' + resetype if ytype == 'r': l1, = plt.semilogy(x, y, label=label1, color=colors[cid],", "== 'r': plt.ylabel(\"Combined residual\", font1) else: plt.ylabel(\"f(x)+g(z)\", font1) # plt.ylabel(\"f(x)+g(z)\", font1) plt.legend(handles=ls, loc='best',", "data[:, 2] nx = [] ny = [] for i in range(1, len(reset)):", "+ \".png\" print(save_name) plt.savefig(save_name, transparent=True, dpi=600) plt.clf() # AA-DR test mu if 0:", "range(1, 4): path = \"D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/res/\" # os.system(\"mkdir D:\\\\project\\\\ADMMAA\\\\data\\\\no_noise_fig3\\\\file\" + str(fid)) savepath = \"D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/fig/\"", "plt.ylabel(\"f(x)+g(z)\", font1) plt.legend(handles=ls, loc='best', prop=font2) # plt.title('file' + str(fid)) plt.tight_layout() save_name = savepath", "if nmid > 3: # res = np.loadtxt(path + 'mid3_m5_outer' + str(outer) +", "100): # for outer in (1000, 5000, 10000, 50000, 100000): for outer in", "is_iter in range(0, 2): for nmid in range(1, 3): mid = str(nmid) ls", "# plot_reset(res, is_iter) res = np.loadtxt(path + 'mid3_mu' + str(mu) + '.txt') (l1,", "is_iter, id, is_dr, min_err): len1 = np.shape(data)[0] if is_iter: x = np.linspace(0, len1,", "is_iter, aa_admm_m, 0, 1, min_err,ytype, ' ') ls.append(l1) maxts.append(maxt) iter = 0 for", "outer) + \".png\" print(save_name) plt.savefig(save_name, transparent=True, dpi=600) plt.clf() # AA-DR test mu if", "res3 = np.loadtxt(path + 'mid2_mu' + str(outer) + '_m' + str(aa_dr_m) + '.txt')", "# plot_err3(data, is_iter, id, is_dr, cid, min_err, ytype, resetype): name = path +", "1 outer = 0 nmid = 1 aa_admm_m = 6 aa_dr_m = 6", "outer in (1000, 5000, 10000, 50000, 100000): for outer in range(10, 11): for", "ls = [] maxts = [] min_err = find_minerr(path) print(min_err) # min_err =", "if is_dr: label1 = 'ours' + resetype else: label1 = 'AA ADMM' +", "\"AA\" + str(aa_admm_m) + \"_DR\" + str(aa_dr_m) + \"_t\" + str( is_iter) +", "= 'DR m=' + str(id) else: label1 = 'AA m=' + str(id) l1,", "min(min_errs) return min_err # AA-DR each iters if 0: ytype = 'e' for", "str(id) else: label1 = 'AA m=' + str(id) l1, = plt.semilogy(x, y, label=label1,", "= data[:, 1] / data[0, 1] reset = data[:, 2] nx = []", "maxt) = plot_err3(res, is_iter, 1, 0, 1, 0, ytype, ' ') ls.append(l1) maxts.append(maxt)", "maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, ' ') ls.append(l1) maxts.append(maxt)", "/ 255, 189 / 255, 237 / 255]) # 6 colors.append([255 / 255,", "1 mid = str(nmid) ls = [] maxts = [] res = np.loadtxt(path", "nx.append(x[i]) ny.append(y[i]) plt.scatter(nx, ny, color='blue', alpha=0.6, s=20) # max_t = max(x) # return", "Roman', 'weight': 'normal', 'size': 16} labels = ['', '', '', ''] colors =", "data[0, 0] if ytype == 'e': y = data[:, 1] - min_err else:", "255, 176 / 255, 31 / 255]) # 3 colors.append([125 / 255, 46", "+ '_mid0_outer_0.txt' res1 = 
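
# Illustrative sketch, not part of the original experiments: judging from the
# indexing used throughout this script, each result file is assumed to be a
# plain-text matrix with one row per iteration and columns
# (0) wall-clock timestamp, (1) energy f(x)+g(z), (2) combined residual
# (or a reset counter, for plot_reset). The fabricated matrix below exercises
# plot_err3 without any of the experiment data; all values here are made up.
if 0:  # flip to 1 to smoke-test the helpers
    t = np.linspace(0.0, 1.0, 200)             # fake timestamps
    energy = 1.0 + np.exp(-5.0 * t)            # fake decaying energy
    residual = np.exp(-8.0 * t)                # fake decaying residual
    fake = np.column_stack([t, energy, residual])
    # data, is_iter, id, is_dr, cid, min_err, ytype, resetype
    (line, tmax) = plot_err3(fake, 0, 0, 0, 0, 1.0, 'e', '')
    plt.legend(handles=[line], loc='best', prop=font2)
    plt.show()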
## diff m
if 0:
    for fid in range(1, 11):
        path = "D:/project/ADMMAA/BasisPursuit/res/"
        # os.system("mkdir D:\\project\\ADMMAA\\data\\no_noise_fig3\\file" + str(fid))
        savepath = "D:/project/ADMMAA/BasisPursuit/fig/"  # 'D:/project/ADMMAA/data/diffpm/p0.5/fig/2efile' + str(fid) + '_'
        # for is_iter in (1, 0):
        is_iter = 1
        for is_iter in range(0, 2):
            for nmid in range(1, 3):
                mid = str(nmid)
                ls = []
                maxts = []
                res = np.loadtxt(path + str(fid) + 'admm.txt')
                (l1, maxt) = plot_errmore(res, is_iter, 0, nmid % 3 == 2, 0)
                ls.append(l1)
                maxts.append(maxt)
                # min_err = min(res[:, 3])
                for i in range(1, 7):
                    name = path + str(fid) + 'aaadmm' + str(i) + '.txt'
                    res = np.loadtxt(name)
                    # min_err1 = min(res[:, 3])
                    # min_err = min(min_err, min_err1)
                    (l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, 0)
                    ls.append(l1)
                    maxts.append(maxt)
                # for i in range(1, 7):
                #     name = path + 'aadr' + str(i) + '.txt'
                #     res = np.loadtxt(name)
                if is_iter:
                    plt.xlabel("#Iters", font1)
                    plt.xlim(0, 1000)
                else:
                    plt.xlabel("Time(ms)", font1)
                    plt.xlim(0, max(maxts))
                plt.legend(handles=ls, loc='best', prop=font2)
                # plt.title('file' + str(fid) + ' outer ' + str(outer))
                plt.tight_layout()
                # NOTE: 'outer' is never assigned in this disabled block; set it before enabling.
                save_name = savepath + "mid_" + mid + str(is_iter) + "_outer_" + str(outer) + ".png"
                print(save_name)
                plt.savefig(save_name, transparent=True, dpi=150)
                plt.clf()

# diff m each iters
if 0:
    fid = 'monkey'
    # for outer in (0, 10, 20, 30, 40, 50, 60, 70, 80, 83):
    # for outer in (0, 10, 20, 30, 40, 50, 60):
    # for outer in (0, 5, 10, 15, 20, 25):
    # for outer in (0, 1, 2, 3, 4, 100, 101, 102, 103, 104, 191, 192, 193, 194, 195):
    # for outer in (100000, 50000, 10000, 5000, 1000, 500, 100):
    # for outer in (1000, 5000, 10000, 50000, 100000):
    for fid in range(1, 11):
        for outer in range(10, 11):
            path = "D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f" + str(fid) + '_'
            savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_'
            # for is_iter in (1, 0):
            is_iter = 1
            outer = 0
            for is_iter in range(0, 2):
                for nmid in range(1, 2):
                    # outer = 0
                    # # nmid = 1
                    mid = str(nmid)
                    ls = []
                    maxts = []
                    # min_err = find_minerr(path, outer)
                    min_err = 0
                    # if nmid > 3:
                    #     res = np.loadtxt(path + 'mid3_m5_outer' + str(outer) + '.txt')
                    # else:
                    res = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt')
                    (l1, maxt) = plot_errmore(res, is_iter, 0, 0, min_err)
                    ls.append(l1)
                    maxts.append(maxt)
                    for i in range(1, 7):
                        res = np.loadtxt(path + 'mid' + mid + '_mu' + str(outer) + '_m' + str(i) + '.txt')
                        (l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, min_err)
                        ls.append(l1)
                        maxts.append(maxt)
                    if is_iter:
                        plt.xlabel("#Iters", font1)
                        plt.xlim(0, 2000)
                    else:
                        plt.xlabel("Time(ms)", font1)
                        plt.xlim(0, max(maxts))
                    plt.ylabel("Combined residual", font1)
                    plt.legend(handles=ls, loc='best', prop=font2)
                    plt.title('file' + str(fid) + ' mu ' + str(outer))
                    plt.tight_layout()
                    save_name = savepath + "mid_" + mid + str(is_iter) + "_outer_" + str(outer) + ".png"
                    print(save_name)
                    plt.savefig(save_name, transparent=True, dpi=150)
                    plt.clf()
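
# Hedged suggestion: the blocks in this script carry commented-out
# os.system("mkdir ...") calls to create their output directories. A portable,
# idiomatic equivalent for the forward-slash paths used here is os.makedirs
# with exist_ok=True; 'some_savepath' below is a placeholder value only.
if 0:
    some_savepath = "data/coma_data/fig/"
    os.makedirs(some_savepath, exist_ok=True)  # no error if it already exists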
# plt.show()

# AA-DR
if 1:
    ytype = 'r'
    for fid in range(1, 4):
        path = "D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/res/"
        # os.system("mkdir D:\\project\\ADMMAA\\data\\no_noise_fig3\\file" + str(fid))
        savepath = "D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/fig/"
        # for is_iter in (1, 0):
        is_iter = 1
        outer = 1
        nmid = 1
        aa_admm_m = 6
        aa_dr_m = 6
        for is_iter in range(0, 2):
            for nmid in range(1, 3):
                mid = str(nmid)
                ls = []
                maxts = []
                maxis = []
                name = path + str(fid) + '_mid0_outer_0.txt'
                res1 = np.loadtxt(name)
                # print(res)
                name = path + str(fid) + '_mid1_outer_0.txt'
                res2 = np.loadtxt(name)
                name = path + str(fid) + '_mid2_outer_0.txt'
                res3 = np.loadtxt(name)
                # ADMM
                min_err = 0
                (l1, maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res1)):
                    if np.isnan(res1[eachiter, 0]):
                        break
                    # if res3[eachiter, 2] == 0:
                    #     break
                    iter = iter + 1
                maxis.append(iter)
                # print(iter)
                # AA-ADMM
                (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res2)):
                    if np.isnan(res2[eachiter, 0]):
                        break
                    # if res2[eachiter, 2] == 0:
                    #     break
                    iter = iter + 1
                maxis.append(iter)
                # print(iter)
                ## AA-DR
                (l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 2, min_err, ytype, '-PR')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res3)):
                    if np.isnan(res3[eachiter, 0]):
                        break
                    # if res3[eachiter, 2] == 0:
                    #     break
                    iter = iter + 1
                maxis.append(iter)
                # print(iter)
                # plot_err3(data, is_iter, id, is_dr, cid, min_err, ytype, resetype):
                name = path + str(fid) + '_mid3_outer_0.txt'
                res3 = np.loadtxt(name)
                (l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 3, min_err, ytype, '-DRE')
                ls.append(l1)
                maxts.append(maxt)
                iter = 0
                for eachiter in range(1, len(res3)):
                    if np.isnan(res3[eachiter, 0]):
                        break
                    # if res3[eachiter, 2] == 0:
                    #     break
                    iter = iter + 1
                maxis.append(iter)
                if is_iter:
                    plt.xlabel("#Iters", font1)
                    plt.xlim(0, max(maxis))
                else:
                    plt.xlabel("Time(s)", font1)
                    plt.xlim(0, max(maxts))
                    # plt.xlim(0, 50)
                if ytype == 'r':
                    plt.ylabel("Combined residual", font1)
                else:
                    plt.ylabel("f(x)+g(z)", font1)
                # plt.ylabel("f(x)+g(z)", font1)
                plt.legend(handles=ls, loc='best', prop=font2)
                # plt.title('file' + str(fid))
                plt.tight_layout()
                # save_name = savepath + str(fid) + "emid_" + mid + str(is_iter) + "_outer_" + str(outer) + ".png"
                save_name = savepath + str(fid) + ytype + "AA" + str(aa_admm_m) + "_DR" + str(aa_dr_m) + "_t" + str(
                    is_iter) + "_outer_" + str(outer) + ".png"
                print(save_name)
                plt.savefig(save_name, transparent=True, dpi=600)
                plt.clf()
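
# Hedged refactor sketch: the repeated "iter = 0; for eachiter in ..." loops
# above all count rows until the first NaN (the logs appear to be NaN-padded
# to a common length). The helper below is a vectorized way to get essentially
# the same count (the original loops start at row 1, so they report one less);
# it is a suggestion, not something the original script defines or calls.
def count_valid_iters(data):
    # rows before the first NaN in column 0, or the whole log if there is none
    nan_mask = np.isnan(data[:, 0])
    return int(np.argmax(nan_mask)) if nan_mask.any() else len(data)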
# AA-DR test mu
if 0:
    # NOTE: this disabled block reads 'ytype' (and 'outer' in the title);
    # both must be set before enabling it.
    for fid in range(1, 5):
        path = "data/coma_data/res/f" + str(fid) + '_'
        # os.system("mkdir D:\\project\\ADMMAA\\data\\test_mu\\file" + str(fid))
        savepath = 'data/coma_data/fig/file' + str(fid) + '_'
        # for is_iter in (1, 0):
        is_iter = 1
        outer = 0
        nmid = 1
        aa_admm_m = 6
        aa_dr_m = 6
        for mu in (10, 100, 1000, 10000, 100000, 1000000):
            for is_iter in range(1, 2):
                mid = str(nmid)
                ls = []
                maxts = []
                res = np.loadtxt(path + 'mid0_mu' + str(mu) + '.txt')
                # ADMM
                # data, is_iter, id, is_dr, cid, min_err, ytype, resetype
                (l1, maxt) = plot_err3(res, is_iter, 0, 0, 0, 0, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                # AA-ADMM
                res = np.loadtxt(path + 'mid1_mu' + str(mu) + '.txt')
                (l1, maxt) = plot_err3(res, is_iter, 1, 0, 1, 0, ytype, ' ')
                ls.append(l1)
                maxts.append(maxt)
                # plot_reset(res, is_iter)
                ## AA-DR
                res = np.loadtxt(path + 'mid2_mu' + str(mu) + '.txt')
                (l1, maxt) = plot_err3(res, is_iter, 2, 1, 2, 0, ytype, '-PR')
                ls.append(l1)
                maxts.append(maxt)
                # plot_reset(res, is_iter)
                res = np.loadtxt(path + 'mid3_mu' + str(mu) + '.txt')
                (l1, maxt) = plot_err3(res, is_iter, 2, 1, 3, 0, ytype, '-DRE')
                ls.append(l1)
                maxts.append(maxt)
                if is_iter:
                    plt.xlabel("#Iters", font1)
                    plt.xlim(0, 100)
                else:
                    plt.xlabel("Time(ms)", font1)
                    plt.xlim(0, max(maxts))
                plt.legend(handles=ls, loc='best', prop=font2)
                plt.title('file' + str(fid) + ' outer ' + str(outer))
                plt.tight_layout()
                save_name = savepath + "mu" + str(mu) + ".png"
                print(save_name)
                plt.savefig(save_name, transparent=True, dpi=100)
                plt.clf()


def find_minerr(path):
    # smallest energy value seen across the ADMM, AA-ADMM and AA-DR logs,
    # used as the reference value when plotting energy above the optimum
    min_errs = []
    name = path + 'mid0_mu10_m' + str(1) + '.txt'
    # print(name)
    res = np.loadtxt(name)
    min_errs.append(min(res[:, 1]))
    for m in range(1, 7):
        res = np.loadtxt(path + 'mid1_mu10_m' + str(m) + '.txt')
        min_errs.append(min(res[:, 1]))
        res = np.loadtxt(path + 'mid2_mu10_m' + str(m) + '.txt')
        min_errs.append(min(res[:, 1]))
    min_err = min(min_errs)
    return min_err


# AA-DR each iters
if 0:
    ytype = 'e'
    for fid in range(1, 11):
        # fid = 'monkey'
        path = "D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f" + str(fid) + '_'
        savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_'
        # for is_iter in (1, 0):
        is_iter = 1
        outer = 0
        nmid = 1
        aa_admm_m = 6
        aa_dr_m = 6
        for is_iter in range(0, 2):
            mid = str(nmid)
            ls = []
            maxts = []
            min_err = find_minerr(path)
            print(min_err)
            # min_err = 0
            # print(path + 'mid0_outer' + str(outer) + '.txt')
            res1 = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt')
            # print(path + 'mid1_m' + str(aa_admm_m) + '_outer' + str(outer) + '.txt')
            res2 = np.loadtxt(path + 'mid1_mu' + str(outer) + '_m' + str(aa_admm_m) + '.txt')
            # print(path + 'res_mid2_m' + str(aa_dr_m) + '_outer' + str(outer) + '.txt')
            res3 = np.loadtxt(path + 'mid2_mu' + str(outer) + '_m' + str(aa_dr_m) + '.txt')
            # ADMM
            (l1, maxt) = plot_err3(res1, is_iter, 0, 0, 0, min_err, ytype, '')
            ls.append(l1)
            maxts.append(maxt)
            # AA-ADMM
            (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err, ytype, '')
            ls.append(l1)
            maxts.append(maxt)
            ## AA-DR
            (l1, maxt) = plot_err3(res3, is_iter, aa_dr_m, 1, 2, min_err, ytype, '')
            ls.append(l1)
            maxts.append(maxt)
            if is_iter:
                plt.xlabel("#Iters", font1)
                plt.xlim(0, 1500)
            else:
                plt.xlabel("Time(ms)", font1)
                plt.xlim(0, max(maxts))
            if ytype == 'e':
                plt.ylabel('Energy', font1)
            else:
                plt.ylabel('Combined residual', font1)
            plt.legend(handles=ls, loc='best', prop=font2)
            plt.title('file' + str(fid) + ' mu ' + str(outer))
            plt.tight_layout()
            save_name = savepath + ytype + "AA" + str(aa_admm_m) + "_DR" + str(aa_dr_m) + "_t" + str(
                is_iter) + "_outer_" + str(outer) + "_5.png"
            print(save_name)
            plt.savefig(save_name, transparent=True, dpi=150)
            plt.clf()
iter = iter + 1 maxis.append(iter) # print(iter) # plot_err3(data, is_iter, id,", "max_t = max(x) # return (l1, max_t) ## diff m if 0: for", "15, 20, 25): # for outer in (0, 1, 2, 3, 4, 100,", "[] maxts = [] min_err = find_minerr(path) print(min_err) # min_err = 0 #", "= min(min_errs) return min_err # AA-DR each iters if 0: ytype = 'e'", "# for outer in (0, 5, 10, 15, 20, 25): for fid in", "1, min_err, ytype, '') ls.append(l1) maxts.append(maxt) ## AA-DR (l1, maxt) = plot_err3(res3, is_iter,", "for eachiter in range(1, len(res2)): if np.isnan([res2[eachiter, 0]]): break # if res2[eachiter, 2]", "+ '.txt') # ADMM # data, is_iter, id, is_dr, cid, min_err, ytype, resetype", "0, 0, ytype, ' ') ls.append(l1) maxts.append(maxt) # AA-ADMM res = np.loadtxt(path +", "/ 255, 141 / 255]) # 4 colors.append([118 / 255, 171 / 255,", "= 0 for eachiter in range(1, len(res1)): if np.isnan([res1[eachiter, 0]]): break # if", "path + 'aadr' + str(i) + '.txt' # res = np.loadtxt(name) # if", "nmid % 3 == 2, min_err) ls.append(l1) maxts.append(maxt) if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0,", "= \"D:/project/ADMMAA/BasisPursuit/fig/\" #'D:/project/ADMMAA/data/diffpm/p0.5/fig/2efile' + str(fid) + '_' # for is_iter in (1, 0):", "import matplotlib.pyplot as plt import os from IPython.core.pylabtools import figsize font1 = {'family':", "= np.linspace(0, len1, len1) else: x = data[:, 0] - data[0, 0] #", "savepath = 'data/coma_data/fig/file' + str(fid) + '_' # for is_iter in (1, 0):", "# min_err = min(res[:, 3]) for i in range(1, 7): name = path", "255]) # 2 colors.append([236 / 255, 176 / 255, 31 / 255]) #", "20, 30, 40, 50, 60, 70, 80, 83): # for outer in (0,", "+ str(outer) + '.txt') res1 = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt')", "# 2 colors.append([236 / 255, 176 / 255, 31 / 255]) # 3", "(100000, 50000, 10000, 5000, 1000, 500, 100): # for outer in (1000, 5000,", "0 for eachiter in range(1, len(res2)): if np.isnan([res2[eachiter, 0]]): break # if res2[eachiter,", "' outer ' + str(outer)) plt.tight_layout() save_name = savepath + \"mu\" + str(mu)", "'_' savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_' # for is_iter in (1,", "- data[0, 0] # y = data[:, 2] # /data[0, 1] y =", "= 6 for mu in (10, 100, 1000, 10000, 100000, 1000000): for is_iter", "/ 255, 31 / 255]) # 3 colors.append([125 / 255, 46 / 255,", "l1, = plt.semilogy(x, y, label=label1, color=colors[cid], linewidth=2.5) max_t = max(x) return (l1, max_t)", "str(nmid) ls = [] maxts = [] min_err = find_minerr(path) print(min_err) # min_err", "data[:, 2] # /data[0, 1] y = data[:, 2] #/ data[0, 2] if", "# for outer in (0, 10, 20, 30, 40, 50, 60): # for", "reset = data[:, 2] nx = [] ny = [] for i in", "11): path = \"D:/project/ADMMAA/sparseicp/testmu/0_p5.0_f\" + str(fid) + '_' savepath = 'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid)", "as plt import os from IPython.core.pylabtools import figsize font1 = {'family': 'Times New", "'Times New Roman', 'weight': 'normal', 'size': 16} labels = ['', '', '', '']", "+ str(aa_dr_m) + \"_t\" + str(is_iter) + \"_outer_\" + str( outer) + \".png\"", "for outer in (0, 5, 10, 15, 20, 25): # for outer in", "= 0 # if nmid > 3: # res = np.loadtxt(path + 'mid3_m5_outer'", "+ str(mu) + '.txt') # ADMM # data, is_iter, id, is_dr, cid, min_err,", "/ 255, 171 / 255, 47 / 255]) # 5 colors.append([76 / 255,", "for outer in (100000, 50000, 10000, 5000, 1000, 500, 100): # for outer", "= [] res = np.loadtxt(path + str(fid) + 'admm.txt') (l1, maxt) = plot_errmore(res,", "name = path 
+ 'aadr' + str(i) + '.txt' # res = np.loadtxt(name)", "save_name = savepath + ytype + \"AA\" + str(aa_admm_m) + \"_DR\" + str(aa_dr_m)", "40, 50, 60, 70, 80, 83): # for outer in (0, 10, 20,", "ytype, '') ls.append(l1) maxts.append(maxt) # AA-ADMM (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0,", "= np.loadtxt(path + 'mid' + mid + '_mu' + str(outer)+ '_m' + str(i)", "np import matplotlib.pyplot as plt import os from IPython.core.pylabtools import figsize font1 =", "find_minerr(path): min_errs = [] name = path + 'mid0_mu10_m' + str(1) + '.txt'", "max(maxts)) # plt.xlim(0, 50) if ytype == 'r': plt.ylabel(\"Combined residual\", font1) else: plt.ylabel(\"f(x)+g(z)\",", "plot_err3(res3, is_iter, aa_dr_m, 1, 2, min_err, ytype, '') ls.append(l1) maxts.append(maxt) if is_iter: plt.xlabel(\"#Iters\",", "maxis = [] name = path + str(fid) + '_mid0_outer_0.txt' res1 = np.loadtxt(name)", "else: if is_dr: label1 = 'DR m=' + str(id) else: label1 = 'AA", "transparent=True, dpi=100) plt.clf() def find_minerr(path): min_errs = [] name = path + 'mid0_mu10_m'", "np.isnan([res3[eachiter, 0]]): break # if res3[eachiter, 2] == 0: # break iter =", "len(res2)): if np.isnan([res2[eachiter, 0]]): break # if res2[eachiter, 2] == 0: # break", "mu if 0: for fid in range(1, 5): path = \"data/coma_data/res/f\" + str(fid)", "= data[:, 2] # /data[0, 1] y = data[:, 2] #/ data[0, 2]", "id, is_dr, cid, min_err, ytype, resetype (l1, maxt) = plot_err3(res, is_iter, 0, 0,", "'.txt') min_errs.append(min(res[:, 1])) res = np.loadtxt(path + 'mid2_mu10_m' + str(m) + '.txt') min_errs.append(min(res[:,", "+ '.txt' # res = np.loadtxt(name) # if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0, 1000)", "100) else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0, max(maxts)) plt.legend(handles=ls, loc='best', prop=font2) plt.title('file' + str(fid) +", "iter + 1 maxis.append(iter) # print(iter) # plot_err3(data, is_iter, id, is_dr, cid, min_err,", "colors.append([118 / 255, 171 / 255, 47 / 255]) # 5 colors.append([76 /", "\"_t\" + str(is_iter) + \"_outer_\" + str( outer) + \".png\" print(save_name) plt.savefig(save_name, transparent=True,", "= data[:, 1] - min_err else: y = data[:, 2] if id ==", "= max(x) return (l1, max_t) def plot_reset(data, is_iter): len1 = np.shape(data)[0] if is_iter:", "plt.legend(handles=ls, loc='best', prop=font2) # plt.title('file' + str(fid) + ' outer ' + str(outer))", "+ 'mid0_mu10_m' + str(1) + '.txt' # print(name) res = np.loadtxt(name) min_errs.append(min(res[:, 1]))", "+ '_m' + str(aa_dr_m) + '.txt') # ADMM (l1, maxt) = plot_err3(res1, is_iter,", "AA-DR if 1: ytype = 'r' for fid in range(1, 4): path =", "for is_iter in (1, 0): is_iter = 1 outer = 0 for is_iter", "maxts.append(maxt) iter = 0 for eachiter in range(1, len(res2)): if np.isnan([res2[eachiter, 0]]): break", "in range(1, 3): mid = str(nmid) ls = [] maxts = [] #", "len1 = np.shape(data)[0] if is_iter: x = np.linspace(0, len1, len1) else: x =", "ls = [] maxts = [] res = np.loadtxt(path + 'mid0_mu' + str(mu)", "if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0, 100) else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0, max(maxts)) plt.legend(handles=ls, loc='best',", "min(min_err, min_err1) (l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2,", "is_iter, i, nmid % 3 == 2, min_err) ls.append(l1) maxts.append(maxt) if is_iter: plt.xlabel(\"#Iters\",", "255, 128 / 255, 0 / 255]) # 7 def plot_errmore(data, is_iter, id,", "1000, 500, 100): # for outer in (1000, 5000, 10000, 50000, 100000): for", "(l1, max_t) ## diff m if 0: for fid in range(1, 11): 
path", "0] # y = data[:, 2] # /data[0, 1] y = data[:, 2]", "'D:/project/ADMMAA/sparseicp/figp05nreset/1_f' + str(fid) + '_' # for is_iter in (1, 0): is_iter =", "is_iter in range(1, 2): mid = str(nmid) ls = [] maxts = []", "is_iter, 2, 1, 2, 0, ytype, '-PR') ls.append(l1) maxts.append(maxt) # plot_reset(res, is_iter) res", "color=colors[id], linewidth=2) max_t = max(x) return (l1, max_t) def plot_err3(data, is_iter, id, is_dr,", "# min_err = 0 # print(path + 'mid0_outer' + str(outer) + '.txt') res1", "'.txt') (l1, maxt) = plot_errmore(res, is_iter, i, nmid % 3 == 2, min_err)", "AA-ADMM (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err, ytype, '') ls.append(l1)", "os.system(\"mkdir D:\\\\project\\\\ADMMAA\\\\data\\\\no_noise_fig3\\\\file\" + str(fid)) savepath = \"D:/project/ADMMAA/BasisPursuit/fig/\" #'D:/project/ADMMAA/data/diffpm/p0.5/fig/2efile' + str(fid) + '_' #", "'.txt') # else: res = np.loadtxt(path + 'mid0_mu' + str(outer) + '_m1.txt') (l1,", "= [] maxis = [] name = path + str(fid) + '_mid0_outer_0.txt' res1", "# os.system(\"mkdir D:\\\\project\\\\ADMMAA\\\\data\\\\no_noise_fig3\\\\file\" + str(fid)) savepath = \"D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/fig/\" # for is_iter in (1,", "plt.legend(handles=ls, loc='best', prop=font2) # plt.title('file' + str(fid)) plt.tight_layout() save_name = savepath + str(fid)", "if 1: ytype = 'r' for fid in range(1, 4): path = \"D:/project/ADMMAA/localdeformation/splocs-master/data/coma_data/coma_data/res/\"", "else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0, max(maxts)) if ytype == 'e': plt.ylabel('Energy', font1) else: plt.ylabel('Combined", "+ str(aa_dr_m) + '.txt') # ADMM (l1, maxt) = plot_err3(res1, is_iter, 0, 0,", "else: x = data[:, 0] - data[0, 0] # y = data[:, 2]", "is_iter, aa_dr_m, 1, 2, min_err,ytype, '-PR') ls.append(l1) maxts.append(maxt) iter = 0 for eachiter", "m=' + str(id) l1, = plt.semilogy(x, y, label=label1, color=colors[id], linewidth=2.5) # l1, =", "plot_err3(data, is_iter, id, is_dr, cid, min_err, ytype, resetype): len1 = np.shape(data)[0] if is_iter:", "' outer ' + str(outer)) plt.tight_layout() save_name = savepath + str(fid) + \"emid_\"", "= [] for i in range(1, len(reset)): if reset[i] > reset[i - 1]:", "iter = 0 for eachiter in range(1, len(res2)): if np.isnan([res2[eachiter, 0]]): break #", "# 1 colors.append([216 / 255, 82 / 255, 24 / 255]) # 2", "plt.xlabel(\"#Iters\", font1) plt.xlim(0, 100) else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0, max(maxts)) plt.legend(handles=ls, loc='best', prop=font2) plt.title('file'", "np.loadtxt(path + 'mid' + mid + '_mu' + str(outer)+ '_m' + str(i) +", "print(iter) # AA-ADMM (l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 1, min_err,ytype, '", "'.txt') min_errs.append(min(res[:, 1])) min_err = min(min_errs) return min_err # AA-DR each iters if", "max_t = max(x) return (l1, max_t) def plot_err3(data, is_iter, id, is_dr, cid, min_err,", "y = data[:, 2] if id == 0: label1 = 'ADMM' else: if", "2] # /data[0, 1] y = data[:, 2] #/ data[0, 2] if id", "0 for eachiter in range(1, len(res1)): if np.isnan([res1[eachiter, 0]]): break # if res3[eachiter,", "nmid = 1 aa_admm_m = 6 aa_dr_m = 6 # for outer in", "[] maxts = [] res = np.loadtxt(path + 'mid0_mu' + str(mu) + '.txt')", "== 0: label1 = 'ADMM' else: if is_dr: label1 = 'DR m=' +", "ls.append(l1) maxts.append(maxt) if is_iter: plt.xlabel(\"#Iters\", font1) plt.xlim(0, 100) else: plt.xlabel(\"Time(ms)\", font1) plt.xlim(0, max(maxts))", "(l1, maxt) = plot_err3(res2, is_iter, aa_admm_m, 0, 
1, min_err, ytype, '') ls.append(l1) maxts.append(maxt)", "loc='best', prop=font2) # plt.title('file' + str(fid) + ' outer ' + str(outer)) plt.tight_layout()", "np.linspace(0, len1, len1) else: x = data[:, 0] - data[0, 0] y =", "font2 = {'family': 'Times New Roman', 'weight': 'normal', 'size': 16} labels = ['',", "0 for is_iter in range(0, 2): for nmid in range(1, 2): # outer", "+ str(mu) + \".png\" print(save_name) plt.savefig(save_name, transparent=True, dpi=100) plt.clf() def find_minerr(path): min_errs =", "= np.loadtxt(path + 'mid3_m5_outer' + str(outer) + '.txt') # else: res = np.loadtxt(path" ]
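# A minimal synthetic sketch of the log layout the helpers above assume (not
# part of the original experiments): each row is taken to be
# [time_ms, energy, combined_residual], matching the data[:, 0..2] indexing in
# plot_errmore / plot_err3. The demo_* names are illustrative only.
def demo_plot():
    t = np.linspace(0, 500, 200)
    demo = np.column_stack([t,                  # column 0: elapsed time (ms)
                            np.exp(-t / 60.0),  # column 1: energy
                            np.exp(-t / 50.0)]) # column 2: combined residual
    ls = []
    # id=0 draws the curve with the 'ADMM' label; ytype='r' selects column 2
    (l1, maxt) = plot_err3(demo, 1, 0, 0, 0, 0, 'r', '')
    ls.append(l1)
    plt.xlabel("#Iters", font1)
    plt.ylabel("Combined residual", font1)
    plt.legend(handles=ls, loc='best', prop=font2)
    plt.tight_layout()
    plt.show()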
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-08-11 12:46
# (The verbose_name labels are Chinese UI strings: 收藏记录 = "favorite records",
#  收藏状态 = "favorite status", 收藏/取消时间 = "time favorited/unfavorited",
#  收藏者 = "collector", 问题 = "question", 题目答案 = "question answer",
#  题目详情 = "question content".)
from __future__ import unicode_literals

import ckeditor_uploader.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('repo', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='QuestionsCollection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now=True, verbose_name='收藏/取消时间')),
                ('status', models.BooleanField(default=True, verbose_name='收藏状态')),
            ],
            options={
                'verbose_name': '收藏记录',
                'verbose_name_plural': '收藏记录',
            },
        ),
        migrations.AlterField(
            model_name='questions',
            name='answer',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='题目答案'),
        ),
        migrations.AlterField(
            model_name='questions',
            name='content',
            field=ckeditor_uploader.fields.RichTextUploadingField(null=True, verbose_name='题目详情'),
        ),
        migrations.AddField(
            model_name='questionscollection',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions_collection_set', to='repo.Questions', verbose_name='问题'),
        ),
        migrations.AddField(
            model_name='questionscollection',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions_collection_set', to=settings.AUTH_USER_MODEL, verbose_name='收藏者'),
        ),
    ]
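# For reference, a models.py sketch that corresponds to the migration above,
# reconstructed mechanically from the migration's own operations (the
# project's actual model code may differ):
from django.conf import settings
from django.db import models


class QuestionsCollection(models.Model):
    question = models.ForeignKey('repo.Questions', on_delete=models.CASCADE,
                                 related_name='questions_collection_set',
                                 verbose_name='问题')  # question
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                             related_name='questions_collection_set',
                             verbose_name='收藏者')  # collector
    create_time = models.DateTimeField(auto_now=True,
                                       verbose_name='收藏/取消时间')  # time favorited/unfavorited
    status = models.BooleanField(default=True, verbose_name='收藏状态')  # favorite status

    class Meta:
        verbose_name = '收藏记录'  # favorite records
        verbose_name_plural = '收藏记录'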
class Route:
    """Shortest route between two solar systems, resolved via an ESI swagger client."""

    def __init__(self, c, s, e):
        self.client = c  # swagger/ESI client
        self.start = s   # origin solar-system id
        self.end = e     # destination solar-system id
        # self.start = self.get_system(s)
        # self.end = self.get_system(e)

    def __repr__(self):
        return str(self)

    def __str__(self):
        system_ids = self.get_distance()
        return str([self.get_system(id)["name"] for id in system_ids])

    def get_system(self, s):
        # Resolve a system id to its public name record.
        result = self.client.Universe.post_universe_names(
            ids=[s]
        ).result()
        return result[0]

    def get_distance(self):
        # Ids of the systems along the shortest route from start to end.
        return self.client.Routes.get_route_origin_destination(
            datasource="tranquility",
            flag="shortest",
            origin=self.start,
            destination=self.end
        ).result()
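# Usage sketch, assuming a bravado-style swagger client for EVE's ESI API (as
# implied by the .result() calls above); the client construction and the two
# solar-system ids below are illustrative:
#
#   from bravado.client import SwaggerClient
#
#   client = SwaggerClient.from_url("https://esi.evetech.net/latest/swagger.json")
#   route = Route(client, 30000142, 30002187)  # Jita -> Amarr
#   print(route)  # names of the systems along the shortest route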
[ "needed! gtG.vertex_properties['id'] = gtG.new_vertex_property('string') # Add the edge properties second eprops = set()", "for key, value in data.items(): gtG.ep[key][e] = value # ep is short for", "key is provided, it also ensures the key is in a format that", "float(np.std(v)) for node in _g.vertices(): if centralities[node] >= mean + 3*stdev: repeat_nodes[_g.vertex_properties['id'][node]] =", "value.encode('ascii', errors='replace') elif isinstance(value, dict): tname = 'object' else: tname = 'string' value", "'string' value = str(value) return tname, value, key def nx2gt(nxG): \"\"\" Converts a", "v # Set the vertex properties, not forgetting the id property data['id'] =", "properties first nprops = set() # cache keys to only add properties once", "# Phase 0: Create a directed or undirected graph-tool Graph gtG = gt.Graph(directed=nxG.is_directed())", "already added # Convert the value and key into a type for graph-tool", "Set the PropertyMap gtG.graph_properties[key] = value # Set the actual value # Phase", "tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'): if len(subg.nodes()) >= 50: get_centrality(subg) G_copy = G.copy() print('Writing output...')", "PropertyMap # Add the key to the already seen properties nprops.add(key) # Also", "= key.encode('ascii', errors='replace') # Deal with the value if isinstance(value, bool): tname =", "edges later v = gtG.add_vertex() vertices[node] = v # Set the vertex properties,", "if not seen and add them. for key, val in data.items(): if key", "= set() # cache keys to only add properties once for src, dst,", "already seen properties nprops.add(key) # Also add the node id: in NetworkX a", "= {} with open(args.length,'r') as f: for line in tqdm.tqdm(f, desc='Reading lengths'): attrs", "in nxG.edges_iter(data=True): # Look up the vertex structs from our vertices mapping and", "multiprocessing import graph_tool as gt from graph_tool.centrality import betweenness parser = argparse.ArgumentParser() parser.add_argument(\"-g\",", "graph. \"\"\" # Phase 0: Create a directed or undirected graph-tool Graph gtG", "class. If a key is provided, it also ensures the key is in", "_, key = get_prop_type(val, key) prop = gtG.new_vertex_property(tname) # Create the PropertyMap gtG.vertex_properties[key]", "tname = 'bool' elif isinstance(value, int): tname = 'float' value = float(value) elif", "Graph properties as \"internal properties\" for key, value in nxG.graph.items(): # Convert the", "'object' else: tname = 'string' value = str(value) return tname, value, key def", "import tqdm import networkx as nx import argparse import numpy as np import", "a special PropertyMap called 'id' -- modify as needed! gtG.vertex_properties['id'] = gtG.new_vertex_property('string') #", "in tqdm.tqdm(repeat_nodes, desc='Checking repeats'): if G_copy.has_node(node): G_copy.remove_node(node) ofile.write(str(node)+'\\t'+str(repeat_nodes[node])+'\\n') #for u,v,data in G_copy.edges(data=True): #", "src, dst, data in nxG.edges_iter(data=True): # Look up the vertex structs from our", "actual value # Phase 1: Add the vertex and edge property maps #", "\"\"\" Performs typing and value conversion for the graph_tool PropertyMap class. If a", "elif isinstance(value, int): tname = 'float' value = float(value) elif isinstance(value, float): tname", "special PropertyMap called 'id' -- modify as needed! gtG.vertex_properties['id'] = gtG.new_vertex_property('string') # Add", "is short for edge_properties # Done, finally! 
return gtG def get_centrality(subg): # centralities", "nxG.nodes_iter(data=True): # Go through all the properties if not seen and add them.", "print('Loading bundled graph...') with open(args.graph,'r') as f: for line in tqdm.tqdm(f, desc='Reading bundled'):", "value = str(value) return tname, value, key def nx2gt(nxG): \"\"\" Converts a networkx", "print('Using {} cpus'.format(cpus)) print('Loading bundled graph...') with open(args.graph,'r') as f: for line in", "as needed! gtG.vertex_properties['id'] = gtG.new_vertex_property('string') # Add the edge properties second eprops =", "line in tqdm.tqdm(f, desc='Reading lengths'): attrs = line.split() if attrs[0] in node_set: contig_length[attrs[0]]", "centralities = nx.betweenness_centrality(subg) # print(centralities) _g = nx2gt(subg) centralities, _ = betweenness(_g) v", "Skip properties already added # Convert the value and key into a type", "value in data.items(): gtG.vp[key][v] = value # vp is short for vertex_properties #", "key.encode('ascii', errors='replace') # Deal with the value if isinstance(value, bool): tname = 'bool'", "value, key def nx2gt(nxG): \"\"\" Converts a networkx graph to a graph-tool graph.", "= value # Set the actual value # Phase 1: Add the vertex", "f: for line in tqdm.tqdm(f, desc='Reading bundled'): attrs = line.split() G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3]) node_set =", "ofile = open(args.output,'w') for i in xrange(3): centrality_wrapper(G_copy) for node in tqdm.tqdm(repeat_nodes, desc='Checking", "node, data in nxG.nodes_iter(data=True): # Go through all the properties if not seen", "# Phase 1: Add the vertex and edge property maps # Go through", "and add them. for key, val in data.items(): if key in eprops: continue", "cache keys to only add properties once for src, dst, data in nxG.edges_iter(data=True):", "Look up the vertex structs from our vertices mapping and add edge. e", "in nxG.graph.items(): # Convert the value and key into a type for graph-tool", "bundled graph...') with open(args.graph,'r') as f: for line in tqdm.tqdm(f, desc='Reading bundled'): attrs", "the nodes and vertices with their properties # Add the nodes vertices =", "len(subg.nodes()) >= 50: get_centrality(subg) G_copy = G.copy() print('Writing output...') ofile = open(args.output,'w') for", "cache keys to only add properties once for node, data in nxG.nodes_iter(data=True): #", "src, dst, data in nxG.edges_iter(data=True): # Go through all the edge properties if", "= centralities.get_array() mean = float(np.mean(v)) stdev = float(np.std(v)) for node in _g.vertices(): if", "nx.betweenness_centrality(subg) # print(centralities) _g = nx2gt(subg) centralities, _ = betweenness(_g) v = centralities.get_array()", "key = get_prop_type(value, key) prop = gtG.new_graph_property(tname) # Create the PropertyMap gtG.graph_properties[key] =", "import networkx as nx import argparse import numpy as np import multiprocessing import", "nxG.nodes_iter(data=True): # Create the vertex and annotate for our edges later v =", "add edge. 
e = gtG.add_edge(vertices[src], vertices[dst]) # Add the edge properties for key,", "{} components'.format(n_comp)) for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'): if len(subg.nodes()) >= 50: get_centrality(subg)", "get_prop_type(value, key) prop = gtG.new_graph_property(tname) # Create the PropertyMap gtG.graph_properties[key] = prop #", "# Set the actual value # Phase 1: Add the vertex and edge", "in tqdm.tqdm(f, desc='Reading bundled'): attrs = line.split() G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3]) node_set = set(G.nodes()) print('Loading contig", "key as ASCII key = key.encode('ascii', errors='replace') # Deal with the value if", "properties as \"internal properties\" for key, value in nxG.graph.items(): # Convert the value", "the PropertyMap gtG.graph_properties[key] = prop # Set the PropertyMap gtG.graph_properties[key] = value #", "import argparse import numpy as np import multiprocessing import graph_tool as gt from", "used with the PropertyMap. Returns a tuple, (type name, value, key) \"\"\" if", "numpy as np import multiprocessing import graph_tool as gt from graph_tool.centrality import betweenness", "parser.add_argument(\"-g\", \"--graph\", help='bundled graph') parser.add_argument(\"-l\",\"--length\",help=\"contig length\") parser.add_argument(\"-o\",\"--output\",help=\"output file\") args = parser.parse_args() G =", "it also ensures the key is in a format that can be used", "undirected graph-tool Graph gtG = gt.Graph(directed=nxG.is_directed()) # Add the Graph properties as \"internal", "for key, value in nxG.graph.items(): # Convert the value and key into a", "0: Create a directed or undirected graph-tool Graph gtG = gt.Graph(directed=nxG.is_directed()) # Add", "unicode): tname = 'string' value = value.encode('ascii', errors='replace') elif isinstance(value, dict): tname =", "tname = 'object' else: tname = 'string' value = str(value) return tname, value,", "value and key into a type for graph-tool tname, value, key = get_prop_type(value,", "val in data.items(): if key in eprops: continue # Skip properties already added", "properties, not forgetting the id property data['id'] = str(node) for key, value in", "\"\"\" if isinstance(key, unicode): # Encode the key as ASCII key = key.encode('ascii',", "in node_set: contig_length[attrs[0]] = int(attrs[1]) del node_set nx.set_node_attributes(G,'length',contig_length) repeat_nodes = {} def get_prop_type(value,", "graph_tool PropertyMap class. If a key is provided, it also ensures the key", "= gtG.new_vertex_property(tname) # Create the PropertyMap gtG.vertex_properties[key] = prop # Set the PropertyMap", "xrange(3): centrality_wrapper(G_copy) for node in tqdm.tqdm(repeat_nodes, desc='Checking repeats'): if G_copy.has_node(node): G_copy.remove_node(node) ofile.write(str(node)+'\\t'+str(repeat_nodes[node])+'\\n') #for", "set() # cache keys to only add properties once for src, dst, data", "typing and value conversion for the graph_tool PropertyMap class. If a key is", "tname, _, key = get_prop_type(val, key) prop = gtG.new_edge_property(tname) # Create the PropertyMap", "short for edge_properties # Done, finally! 
import tqdm
import networkx as nx
import argparse
import numpy as np
import multiprocessing
import graph_tool as gt
from graph_tool.centrality import betweenness

parser = argparse.ArgumentParser()
parser.add_argument("-g", "--graph", help="bundled graph")
parser.add_argument("-l", "--length", help="contig length")
parser.add_argument("-o", "--output", help="output file")
args = parser.parse_args()

G = nx.Graph()
cpus = multiprocessing.cpu_count()
print('Using {} cpus'.format(cpus))

print('Loading bundled graph...')
with open(args.graph, 'r') as f:
    for line in tqdm.tqdm(f, desc='Reading bundled'):
        attrs = line.split()
        G.add_edge(attrs[0], attrs[2],
                   mean=float(attrs[4]), stdev=float(attrs[5]),
                   bsize=int(attrs[6]), ori=attrs[1] + attrs[3])

node_set = set(G.nodes())
print('Loading contig lengths...')
contig_length = {}
with open(args.length, 'r') as f:
    for line in tqdm.tqdm(f, desc='Reading lengths'):
        attrs = line.split()
        if attrs[0] in node_set:
            contig_length[attrs[0]] = int(attrs[1])
del node_set
# NetworkX 1.x argument order: (graph, attribute name, values dict)
nx.set_node_attributes(G, 'length', contig_length)
repeat_nodes = {}
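# The parser above assumes a whitespace-separated, seven-column bundled-edge
# format. The exact layout is not documented here; judging from the indices
# used (0 and 2 for contig ids, 1 and 3 for orientation flags, 4-6 for the
# numeric fields), a hypothetical input line would look like:
#
#   contig_12  B  contig_47  E  1523.4  88.2  31
#
# i.e. two oriented contig ids followed by the bundle's mean distance,
# standard deviation, and size.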
\"\"\" # Phase 0: Create", "isinstance(value, int): tname = 'float' value = float(value) elif isinstance(value, float): tname =", "= centralities[node] def centrality_wrapper(graph): n_comp = nx.number_connected_components(graph) print('The graph has {} components'.format(n_comp)) for", "# Phase 2: Actually add all the nodes and vertices with their properties", "into a type for graph-tool tname, value, key = get_prop_type(value, key) prop =", "del node_set nx.set_node_attributes(G,'length',contig_length) repeat_nodes = {} def get_prop_type(value, key=None): \"\"\" Performs typing and", "if key in nprops: continue # Skip properties already added # Convert the", "key, val in data.items(): if key in eprops: continue # Skip properties already", "subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'): if len(subg.nodes()) >= 50: get_centrality(subg) G_copy = G.copy()", "# in graph-tool node are defined as indices. So we capture any strings", "# Done, finally! return gtG def get_centrality(subg): # centralities = nx.betweenness_centrality(subg) # print(centralities)", "tqdm.tqdm(f, desc='Reading bundled'): attrs = line.split() G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3]) node_set = set(G.nodes()) print('Loading contig lengths...')", "node properties first nprops = set() # cache keys to only add properties", "tqdm.tqdm(f, desc='Reading lengths'): attrs = line.split() if attrs[0] in node_set: contig_length[attrs[0]] = int(attrs[1])", "float(value) elif isinstance(value, float): tname = 'float' elif isinstance(value, unicode): tname = 'string'", "key into a type for graph-tool tname, _, key = get_prop_type(val, key) prop", "isinstance(value, bool): tname = 'bool' elif isinstance(value, int): tname = 'float' value =", "with open(args.length,'r') as f: for line in tqdm.tqdm(f, desc='Reading lengths'): attrs = line.split()", "open(args.output,'w') for i in xrange(3): centrality_wrapper(G_copy) for node in tqdm.tqdm(repeat_nodes, desc='Checking repeats'): if", "print('Loading contig lengths...') contig_length = {} with open(args.length,'r') as f: for line in", "in a format that can be used with the PropertyMap. Returns a tuple,", "them. for key, val in data.items(): if key in eprops: continue # Skip", "= float(np.std(v)) for node in _g.vertices(): if centralities[node] >= mean + 3*stdev: repeat_nodes[_g.vertex_properties['id'][node]]", "nodes and edges and add seen properties # Add the node properties first", "all the nodes and vertices with their properties # Add the nodes vertices", "gtG.ep[key][e] = value # ep is short for edge_properties # Done, finally! return", "a format that can be used with the PropertyMap. Returns a tuple, (type", "Add the edge properties for key, value in data.items(): gtG.ep[key][e] = value #", "= int(attrs[1]) del node_set nx.set_node_attributes(G,'length',contig_length) repeat_nodes = {} def get_prop_type(value, key=None): \"\"\" Performs", "the PropertyMap gtG.edge_properties[key] = prop # Set the PropertyMap # Add the key", "PropertyMap # Add the key to the already seen properties eprops.add(key) # Phase", "Add the nodes vertices = {} # vertex mapping for tracking edges later", "node can be any hashable type, but # in graph-tool node are defined", "with their properties # Add the nodes vertices = {} # vertex mapping", "for graph-tool tname, _, key = get_prop_type(val, key) prop = gtG.new_vertex_property(tname) # Create", "edge properties if not seen and add them. 
for key, val in data.items():", "as \"internal properties\" for key, value in nxG.graph.items(): # Convert the value and", "the vertex and edge property maps # Go through all nodes and edges", "the vertex and annotate for our edges later v = gtG.add_vertex() vertices[node] =", "node id: in NetworkX a node can be any hashable type, but #", "centralities[node] def centrality_wrapper(graph): n_comp = nx.number_connected_components(graph) print('The graph has {} components'.format(n_comp)) for subg", "# Add the Graph properties as \"internal properties\" for key, value in nxG.graph.items():", "seen properties eprops.add(key) # Phase 2: Actually add all the nodes and vertices", "import betweenness parser = argparse.ArgumentParser() parser.add_argument(\"-g\", \"--graph\", help='bundled graph') parser.add_argument(\"-l\",\"--length\",help=\"contig length\") parser.add_argument(\"-o\",\"--output\",help=\"output file\")", "all nodes and edges and add seen properties # Add the node properties", "key, value in data.items(): gtG.vp[key][v] = value # vp is short for vertex_properties", "{} with open(args.length,'r') as f: for line in tqdm.tqdm(f, desc='Reading lengths'): attrs =", "edge properties for key, value in data.items(): gtG.ep[key][e] = value # ep is", "capture any strings # in a special PropertyMap called 'id' -- modify as", "vertex mapping for tracking edges later for node, data in nxG.nodes_iter(data=True): # Create", "np import multiprocessing import graph_tool as gt from graph_tool.centrality import betweenness parser =", "gtG.add_edge(vertices[src], vertices[dst]) # Add the edge properties for key, value in data.items(): gtG.ep[key][e]", "n_comp = nx.number_connected_components(graph) print('The graph has {} components'.format(n_comp)) for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp,", "NetworkX a node can be any hashable type, but # in graph-tool node", "Create the PropertyMap gtG.graph_properties[key] = prop # Set the PropertyMap gtG.graph_properties[key] = value", "and annotate for our edges later v = gtG.add_vertex() vertices[node] = v #", "file\") args = parser.parse_args() G = nx.Graph() cpus = multiprocessing.cpu_count() print('Using {} cpus'.format(cpus))", "float): tname = 'float' elif isinstance(value, unicode): tname = 'string' value = value.encode('ascii',", "the id property data['id'] = str(node) for key, value in data.items(): gtG.vp[key][v] =", "defined as indices. So we capture any strings # in a special PropertyMap", "dst, data in nxG.edges_iter(data=True): # Go through all the edge properties if not", "'bool' elif isinstance(value, int): tname = 'float' value = float(value) elif isinstance(value, float):", "if centralities[node] >= mean + 3*stdev: repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node] def centrality_wrapper(graph): n_comp =", "a directed or undirected graph-tool Graph gtG = gt.Graph(directed=nxG.is_directed()) # Add the Graph", "properties for key, value in data.items(): gtG.ep[key][e] = value # ep is short", "tname = 'string' value = value.encode('ascii', errors='replace') elif isinstance(value, dict): tname = 'object'", "Performs typing and value conversion for the graph_tool PropertyMap class. If a key", "# Set the PropertyMap # Add the key to the already seen properties", "from our vertices mapping and add edge. 
e = gtG.add_edge(vertices[src], vertices[dst]) # Add", "node, data in nxG.nodes_iter(data=True): # Create the vertex and annotate for our edges", "= nx.number_connected_components(graph) print('The graph has {} components'.format(n_comp)) for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'):", "as np import multiprocessing import graph_tool as gt from graph_tool.centrality import betweenness parser", "# ep is short for edge_properties # Done, finally! return gtG def get_centrality(subg):", "val in data.items(): if key in nprops: continue # Skip properties already added", "output...') ofile = open(args.output,'w') for i in xrange(3): centrality_wrapper(G_copy) for node in tqdm.tqdm(repeat_nodes,", "# Set the PropertyMap gtG.graph_properties[key] = value # Set the actual value #", "betweenness(_g) v = centralities.get_array() mean = float(np.mean(v)) stdev = float(np.std(v)) for node in", "= get_prop_type(val, key) prop = gtG.new_edge_property(tname) # Create the PropertyMap gtG.edge_properties[key] = prop", "centrality_wrapper(G_copy) for node in tqdm.tqdm(repeat_nodes, desc='Checking repeats'): if G_copy.has_node(node): G_copy.remove_node(node) ofile.write(str(node)+'\\t'+str(repeat_nodes[node])+'\\n') #for u,v,data", "= 'string' value = value.encode('ascii', errors='replace') elif isinstance(value, dict): tname = 'object' else:", "= 'float' value = float(value) elif isinstance(value, float): tname = 'float' elif isinstance(value,", "= nx.Graph() cpus = multiprocessing.cpu_count() print('Using {} cpus'.format(cpus)) print('Loading bundled graph...') with open(args.graph,'r')", "tname, value, key def nx2gt(nxG): \"\"\" Converts a networkx graph to a graph-tool", "help='bundled graph') parser.add_argument(\"-l\",\"--length\",help=\"contig length\") parser.add_argument(\"-o\",\"--output\",help=\"output file\") args = parser.parse_args() G = nx.Graph() cpus", "isinstance(key, unicode): # Encode the key as ASCII key = key.encode('ascii', errors='replace') #", "a tuple, (type name, value, key) \"\"\" if isinstance(key, unicode): # Encode the", "3*stdev: repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node] def centrality_wrapper(graph): n_comp = nx.number_connected_components(graph) print('The graph has {}", "prop # Set the PropertyMap # Add the key to the already seen", "get_prop_type(value, key=None): \"\"\" Performs typing and value conversion for the graph_tool PropertyMap class.", "added # Convert the value and key into a type for graph-tool tname,", "keys to only add properties once for src, dst, data in nxG.edges_iter(data=True): #", "the edges for src, dst, data in nxG.edges_iter(data=True): # Look up the vertex", "centralities[node] >= mean + 3*stdev: repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node] def centrality_wrapper(graph): n_comp = nx.number_connected_components(graph)", "edges and add seen properties # Add the node properties first nprops =", "# cache keys to only add properties once for node, data in nxG.nodes_iter(data=True):", "parser.parse_args() G = nx.Graph() cpus = multiprocessing.cpu_count() print('Using {} cpus'.format(cpus)) print('Loading bundled graph...')", "to a graph-tool graph. \"\"\" # Phase 0: Create a directed or undirected", "add them. for key, val in data.items(): if key in nprops: continue #", "the PropertyMap gtG.vertex_properties[key] = prop # Set the PropertyMap # Add the key", "PropertyMap class. 
If a key is provided, it also ensures the key is", "graph-tool node are defined as indices. So we capture any strings # in", "attrs = line.split() if attrs[0] in node_set: contig_length[attrs[0]] = int(attrs[1]) del node_set nx.set_node_attributes(G,'length',contig_length)", "= {} # vertex mapping for tracking edges later for node, data in", "conversion for the graph_tool PropertyMap class. If a key is provided, it also", "is in a format that can be used with the PropertyMap. Returns a", "# vp is short for vertex_properties # Add the edges for src, dst,", "_ = betweenness(_g) v = centralities.get_array() mean = float(np.mean(v)) stdev = float(np.std(v)) for", "for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'): if len(subg.nodes()) >= 50: get_centrality(subg) G_copy =", "properties # Add the nodes vertices = {} # vertex mapping for tracking", "nodes and vertices with their properties # Add the nodes vertices = {}", "= str(node) for key, value in data.items(): gtG.vp[key][v] = value # vp is", "graph_tool as gt from graph_tool.centrality import betweenness parser = argparse.ArgumentParser() parser.add_argument(\"-g\", \"--graph\", help='bundled", "= argparse.ArgumentParser() parser.add_argument(\"-g\", \"--graph\", help='bundled graph') parser.add_argument(\"-l\",\"--length\",help=\"contig length\") parser.add_argument(\"-o\",\"--output\",help=\"output file\") args = parser.parse_args()", "= 'float' elif isinstance(value, unicode): tname = 'string' value = value.encode('ascii', errors='replace') elif", "data in nxG.edges_iter(data=True): # Look up the vertex structs from our vertices mapping", "errors='replace') elif isinstance(value, dict): tname = 'object' else: tname = 'string' value =", "key to the already seen properties eprops.add(key) # Phase 2: Actually add all", "eprops: continue # Skip properties already added # Convert the value and key", "Actually add all the nodes and vertices with their properties # Add the", "prop = gtG.new_edge_property(tname) # Create the PropertyMap gtG.edge_properties[key] = prop # Set the", "nxG.graph.items(): # Convert the value and key into a type for graph-tool tname,", "into a type for graph-tool tname, _, key = get_prop_type(val, key) prop =", "G.copy() print('Writing output...') ofile = open(args.output,'w') for i in xrange(3): centrality_wrapper(G_copy) for node", "If a key is provided, it also ensures the key is in a", "# Deal with the value if isinstance(value, bool): tname = 'bool' elif isinstance(value,", "in nprops: continue # Skip properties already added # Convert the value and", "# Encode the key as ASCII key = key.encode('ascii', errors='replace') # Deal with", "provided, it also ensures the key is in a format that can be", "prop # Set the PropertyMap gtG.graph_properties[key] = value # Set the actual value", "in nxG.nodes_iter(data=True): # Create the vertex and annotate for our edges later v", "for src, dst, data in nxG.edges_iter(data=True): # Look up the vertex structs from", "Add the key to the already seen properties eprops.add(key) # Phase 2: Actually", "in eprops: continue # Skip properties already added # Convert the value and", "lengths...') contig_length = {} with open(args.length,'r') as f: for line in tqdm.tqdm(f, desc='Reading", "for node in _g.vertices(): if centralities[node] >= mean + 3*stdev: repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node]", "tuple, (type name, value, key) \"\"\" if isinstance(key, unicode): # Encode the key", "for graph-tool tname, _, key = 
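# A quick doctest-style sketch of the type mapping (Python 2 semantics, since
# the function checks for `unicode`). Note that ints are widened to 'float',
# so int- and float-valued attributes land in the same kind of property map:
#
#   >>> get_prop_type(True)
#   ('bool', True, None)
#   >>> get_prop_type(42)
#   ('float', 42.0, None)
#   >>> get_prop_type(u'BE', key=u'ori')
#   ('string', 'BE', 'ori')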
def nx2gt(nxG):
    """
    Converts a networkx graph to a graph-tool graph.
    """
    # Phase 0: Create a directed or undirected graph-tool Graph
    gtG = gt.Graph(directed=nxG.is_directed())

    # Add the Graph properties as "internal properties"
    for key, value in nxG.graph.items():
        # Convert the value and key into a type for graph-tool
        tname, value, key = get_prop_type(value, key)
        prop = gtG.new_graph_property(tname)  # Create the PropertyMap
        gtG.graph_properties[key] = prop      # Set the PropertyMap
        gtG.graph_properties[key] = value     # Set the actual value

    # Phase 1: Add the vertex and edge property maps
    # Go through all nodes and edges and add seen properties

    # Add the node properties first
    nprops = set()  # cache keys to only add properties once
    for node, data in nxG.nodes_iter(data=True):
        # Go through all the properties if not seen and add them.
        for key, val in data.items():
            if key in nprops:
                continue  # Skip properties already added
            # Convert the value and key into a type for graph-tool
            tname, _, key = get_prop_type(val, key)
            prop = gtG.new_vertex_property(tname)  # Create the PropertyMap
            gtG.vertex_properties[key] = prop      # Set the PropertyMap
            # Add the key to the already seen properties
            nprops.add(key)

    # Also add the node id: in NetworkX a node can be any hashable type, but
    # in graph-tool nodes are defined as indices. So we capture any strings
    # in a special PropertyMap called 'id' -- modify as needed!
    gtG.vertex_properties['id'] = gtG.new_vertex_property('string')

    # Add the edge properties second
    eprops = set()  # cache keys to only add properties once
    for src, dst, data in nxG.edges_iter(data=True):
        # Go through all the edge properties if not seen and add them.
        for key, val in data.items():
            if key in eprops:
                continue  # Skip properties already added
            # Convert the value and key into a type for graph-tool
            tname, _, key = get_prop_type(val, key)
            prop = gtG.new_edge_property(tname)  # Create the PropertyMap
            gtG.edge_properties[key] = prop      # Set the PropertyMap
            # Add the key to the already seen properties
            eprops.add(key)

    # Phase 2: Actually add all the nodes and vertices with their properties

    # Add the nodes
    vertices = {}  # vertex mapping for tracking edges later
    for node, data in nxG.nodes_iter(data=True):
        # Create the vertex and annotate it for our edges later
        v = gtG.add_vertex()
        vertices[node] = v
        # Set the vertex properties, not forgetting the id property
        data['id'] = str(node)
        for key, value in data.items():
            gtG.vp[key][v] = value  # vp is short for vertex_properties

    # Add the edges
    for src, dst, data in nxG.edges_iter(data=True):
        # Look up the vertex structs from our vertices mapping and add the edge.
        e = gtG.add_edge(vertices[src], vertices[dst])
        # Add the edge properties
        for key, value in data.items():
            gtG.ep[key][e] = value  # ep is short for edge_properties

    # Done, finally!
    return gtG
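# Minimal round-trip sketch for nx2gt on a toy graph. The two-node graph is
# hypothetical, and this relies on the NetworkX 1.x iterator API
# (nodes_iter/edges_iter) used throughout this script:
#
#   toy = nx.Graph()
#   toy.add_edge('a', 'b', mean=100.0, stdev=5.0, bsize=3, ori='BE')
#   gt_toy = nx2gt(toy)
#   print(gt_toy.num_vertices())  # 2
#   print(gt_toy.num_edges())     # 1
#   print(gt_toy.vp['id'][gt_toy.vertex(0)])  # 'a' or 'b'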
def get_centrality(subg):
    # centralities = nx.betweenness_centrality(subg)
    # print(centralities)
    _g = nx2gt(subg)
    centralities, _ = betweenness(_g)
    v = centralities.get_array()
    mean = float(np.mean(v))
    stdev = float(np.std(v))
    # Flag any vertex whose betweenness is at least three standard deviations
    # above the component mean as a likely repeat.
    for node in _g.vertices():
        if centralities[node] >= mean + 3 * stdev:
            repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node]
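# Worked example of the cutoff, with made-up numbers: if a component's
# betweenness scores have mean 0.02 and standard deviation 0.01, then any
# vertex scoring >= 0.02 + 3 * 0.01 = 0.05 is recorded in repeat_nodes.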
\"\"\" # Phase 0: Create a directed or undirected graph-tool", "value # vp is short for vertex_properties # Add the edges for src,", "v = centralities.get_array() mean = float(np.mean(v)) stdev = float(np.std(v)) for node in _g.vertices():", "the PropertyMap gtG.graph_properties[key] = value # Set the actual value # Phase 1:", "Create the PropertyMap gtG.vertex_properties[key] = prop # Set the PropertyMap # Add the", "value, key = get_prop_type(value, key) prop = gtG.new_graph_property(tname) # Create the PropertyMap gtG.graph_properties[key]", "tqdm import networkx as nx import argparse import numpy as np import multiprocessing", "our edges later v = gtG.add_vertex() vertices[node] = v # Set the vertex", "'id' -- modify as needed! gtG.vertex_properties['id'] = gtG.new_vertex_property('string') # Add the edge properties", "type for graph-tool tname, value, key = get_prop_type(value, key) prop = gtG.new_graph_property(tname) #", "Add the vertex and edge property maps # Go through all nodes and", "# Skip properties already added # Convert the value and key into a", "edges later for node, data in nxG.nodes_iter(data=True): # Create the vertex and annotate", "# Also add the node id: in NetworkX a node can be any", "for node, data in nxG.nodes_iter(data=True): # Create the vertex and annotate for our", "tname, _, key = get_prop_type(val, key) prop = gtG.new_vertex_property(tname) # Create the PropertyMap", "in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'): if len(subg.nodes()) >= 50: get_centrality(subg) G_copy = G.copy() print('Writing", "name, value, key) \"\"\" if isinstance(key, unicode): # Encode the key as ASCII", "for vertex_properties # Add the edges for src, dst, data in nxG.edges_iter(data=True): #", "So we capture any strings # in a special PropertyMap called 'id' --", "edge. e = gtG.add_edge(vertices[src], vertices[dst]) # Add the edge properties for key, value", "G_copy = G.copy() print('Writing output...') ofile = open(args.output,'w') for i in xrange(3): centrality_wrapper(G_copy)", "the vertex structs from our vertices mapping and add edge. e = gtG.add_edge(vertices[src],", "Encode the key as ASCII key = key.encode('ascii', errors='replace') # Deal with the", "and key into a type for graph-tool tname, value, key = get_prop_type(value, key)", "set(G.nodes()) print('Loading contig lengths...') contig_length = {} with open(args.length,'r') as f: for line", "# Set the vertex properties, not forgetting the id property data['id'] = str(node)", "gt.Graph(directed=nxG.is_directed()) # Add the Graph properties as \"internal properties\" for key, value in", "Set the PropertyMap # Add the key to the already seen properties nprops.add(key)", "gtG.new_graph_property(tname) # Create the PropertyMap gtG.graph_properties[key] = prop # Set the PropertyMap gtG.graph_properties[key]", "continue # Skip properties already added # Convert the value and key into", "the node id: in NetworkX a node can be any hashable type, but", "node in _g.vertices(): if centralities[node] >= mean + 3*stdev: repeat_nodes[_g.vertex_properties['id'][node]] = centralities[node] def", "format that can be used with the PropertyMap. 
Returns a tuple, (type name,", "get_prop_type(val, key) prop = gtG.new_edge_property(tname) # Create the PropertyMap gtG.edge_properties[key] = prop #", "2: Actually add all the nodes and vertices with their properties # Add", "gtG.vp[key][v] = value # vp is short for vertex_properties # Add the edges", "a key is provided, it also ensures the key is in a format", "centrality_wrapper(graph): n_comp = nx.number_connected_components(graph) print('The graph has {} components'.format(n_comp)) for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph),", "properties # Add the node properties first nprops = set() # cache keys", "key, value in data.items(): gtG.ep[key][e] = value # ep is short for edge_properties", "nodes vertices = {} # vertex mapping for tracking edges later for node,", "is provided, it also ensures the key is in a format that can", "= value # ep is short for edge_properties # Done, finally! return gtG", "argparse.ArgumentParser() parser.add_argument(\"-g\", \"--graph\", help='bundled graph') parser.add_argument(\"-l\",\"--length\",help=\"contig length\") parser.add_argument(\"-o\",\"--output\",help=\"output file\") args = parser.parse_args() G", "Add the node properties first nprops = set() # cache keys to only", "add them. for key, val in data.items(): if key in eprops: continue #", "later for node, data in nxG.nodes_iter(data=True): # Create the vertex and annotate for", "= G.copy() print('Writing output...') ofile = open(args.output,'w') for i in xrange(3): centrality_wrapper(G_copy) for", "Returns a tuple, (type name, value, key) \"\"\" if isinstance(key, unicode): # Encode", "Convert the value and key into a type for graph-tool tname, _, key", "node_set: contig_length[attrs[0]] = int(attrs[1]) del node_set nx.set_node_attributes(G,'length',contig_length) repeat_nodes = {} def get_prop_type(value, key=None):", "to the already seen properties nprops.add(key) # Also add the node id: in", "our vertices mapping and add edge. e = gtG.add_edge(vertices[src], vertices[dst]) # Add the", "strings # in a special PropertyMap called 'id' -- modify as needed! 
gtG.vertex_properties['id']", "line.split() G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3]) node_set = set(G.nodes()) print('Loading contig lengths...') contig_length = {} with open(args.length,'r')", "= value # vp is short for vertex_properties # Add the edges for", "tname = 'string' value = str(value) return tname, value, key def nx2gt(nxG): \"\"\"", "seen properties nprops.add(key) # Also add the node id: in NetworkX a node", "for node, data in nxG.nodes_iter(data=True): # Go through all the properties if not", "for key, value in data.items(): gtG.vp[key][v] = value # vp is short for", "v = gtG.add_vertex() vertices[node] = v # Set the vertex properties, not forgetting", "= nx.betweenness_centrality(subg) # print(centralities) _g = nx2gt(subg) centralities, _ = betweenness(_g) v =", "print(centralities) _g = nx2gt(subg) centralities, _ = betweenness(_g) v = centralities.get_array() mean =", "ASCII key = key.encode('ascii', errors='replace') # Deal with the value if isinstance(value, bool):", "contig lengths...') contig_length = {} with open(args.length,'r') as f: for line in tqdm.tqdm(f,", "= gt.Graph(directed=nxG.is_directed()) # Add the Graph properties as \"internal properties\" for key, value", "directed or undirected graph-tool Graph gtG = gt.Graph(directed=nxG.is_directed()) # Add the Graph properties", "gtG def get_centrality(subg): # centralities = nx.betweenness_centrality(subg) # print(centralities) _g = nx2gt(subg) centralities,", "unicode): # Encode the key as ASCII key = key.encode('ascii', errors='replace') # Deal", "PropertyMap gtG.vertex_properties[key] = prop # Set the PropertyMap # Add the key to", "= 'object' else: tname = 'string' value = str(value) return tname, value, key", "the PropertyMap # Add the key to the already seen properties eprops.add(key) #", "nx.number_connected_components(graph) print('The graph has {} components'.format(n_comp)) for subg in tqdm.tqdm(nx.connected_component_subgraphs(graph), total=n_comp, desc='Component'): if", "key = key.encode('ascii', errors='replace') # Deal with the value if isinstance(value, bool): tname", "is short for vertex_properties # Add the edges for src, dst, data in", "value in data.items(): gtG.ep[key][e] = value # ep is short for edge_properties #", "int): tname = 'float' value = float(value) elif isinstance(value, float): tname = 'float'", "key) prop = gtG.new_graph_property(tname) # Create the PropertyMap gtG.graph_properties[key] = prop # Set", "type for graph-tool tname, _, key = get_prop_type(val, key) prop = gtG.new_edge_property(tname) #", "# Go through all the edge properties if not seen and add them.", "edges for src, dst, data in nxG.edges_iter(data=True): # Look up the vertex structs", "the PropertyMap. Returns a tuple, (type name, value, key) \"\"\" if isinstance(key, unicode):", "contig_length[attrs[0]] = int(attrs[1]) del node_set nx.set_node_attributes(G,'length',contig_length) repeat_nodes = {} def get_prop_type(value, key=None): \"\"\"", "and vertices with their properties # Add the nodes vertices = {} #", "Go through all the edge properties if not seen and add them. 
for", "not forgetting the id property data['id'] = str(node) for key, value in data.items():", "nprops.add(key) # Also add the node id: in NetworkX a node can be", "graph...') with open(args.graph,'r') as f: for line in tqdm.tqdm(f, desc='Reading bundled'): attrs =", "'float' value = float(value) elif isinstance(value, float): tname = 'float' elif isinstance(value, unicode):", "Add the Graph properties as \"internal properties\" for key, value in nxG.graph.items(): #", "as indices. So we capture any strings # in a special PropertyMap called", "def get_prop_type(value, key=None): \"\"\" Performs typing and value conversion for the graph_tool PropertyMap", "desc='Component'): if len(subg.nodes()) >= 50: get_centrality(subg) G_copy = G.copy() print('Writing output...') ofile =", "int(attrs[1]) del node_set nx.set_node_attributes(G,'length',contig_length) repeat_nodes = {} def get_prop_type(value, key=None): \"\"\" Performs typing", "tname = 'float' value = float(value) elif isinstance(value, float): tname = 'float' elif", "line in tqdm.tqdm(f, desc='Reading bundled'): attrs = line.split() G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3]) node_set = set(G.nodes()) print('Loading", "mapping for tracking edges later for node, data in nxG.nodes_iter(data=True): # Create the", "1: Add the vertex and edge property maps # Go through all nodes", "if G_copy.has_node(node): G_copy.remove_node(node) ofile.write(str(node)+'\\t'+str(repeat_nodes[node])+'\\n') #for u,v,data in G_copy.edges(data=True): # print u +\"\\t\"+data[u][v]['ori'][0]+v+\"\\t\"+data[u][v]['ori'][1]+\"\\t\"+str(data[u][v][\"mean\"])+\"\\t\"+str(data[u][v][\"stdev\"])+\"\\t\"+str(data[u][v][\"bsize\"]) #nx.write_gml(G_copy,args.output)", "# cache keys to only add properties once for src, dst, data in", "to only add properties once for src, dst, data in nxG.edges_iter(data=True): # Go", "def nx2gt(nxG): \"\"\" Converts a networkx graph to a graph-tool graph. \"\"\" #", "{} cpus'.format(cpus)) print('Loading bundled graph...') with open(args.graph,'r') as f: for line in tqdm.tqdm(f,", "nprops: continue # Skip properties already added # Convert the value and key", "key = get_prop_type(val, key) prop = gtG.new_edge_property(tname) # Create the PropertyMap gtG.edge_properties[key] =", "str(node) for key, value in data.items(): gtG.vp[key][v] = value # vp is short", "vertices with their properties # Add the nodes vertices = {} # vertex", "bundled'): attrs = line.split() G.add_edge(attrs[0],attrs[2],mean=float(attrs[4]),stdev=float(attrs[5]),bsize=int(attrs[6]),ori=attrs[1]+attrs[3]) node_set = set(G.nodes()) print('Loading contig lengths...') contig_length =", "attrs[0] in node_set: contig_length[attrs[0]] = int(attrs[1]) del node_set nx.set_node_attributes(G,'length',contig_length) repeat_nodes = {} def", "value = float(value) elif isinstance(value, float): tname = 'float' elif isinstance(value, unicode): tname" ]
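# Example invocation (the script and file names are hypothetical; the flags
# come from the argparse setup at the top):
#
#   python find_repeats.py -g bundled_graph.txt -l contig_lengths.txt -o repeats.txt
#
# Each pass of the loop above removes the flagged high-centrality contigs and
# re-runs the centrality scan, so the output TSV (contig id, betweenness
# score) accumulates the repeats discovered over three rounds.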