code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
import torch
import torch.nn.functional as F
from .gradcam import GradCAM
class GradCAMpp(GradCAM):
    """GradCAM++ saliency-map generator; inherits hook plumbing from GradCAM.

    Reference: Chattopadhyay et al., "Grad-CAM++: Improved Visual
    Explanations for Deep Convolutional Networks" (WACV 2018).
    """
    def __init__(self, model_dict):
        super(GradCAMpp, self).__init__(model_dict)

    def forward(self, input_image, class_idx=None, retain_graph=False):
        """Generates GradCAM++ result.
        # Arguments
            input_image: torch.Tensor. Preprocessed image with shape (1, C, H, W).
            class_idx: int. Index of target class. Defaults to be index of predicted class.
            retain_graph: bool. Passed through to backward().
        # Return
            Result of GradCAM++ (torch.Tensor) with shape (1, 1, H, W),
            min-max normalized to [0, 1].
        """
        b, c, h, w = input_image.size()

        logit = self.model_arch(input_image)
        if class_idx is None:
            score = logit[:, logit.max(1)[-1]].squeeze()
        else:
            score = logit[:, class_idx].squeeze()

        self.model_arch.zero_grad()
        score.backward(retain_graph=retain_graph)
        gradients = self.gradients['value']      # dS/dA captured by the backward hook
        activations = self.activations['value']  # A captured by the forward hook
        b, k, u, v = gradients.size()

        # Alpha weights (Eq. 19 of the GradCAM++ paper).
        alpha_num = gradients.pow(2)
        global_sum = activations.view(b, k, u * v).sum(-1, keepdim=True).view(b, k, 1, 1)
        alpha_denom = gradients.pow(2).mul(2) + global_sum.mul(gradients.pow(3))
        # Guard against division by zero where the denominator vanishes.
        alpha_denom = torch.where(alpha_denom != 0.0, alpha_denom, torch.ones_like(alpha_denom))
        alpha = alpha_num.div(alpha_denom + 1e-7)

        positive_gradients = F.relu(score.exp() * gradients)
        weights = (alpha * positive_gradients).view(b, k, u * v).sum(-1).view(b, k, 1, 1)

        saliency_map = (weights * activations).sum(1, keepdim=True)
        saliency_map = F.relu(saliency_map)
        # BUGFIX: upsample to the actual input resolution (h, w) instead of a
        # hard-coded 224x224, so non-224 inputs get a correctly sized map.
        # Identical to the old behavior for the standard 224x224 inputs.
        saliency_map = F.interpolate(saliency_map, size=(h, w), mode='bilinear', align_corners=False)
        saliency_map_min, saliency_map_max = saliency_map.min(), saliency_map.max()
        saliency_map = (saliency_map - saliency_map_min).div(saliency_map_max - saliency_map_min).data
        return saliency_map
|
[
"torch.ones_like",
"torch.nn.functional.interpolate",
"torch.nn.functional.relu"
] |
[((1712, 1732), 'torch.nn.functional.relu', 'F.relu', (['saliency_map'], {}), '(saliency_map)\n', (1718, 1732), True, 'import torch.nn.functional as F\n'), ((1756, 1843), 'torch.nn.functional.interpolate', 'F.interpolate', (['saliency_map'], {'size': '(224, 224)', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(saliency_map, size=(224, 224), mode='bilinear', align_corners\n =False)\n", (1769, 1843), True, 'import torch.nn.functional as F\n'), ((1388, 1416), 'torch.ones_like', 'torch.ones_like', (['alpha_denom'], {}), '(alpha_denom)\n', (1403, 1416), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Teampro and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, math, json
# import erpnext
from frappe import _
from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime
from paypro.paypro.doctype.controllers.accounts_controller import AccountsController
class Loan(AccountsController):
	"""Loan document controller.

	Validates accounts/amounts/pledges, builds the term-loan repayment
	schedule, and links/unlinks the loan security pledge on submit/cancel.
	"""

	def validate(self):
		self.set_loan_amount()
		self.set_missing_fields()
		self.validate_accounts()
		self.validate_loan_security_pledge()
		self.validate_loan_amount()
		self.check_sanctioned_amount_limit()

		if self.is_term_loan:
			validate_repayment_method(self.repayment_method, self.loan_amount, self.monthly_repayment_amount,
				self.repayment_periods, self.is_term_loan)
			self.make_repayment_schedule()
			self.set_repayment_period()

		self.calculate_totals()

	def validate_accounts(self):
		"""Ensure every configured account belongs to this loan's company."""
		for fieldname in ['payment_account', 'loan_account', 'interest_income_account', 'penalty_income_account']:
			company = frappe.get_value("Account", self.get(fieldname), 'company')

			if company != self.company:
				frappe.throw(_("Account {0} does not belongs to company {1}").format(frappe.bold(self.get(fieldname)),
					frappe.bold(self.company)))

	def on_submit(self):
		self.link_loan_security_pledge()

	def on_cancel(self):
		self.unlink_loan_security_pledge()

	def set_missing_fields(self):
		"""Default company, posting date, interest rate and EMI amount."""
		if not self.company:
			# BUGFIX: the original called paypro.get_default_company(), but
			# 'paypro' is never imported here (the erpnext import is commented
			# out), so this line raised NameError. Fall back to the user's
			# default company instead.
			self.company = frappe.defaults.get_user_default("company")

		if not self.posting_date:
			self.posting_date = nowdate()

		if self.loan_type and not self.rate_of_interest:
			self.rate_of_interest = frappe.db.get_value("Loan Type", self.loan_type, "rate_of_interest")

		if self.repayment_method == "Repay Over Number of Periods":
			self.monthly_repayment_amount = get_monthly_repayment_amount(self.repayment_method, self.loan_amount, self.rate_of_interest, self.repayment_periods)

	def validate_loan_security_pledge(self):
		"""A secured loan needs an unused pledge belonging to the same company."""
		if self.is_secured_loan and not self.loan_security_pledge:
			frappe.throw(_("Loan Security Pledge is mandatory for secured loan"))

		if self.loan_security_pledge:
			loan_security_details = frappe.db.get_value("Loan Security Pledge", self.loan_security_pledge,
				['loan', 'company'], as_dict=1)

			if loan_security_details.loan:
				frappe.throw(_("Loan Security Pledge already pledged against loan {0}").format(loan_security_details.loan))

			if loan_security_details.company != self.company:
				frappe.throw(_("Loan Security Pledge Company and Loan Company must be same"))

	def check_sanctioned_amount_limit(self):
		"""Block the loan if it would push the applicant past the sanctioned limit."""
		total_loan_amount = get_total_loan_amount(self.applicant_type, self.applicant, self.company)
		sanctioned_amount_limit = get_sanctioned_amount_limit(self.applicant_type, self.applicant, self.company)

		if sanctioned_amount_limit and flt(self.loan_amount) + flt(total_loan_amount) > flt(sanctioned_amount_limit):
			frappe.throw(_("Sanctioned Amount limit crossed for {0} {1}").format(self.applicant_type, frappe.bold(self.applicant)))

	def make_repayment_schedule(self):
		"""Build monthly schedule rows until the balance is repaid (EMI method)."""
		if not self.repayment_start_date:
			frappe.throw(_("Repayment Start Date is mandatory for term loans"))

		self.repayment_schedule = []
		payment_date = self.repayment_start_date
		balance_amount = self.loan_amount
		while(balance_amount > 0):
			interest_amount = rounded(balance_amount * flt(self.rate_of_interest) / (12*100))
			principal_amount = self.monthly_repayment_amount - interest_amount
			balance_amount = rounded(balance_amount + interest_amount - self.monthly_repayment_amount)
			if balance_amount < 0:
				# Final installment: fold the overshoot back into principal.
				principal_amount += balance_amount
				balance_amount = 0.0

			total_payment = principal_amount + interest_amount
			self.append("repayment_schedule", {
				"payment_date": payment_date,
				"principal_amount": principal_amount,
				"interest_amount": interest_amount,
				"total_payment": total_payment,
				"balance_loan_amount": balance_amount
			})
			next_payment_date = add_months(payment_date, 1)
			payment_date = next_payment_date

	def set_repayment_period(self):
		"""Derive the number of periods from the generated schedule (fixed-amount method)."""
		if self.repayment_method == "Repay Fixed Amount per Period":
			repayment_periods = len(self.repayment_schedule)
			self.repayment_periods = repayment_periods

	def calculate_totals(self):
		"""Aggregate total payment and interest from the schedule (term loans)."""
		self.total_payment = 0
		self.total_interest_payable = 0
		self.total_amount_paid = 0

		if self.is_term_loan:
			for data in self.repayment_schedule:
				self.total_payment += data.total_payment
				self.total_interest_payable += data.interest_amount
		else:
			self.total_payment = self.loan_amount

	def set_loan_amount(self):
		# For secured loans, default the amount from the pledge's maximum value.
		if not self.loan_amount and self.is_secured_loan and self.loan_security_pledge:
			self.loan_amount = self.maximum_loan_value

	def validate_loan_amount(self):
		if self.is_secured_loan and self.loan_amount > self.maximum_loan_value:
			msg = _("Loan amount cannot be greater than {0}").format(self.maximum_loan_value)
			frappe.throw(msg)

		if not self.loan_amount:
			frappe.throw(_("Loan amount is mandatory"))

	def link_loan_security_pledge(self):
		frappe.db.sql("""UPDATE `tabLoan Security Pledge` SET
			loan = %s, status = 'Pledged', pledge_time = %s
			where name = %s """, (self.name, now_datetime(), self.loan_security_pledge))

	def unlink_loan_security_pledge(self):
		# BUGFIX: parameters must be a 1-tuple; "(value)" is just the value.
		frappe.db.sql("""UPDATE `tabLoan Security Pledge` SET
			loan = '', status = 'Unpledged'
			where name = %s """, (self.loan_security_pledge,))
def update_total_amount_paid(doc):
	"""Recompute the loan's total_amount_paid from its paid schedule rows."""
	paid_total = sum(row.total_payment for row in doc.repayment_schedule if row.paid)
	frappe.db.set_value("Loan", doc.name, "total_amount_paid", paid_total)
def get_total_loan_amount(applicant_type, applicant, company):
	"""Sum of submitted (docstatus=1) loan amounts for the applicant in the company."""
	filters = {
		'applicant_type': applicant_type,
		'company': company,
		'applicant': applicant,
		'docstatus': 1,
	}
	return frappe.db.get_value('Loan', filters, 'sum(loan_amount)')
def get_sanctioned_amount_limit(applicant_type, applicant, company):
	"""Configured sanctioned-amount ceiling for this applicant/company, if any."""
	filters = {
		'applicant_type': applicant_type,
		'company': company,
		'applicant': applicant,
	}
	return frappe.db.get_value('Sanctioned Loan Amount', filters, 'sanctioned_amount_limit')
def validate_repayment_method(repayment_method, loan_amount, monthly_repayment_amount, repayment_periods, is_term_loan):
	"""Throw when the chosen repayment method is missing its required inputs."""
	if is_term_loan and not repayment_method:
		frappe.throw(_("Repayment Method is mandatory for term loans"))

	if repayment_method == "Repay Over Number of Periods" and not repayment_periods:
		frappe.throw(_("Please enter Repayment Periods"))

	if repayment_method != "Repay Fixed Amount per Period":
		return

	if not monthly_repayment_amount:
		frappe.throw(_("Please enter repayment Amount"))
	if monthly_repayment_amount > loan_amount:
		frappe.throw(_("Monthly Repayment Amount cannot be greater than Loan Amount"))
def get_monthly_repayment_amount(repayment_method, loan_amount, rate_of_interest, repayment_periods):
	"""EMI for the loan: annuity formula when interest applies, flat split otherwise."""
	if not rate_of_interest:
		return math.ceil(flt(loan_amount) / repayment_periods)

	monthly_rate = flt(rate_of_interest) / (12 * 100)
	growth = (1 + monthly_rate) ** repayment_periods
	return math.ceil(loan_amount * monthly_rate * growth / (growth - 1))
@frappe.whitelist()
def get_loan_application(loan_application):
	"""Return the Loan Application document as a dict (None when not found)."""
	doc = frappe.get_doc("Loan Application", loan_application)
	if not doc:
		return None
	return doc.as_dict()
def close_loan(loan, total_amount_paid):
	"""Mark the loan closed and record the final amount paid."""
	updates = (("total_amount_paid", total_amount_paid), ("status", "Closed"))
	for field, value in updates:
		frappe.db.set_value("Loan", loan, field, value)
@frappe.whitelist()
def make_loan_disbursement(loan, company, applicant_type, applicant, disbursed_amount=0, as_dict=0):
	"""Build an unsaved Loan Disbursement for the given loan; dict form when as_dict."""
	entry = frappe.new_doc("Loan Disbursement")
	entry.against_loan = loan
	entry.applicant_type = applicant_type
	entry.applicant = applicant
	entry.company = company
	entry.disbursement_date = nowdate()

	if disbursed_amount:
		entry.disbursed_amount = disbursed_amount

	return entry.as_dict() if as_dict else entry
@frappe.whitelist()
def make_repayment_entry(loan, applicant_type, applicant, loan_type, company, as_dict=0):
	"""Build an unsaved Loan Repayment for the given loan; dict form when as_dict."""
	entry = frappe.new_doc("Loan Repayment")
	entry.against_loan = loan
	entry.applicant_type = applicant_type
	entry.applicant = applicant
	entry.company = company
	entry.loan_type = loan_type
	entry.posting_date = nowdate()

	return entry.as_dict() if as_dict else entry
@frappe.whitelist()
def create_loan_security_unpledge(loan, applicant_type, applicant, company):
	"""Build an unsaved Loan Security Unpledge covering every security pledged
	against *loan*, and return it as a dict."""
	# BUGFIX: query parameters must be passed as a tuple; "(loan)" is just the
	# bare value, which breaks when the value itself is iterable.
	loan_security_pledge_details = frappe.db.sql("""
		SELECT p.parent, p.loan_security, p.qty as qty FROM `tabLoan Security Pledge` lsp , `tabPledge` p
		WHERE p.parent = lsp.name AND lsp.loan = %s AND lsp.docstatus = 1
	""", (loan,), as_dict=1)

	unpledge_request = frappe.new_doc("Loan Security Unpledge")
	unpledge_request.applicant_type = applicant_type
	unpledge_request.applicant = applicant
	unpledge_request.loan = loan
	unpledge_request.company = company

	for loan_security in loan_security_pledge_details:
		unpledge_request.append('securities', {
			"loan_security": loan_security.loan_security,
			"qty": loan_security.qty,
			"against_pledge": loan_security.parent
		})

	return unpledge_request.as_dict()
|
[
"frappe.utils.flt",
"frappe.utils.now_datetime",
"frappe.utils.rounded",
"math.ceil",
"frappe.whitelist",
"frappe.db.sql",
"frappe.db.get_value",
"frappe.db.set_value",
"frappe.new_doc",
"frappe.bold",
"frappe.get_doc",
"frappe._",
"frappe.utils.nowdate",
"frappe.utils.add_months",
"frappe.throw"
] |
[((7256, 7274), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (7272, 7274), False, 'import frappe, math, json\n'), ((7588, 7606), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (7604, 7606), False, 'import frappe, math, json\n'), ((8156, 8174), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (8172, 8174), False, 'import frappe, math, json\n'), ((8641, 8659), 'frappe.whitelist', 'frappe.whitelist', ([], {}), '()\n', (8657, 8659), False, 'import frappe, math, json\n'), ((5590, 5667), 'frappe.db.set_value', 'frappe.db.set_value', (['"""Loan"""', 'doc.name', '"""total_amount_paid"""', 'total_amount_paid'], {}), "('Loan', doc.name, 'total_amount_paid', total_amount_paid)\n", (5609, 5667), False, 'import frappe, math, json\n'), ((5740, 5887), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Loan"""', "{'applicant_type': applicant_type, 'company': company, 'applicant':\n applicant, 'docstatus': 1}", '"""sum(loan_amount)"""'], {}), "('Loan', {'applicant_type': applicant_type, 'company':\n company, 'applicant': applicant, 'docstatus': 1}, 'sum(loan_amount)')\n", (5759, 5887), False, 'import frappe, math, json\n'), ((5966, 6126), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Sanctioned Loan Amount"""', "{'applicant_type': applicant_type, 'company': company, 'applicant': applicant}", '"""sanctioned_amount_limit"""'], {}), "('Sanctioned Loan Amount', {'applicant_type':\n applicant_type, 'company': company, 'applicant': applicant},\n 'sanctioned_amount_limit')\n", (5985, 6126), False, 'import frappe, math, json\n'), ((7327, 7379), 'frappe.get_doc', 'frappe.get_doc', (['"""Loan Application"""', 'loan_application'], {}), "('Loan Application', loan_application)\n", (7341, 7379), False, 'import frappe, math, json\n'), ((7457, 7530), 'frappe.db.set_value', 'frappe.db.set_value', (['"""Loan"""', 'loan', '"""total_amount_paid"""', 'total_amount_paid'], {}), "('Loan', loan, 'total_amount_paid', total_amount_paid)\n", (7476, 7530), False, 
'import frappe, math, json\n'), ((7532, 7585), 'frappe.db.set_value', 'frappe.db.set_value', (['"""Loan"""', 'loan', '"""status"""', '"""Closed"""'], {}), "('Loan', loan, 'status', 'Closed')\n", (7551, 7585), False, 'import frappe, math, json\n'), ((7730, 7765), 'frappe.new_doc', 'frappe.new_doc', (['"""Loan Disbursement"""'], {}), "('Loan Disbursement')\n", (7744, 7765), False, 'import frappe, math, json\n'), ((7978, 7987), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (7985, 7987), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((8284, 8316), 'frappe.new_doc', 'frappe.new_doc', (['"""Loan Repayment"""'], {}), "('Loan Repayment')\n", (8298, 8316), False, 'import frappe, math, json\n'), ((8548, 8557), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (8555, 8557), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((8769, 8987), 'frappe.db.sql', 'frappe.db.sql', (['"""\n\t\tSELECT p.parent, p.loan_security, p.qty as qty FROM `tabLoan Security Pledge` lsp , `tabPledge` p\n\t\tWHERE p.parent = lsp.name AND lsp.loan = %s AND lsp.docstatus = 1\n\t"""', 'loan'], {'as_dict': '(1)'}), '(\n """\n\t\tSELECT p.parent, p.loan_security, p.qty as qty FROM `tabLoan Security Pledge` lsp , `tabPledge` p\n\t\tWHERE p.parent = lsp.name AND lsp.loan = %s AND lsp.docstatus = 1\n\t"""\n , loan, as_dict=1)\n', (8782, 8987), False, 'import frappe, math, json\n'), ((9000, 9040), 'frappe.new_doc', 'frappe.new_doc', (['"""Loan Security Unpledge"""'], {}), "('Loan Security Unpledge')\n", (9014, 9040), False, 'import frappe, math, json\n'), ((5292, 5441), 'frappe.db.sql', 'frappe.db.sql', (['"""UPDATE `tabLoan Security Pledge` SET\n\t\t\tloan = \'\', status = \'Unpledged\'\n\t\t\twhere name = %s """', 'self.loan_security_pledge'], {}), '(\n """UPDATE `tabLoan Security Pledge` SET\n\t\t\tloan = \'\', status = \'Unpledged\'\n\t\t\twhere name = %s """\n , self.loan_security_pledge)\n', (5305, 
5441), False, 'import frappe, math, json\n'), ((6976, 7134), 'math.ceil', 'math.ceil', (['(loan_amount * monthly_interest_rate * (1 + monthly_interest_rate) **\n repayment_periods / ((1 + monthly_interest_rate) ** repayment_periods - 1))'], {}), '(loan_amount * monthly_interest_rate * (1 + monthly_interest_rate) **\n repayment_periods / ((1 + monthly_interest_rate) ** repayment_periods - 1))\n', (6985, 7134), False, 'import frappe, math, json\n'), ((1559, 1568), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (1566, 1568), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((1648, 1716), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Loan Type"""', 'self.loan_type', '"""rate_of_interest"""'], {}), "('Loan Type', self.loan_type, 'rate_of_interest')\n", (1667, 1716), False, 'import frappe, math, json\n'), ((2170, 2277), 'frappe.db.get_value', 'frappe.db.get_value', (['"""Loan Security Pledge"""', 'self.loan_security_pledge', "['loan', 'company']"], {'as_dict': '(1)'}), "('Loan Security Pledge', self.loan_security_pledge, [\n 'loan', 'company'], as_dict=1)\n", (2189, 2277), False, 'import frappe, math, json\n'), ((3502, 3575), 'frappe.utils.rounded', 'rounded', (['(balance_amount + interest_amount - self.monthly_repayment_amount)'], {}), '(balance_amount + interest_amount - self.monthly_repayment_amount)\n', (3509, 3575), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((3984, 4011), 'frappe.utils.add_months', 'add_months', (['payment_date', '(1)'], {}), '(payment_date, 1)\n', (3994, 4011), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((4930, 4947), 'frappe.throw', 'frappe.throw', (['msg'], {}), '(msg)\n', (4942, 4947), False, 'import frappe, math, json\n'), ((6304, 6353), 'frappe._', '_', (['"""Repayment Method is mandatory for term loans"""'], {}), "('Repayment Method is mandatory for term loans')\n", (6305, 6353), 
False, 'from frappe import _\n'), ((6453, 6488), 'frappe._', '_', (['"""Please enter Repayment Periods"""'], {}), "('Please enter Repayment Periods')\n", (6454, 6488), False, 'from frappe import _\n'), ((6913, 6934), 'frappe.utils.flt', 'flt', (['rate_of_interest'], {}), '(rate_of_interest)\n', (6916, 6934), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((2053, 2108), 'frappe._', '_', (['"""Loan Security Pledge is mandatory for secured loan"""'], {}), "('Loan Security Pledge is mandatory for secured loan')\n", (2054, 2108), False, 'from frappe import _\n'), ((2889, 2917), 'frappe.utils.flt', 'flt', (['sanctioned_amount_limit'], {}), '(sanctioned_amount_limit)\n', (2892, 2917), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((3132, 3185), 'frappe._', '_', (['"""Repayment Start Date is mandatory for term loans"""'], {}), "('Repayment Start Date is mandatory for term loans')\n", (3133, 3185), False, 'from frappe import _\n'), ((4992, 5021), 'frappe._', '_', (['"""Loan amount is mandatory"""'], {}), "('Loan amount is mandatory')\n", (4993, 5021), False, 'from frappe import _\n'), ((5205, 5219), 'frappe.utils.now_datetime', 'now_datetime', ([], {}), '()\n', (5217, 5219), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((6599, 6633), 'frappe._', '_', (['"""Please enter repayment Amount"""'], {}), "('Please enter repayment Amount')\n", (6600, 6633), False, 'from frappe import _\n'), ((6696, 6760), 'frappe._', '_', (['"""Monthly Repayment Amount cannot be greater than Loan Amount"""'], {}), "('Monthly Repayment Amount cannot be greater than Loan Amount')\n", (6697, 6760), False, 'from frappe import _\n'), ((7183, 7199), 'frappe.utils.flt', 'flt', (['loan_amount'], {}), '(loan_amount)\n', (7186, 7199), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((2496, 2559), 'frappe._', '_', 
(['"""Loan Security Pledge Company and Loan Company must be same"""'], {}), "('Loan Security Pledge Company and Loan Company must be same')\n", (2497, 2559), False, 'from frappe import _\n'), ((2840, 2861), 'frappe.utils.flt', 'flt', (['self.loan_amount'], {}), '(self.loan_amount)\n', (2843, 2861), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((2864, 2886), 'frappe.utils.flt', 'flt', (['total_loan_amount'], {}), '(total_loan_amount)\n', (2867, 2886), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((3012, 3039), 'frappe.bold', 'frappe.bold', (['self.applicant'], {}), '(self.applicant)\n', (3023, 3039), False, 'import frappe, math, json\n'), ((4851, 4894), 'frappe._', '_', (['"""Loan amount cannot be greater than {0}"""'], {}), "('Loan amount cannot be greater than {0}')\n", (4852, 4894), False, 'from frappe import _\n'), ((1259, 1284), 'frappe.bold', 'frappe.bold', (['self.company'], {}), '(self.company)\n', (1270, 1284), False, 'import frappe, math, json\n'), ((2935, 2983), 'frappe._', '_', (['"""Sanctioned Amount limit crossed for {0} {1}"""'], {}), "('Sanctioned Amount limit crossed for {0} {1}')\n", (2936, 2983), False, 'from frappe import _\n'), ((3373, 3399), 'frappe.utils.flt', 'flt', (['self.rate_of_interest'], {}), '(self.rate_of_interest)\n', (3376, 3399), False, 'from frappe.utils import flt, rounded, add_months, nowdate, getdate, now_datetime\n'), ((1164, 1212), 'frappe._', '_', (['"""Account {0} does not belongs to company {1}"""'], {}), "('Account {0} does not belongs to company {1}')\n", (1165, 1212), False, 'from frappe import _\n'), ((2330, 2388), 'frappe._', '_', (['"""Loan Security Pledge already pledged against loan {0}"""'], {}), "('Loan Security Pledge already pledged against loan {0}')\n", (2331, 2388), False, 'from frappe import _\n')]
|
import importlib
import pkgutil
import sys
import logging
import re
from PySide2 import QtWidgets
from . import plugins
def dcc_plugins():
    """Return plugins whose short name has no underscore (plain DCC plugins)."""
    return {
        name: module
        for name, module in all_plugins().items()
        if '_' not in name
    }
def all_plugins():
    """Map short plugin name -> absolute module name for every plugin module."""
    discovered = {}
    for _finder, module_name, _ispkg in iter_namespace(plugins):
        short_name = module_name.rsplit('.', 1)[-1]
        discovered[short_name] = module_name
    return discovered
def render_plugins(dcc):
    """Return renderer plugins for *dcc*, keyed by renderer name.

    Plugins named '<dcc>_<renderer>' are matched and the '<dcc>_' prefix
    is stripped from the key.
    """
    prefix = re.compile('{}_'.format(re.escape(dcc)))
    matched = {}
    for key, value in all_plugins().items():
        if not prefix.match(key):
            continue
        matched[prefix.sub('', key)] = value
    return matched
def render_plugin(dcc, renderer):
    """Look up the plugin module for a dcc/renderer pair, or None."""
    return all_plugins().get('{}_{}'.format(dcc, renderer))
def iter_namespace(ns_pkg):
    """Yield (finder, name, ispkg) for every module inside *ns_pkg*.

    Passing a prefix to iter_modules makes the reported names absolute
    rather than relative, so import_module can load them directly without
    any further massaging.
    """
    prefix = ns_pkg.__name__ + '.'
    return pkgutil.iter_modules(ns_pkg.__path__, prefix)
def plugin_class(cls, plugin):
    """Swap *cls* for its plugin override, falling back to *cls* itself.

    Looks for a class with the same name inside ``<package>.plugins.<plugin>``;
    when that module cannot be imported, the failure is logged and the
    original class is returned unchanged.
    """
    module_name = '.{}'.format(plugin)
    package = '{}.plugins'.format(__package__ or '')
    try:
        module = importlib.import_module(module_name, package=package)
    except ImportError as error:
        logging.error(error)
        logging.error(
            'Could not find plugin: "{}{}" '
            'Using base class instead.'.format(package, module_name))
        return cls
    return getattr(module, cls.__name__)
|
[
"re.escape",
"logging.error",
"pkgutil.iter_modules",
"importlib.import_module"
] |
[((1145, 1205), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['ns_pkg.__path__', "(ns_pkg.__name__ + '.')"], {}), "(ns_pkg.__path__, ns_pkg.__name__ + '.')\n", (1165, 1205), False, 'import pkgutil\n'), ((1352, 1400), 'importlib.import_module', 'importlib.import_module', (['plugin'], {'package': 'package'}), '(plugin, package=package)\n', (1375, 1400), False, 'import importlib\n'), ((533, 547), 're.escape', 're.escape', (['dcc'], {}), '(dcc)\n', (542, 547), False, 'import re\n'), ((1482, 1498), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (1495, 1498), False, 'import logging\n')]
|
import json
import logging
from typing import Generator, Optional, Type, TypeVar
from urllib.parse import urlencode, urljoin
from urllib.request import urlopen
import requests
from thenewboston_node.business_logic.blockchain.base import BlockchainBase
from thenewboston_node.business_logic.blockchain.file_blockchain.sources import URLBlockSource
from thenewboston_node.business_logic.models import Block, BlockchainState
from thenewboston_node.business_logic.utils.blockchain_state import read_blockchain_state_file_from_source
from thenewboston_node.core.utils.types import hexstr
# Module-level logger, plus the TypeVar used so get_instance() returns the
# concrete subclass type rather than plain NodeClient.
logger = logging.getLogger(__name__)
T = TypeVar('T', bound='NodeClient')
def setdefault_if_not_none(dict_, key, value):
    """setdefault(key, value) on *dict_*, skipped entirely when value is None."""
    if value is None:
        return
    dict_.setdefault(key, value)
def requests_get(url):
    """Thin wrapper over requests.get, kept separate so unit tests can mock it."""
    response = requests.get(url)
    return response
class NodeClient:
    """Framework-agnostic HTTP client for the thenewboston node REST API."""

    _instance = None

    @classmethod
    def get_instance(cls: Type[T]) -> T:
        """Return the process-wide singleton, creating it on first use."""
        instance = cls._instance
        if not instance:
            cls._instance = instance = cls()

        return instance

    @staticmethod
    def http_get(network_address, resource, *, parameters=None, should_raise=True):
        """GET ``<network_address>/api/v1/<resource>/`` and return decoded JSON.

        When ``should_raise`` is False, network errors, non-OK statuses and
        non-JSON bodies are logged and reported as None instead of raising.
        """
        # We do not use reverse() because client must be framework agnostic
        url = urljoin(network_address, f'/api/v1/{resource}/')
        if parameters:
            url += '?' + urlencode(parameters)

        try:
            response = requests_get(url)
        except Exception:
            logger.warning('Could not GET %s', url, exc_info=True)
            if should_raise:
                raise
            else:
                return None

        if should_raise:
            response.raise_for_status()
        else:
            status_code = response.status_code
            if status_code != requests.codes.ok:
                logger.warning('Could not GET %s: HTTP%s: %s', url, status_code, response.text)
                return None

        try:
            data = response.json()
        except json.decoder.JSONDecodeError:
            if should_raise:
                raise
            else:
                logger.warning('Non-JSON response GET %s: %s', url, response.text, exc_info=True)
                return None

        return data

    def list_resource(
        self,
        network_address,
        resource,
        *,
        offset=None,
        limit=None,
        ordering=None,
        parameters=None,
        should_raise=True
    ):
        """GET a list endpoint, adding offset/limit/ordering query params when given."""
        parameters = parameters or {}
        setdefault_if_not_none(parameters, 'offset', offset)
        setdefault_if_not_none(parameters, 'limit', limit)
        setdefault_if_not_none(parameters, 'ordering', ordering)
        return self.http_get(network_address, resource, parameters=parameters, should_raise=should_raise)

    def get_latest_blockchain_state_meta_by_network_address(self, network_address) -> Optional[dict]:
        """Return the newest blockchain-state meta dict, or None if unavailable."""
        data = self.list_resource(
            network_address, 'blockchain-states-meta', limit=1, ordering='-last_block_number', should_raise=False
        )
        if not data:
            return None

        results = data['results']
        if not results:
            return None

        return results[0]

    def get_latest_blockchain_state_binary_by_network_address(self, network_address) -> Optional[tuple[bytes, str]]:
        """Download the newest blockchain state; return (binary, source_url) or None."""
        meta = self.get_latest_blockchain_state_meta_by_network_address(network_address)
        if meta is None:
            return None

        for url in meta['urls']:
            logger.debug('Trying to get blockchain state binary from %s', url)
            try:
                with urlopen(url) as fo:
                    return fo.read(), url
            except IOError:
                logger.warning('Unable to read blockchain state from %s', url, exc_info=True)
                continue

        return None

    def get_latest_blockchain_state_by_network_address(self, network_address) -> Optional[BlockchainState]:
        """Download and deserialize the newest blockchain state, trying each URL."""
        meta = self.get_latest_blockchain_state_meta_by_network_address(network_address)
        if meta is None:
            return None

        for url in meta['urls']:
            try:
                return read_blockchain_state_file_from_source(url)
            except IOError:
                logger.warning('Unable to read blockchain state from %s', url, exc_info=True)
                continue

        logger.warning('Could not read latest blockchain state from node: %s', network_address)
        return None

    def get_latest_blockchain_state_meta_by_network_addresses(self, network_addresses) -> Optional[dict]:
        for network_address in network_addresses:
            # TODO(dmu) CRITICAL: Try another network_address only if this one is unavailable
            return self.get_latest_blockchain_state_meta_by_network_address(network_address)

        return None

    def list_block_chunks_meta_by_network_address(
        self, network_address, from_block_number=None, to_block_number=None, offset=None, limit=None, direction=1
    ):
        """List block-chunk metas ascending (direction=1) or descending (-1).

        Returns None when the node could not be queried.
        """
        assert direction in (1, -1)

        parameters = {}
        setdefault_if_not_none(parameters, 'from_block_number', from_block_number)
        setdefault_if_not_none(parameters, 'to_block_number', to_block_number)

        data = self.list_resource(
            network_address,
            'block-chunks-meta',
            offset=offset,
            limit=limit,
            ordering='start_block_number' if direction == 1 else '-start_block_number',
            parameters=parameters,
            should_raise=False
        )
        return None if data is None else data['results']

    def get_latest_block_chunk_meta_by_network_address(self, network_address) -> Optional[dict]:
        results = self.list_block_chunks_meta_by_network_address(network_address, limit=1, direction=-1)
        return results[0] if results else None

    def get_last_block_number_by_network_address(self, network_address):
        block_chunk_meta = self.get_latest_block_chunk_meta_by_network_address(network_address)
        if block_chunk_meta:
            return block_chunk_meta['end_block_number']

        return None

    def get_latest_blockchain_state_meta_by_node_identifier(self, blockchain: BlockchainBase,
                                                            node_identifier: hexstr) -> Optional[dict]:
        """Resolve a node's network addresses and fetch its newest state meta."""
        node = blockchain.get_node_by_identifier(node_identifier)
        if node is None:
            return None

        network_addresses = node.network_addresses
        if not network_addresses:
            return None

        return self.get_latest_blockchain_state_meta_by_network_addresses(network_addresses)

    def yield_blocks_slice(self, network_address, from_block_number: int,
                           to_block_number: int) -> Generator[Block, None, None]:
        """Yield blocks with numbers in [from_block_number, to_block_number]."""
        # TODO(dmu) MEDIUM: Consider improvements for network failovers
        # by the moment of downloading the last (incomplete) block chunk its name may change
        # (because of becoming complete) therefore we retry
        last_block_number = None
        for _ in range(2):
            block_chunks = self.list_block_chunks_meta_by_network_address(
                network_address, from_block_number=from_block_number, to_block_number=to_block_number
            )
            # BUGFIX: list_block_chunks_meta_by_network_address() returns None
            # on failure; iterating None raised TypeError.
            for block_chunk in block_chunks or ():
                # TODO(dmu) HIGH: Support download from more than one URL
                url = block_chunk['urls'][0]
                source = URLBlockSource(url)
                try:
                    source.force_read()
                except Exception:
                    logger.warning('Error trying to download %s', url)
                    break

                # BUGFIX: iterate the already force-read `source` instead of
                # constructing a second URLBlockSource(url), which downloaded
                # the same chunk twice.
                for block in source:
                    block_number = block.get_block_number()
                    if from_block_number is not None and block_number < from_block_number:
                        # TODO(dmu) LOW: This can be optimized by applying the codition only to first block chunk
                        # (be careful first block chunk may be also the last)
                        # skip not requested block
                        continue

                    if last_block_number is not None and block_number <= last_block_number:
                        # TODO(dmu) LOW: This maybe excessive precaution
                        # We have seen this block already
                        continue

                    if to_block_number is not None and to_block_number < block_number:
                        return

                    yield block
                    last_block_number = block_number

            if last_block_number is None:
                continue

            assert to_block_number is None or last_block_number <= to_block_number
            if to_block_number is not None and last_block_number >= to_block_number:  # defensive programming
                break

            from_block_number = last_block_number + 1
|
[
"urllib.parse.urljoin",
"urllib.parse.urlencode",
"thenewboston_node.business_logic.blockchain.file_blockchain.sources.URLBlockSource",
"urllib.request.urlopen",
"thenewboston_node.business_logic.utils.blockchain_state.read_blockchain_state_file_from_source",
"requests.get",
"typing.TypeVar",
"logging.getLogger"
] |
[((595, 622), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (612, 622), False, 'import logging\n'), ((628, 660), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': '"""NodeClient"""'}), "('T', bound='NodeClient')\n", (635, 660), False, 'from typing import Generator, Optional, Type, TypeVar\n'), ((869, 886), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (881, 886), False, 'import requests\n'), ((1308, 1356), 'urllib.parse.urljoin', 'urljoin', (['network_address', 'f"""/api/v1/{resource}/"""'], {}), "(network_address, f'/api/v1/{resource}/')\n", (1315, 1356), False, 'from urllib.parse import urlencode, urljoin\n'), ((1405, 1426), 'urllib.parse.urlencode', 'urlencode', (['parameters'], {}), '(parameters)\n', (1414, 1426), False, 'from urllib.parse import urlencode, urljoin\n'), ((4200, 4243), 'thenewboston_node.business_logic.utils.blockchain_state.read_blockchain_state_file_from_source', 'read_blockchain_state_file_from_source', (['url'], {}), '(url)\n', (4238, 4243), False, 'from thenewboston_node.business_logic.utils.blockchain_state import read_blockchain_state_file_from_source\n'), ((7506, 7525), 'thenewboston_node.business_logic.blockchain.file_blockchain.sources.URLBlockSource', 'URLBlockSource', (['url'], {}), '(url)\n', (7520, 7525), False, 'from thenewboston_node.business_logic.blockchain.file_blockchain.sources import URLBlockSource\n'), ((7748, 7767), 'thenewboston_node.business_logic.blockchain.file_blockchain.sources.URLBlockSource', 'URLBlockSource', (['url'], {}), '(url)\n', (7762, 7767), False, 'from thenewboston_node.business_logic.blockchain.file_blockchain.sources import URLBlockSource\n'), ((3649, 3661), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (3656, 3661), False, 'from urllib.request import urlopen\n')]
|
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2, java_heap_GB=7)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_poker_1m_fvec(self):
h2o.beta_features = True
csvPathname = 'poker/poker-hand-testing.data'
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, schema='put')
h2o_cmd.runRF(parseResult=parseResult, trees=3, timeoutSecs=800, retryDelaySecs=5)
if __name__ == '__main__':
h2o.unit_main()
|
[
"h2o.tear_down_cloud",
"h2o.unit_main",
"h2o_cmd.runRF",
"h2o.build_cloud",
"sys.path.extend",
"h2o_hosts.build_cloud_with_hosts",
"h2o_import.import_parse",
"h2o.check_sandbox_for_errors",
"h2o.decide_if_localhost"
] |
[((27, 61), 'sys.path.extend', 'sys.path.extend', (["['.', '..', 'py']"], {}), "(['.', '..', 'py'])\n", (42, 61), False, 'import unittest, time, sys\n'), ((842, 857), 'h2o.unit_main', 'h2o.unit_main', ([], {}), '()\n', (855, 857), False, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n'), ((175, 205), 'h2o.check_sandbox_for_errors', 'h2o.check_sandbox_for_errors', ([], {}), '()\n', (203, 205), False, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n'), ((269, 294), 'h2o.decide_if_localhost', 'h2o.decide_if_localhost', ([], {}), '()\n', (292, 294), False, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n'), ((481, 502), 'h2o.tear_down_cloud', 'h2o.tear_down_cloud', ([], {}), '()\n', (500, 502), False, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n'), ((650, 718), 'h2o_import.import_parse', 'h2i.import_parse', ([], {'bucket': '"""smalldata"""', 'path': 'csvPathname', 'schema': '"""put"""'}), "(bucket='smalldata', path=csvPathname, schema='put')\n", (666, 718), True, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n'), ((727, 813), 'h2o_cmd.runRF', 'h2o_cmd.runRF', ([], {'parseResult': 'parseResult', 'trees': '(3)', 'timeoutSecs': '(800)', 'retryDelaySecs': '(5)'}), '(parseResult=parseResult, trees=3, timeoutSecs=800,\n retryDelaySecs=5)\n', (740, 813), False, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n'), ((331, 365), 'h2o.build_cloud', 'h2o.build_cloud', (['(2)'], {'java_heap_GB': '(7)'}), '(2, java_heap_GB=7)\n', (346, 365), False, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n'), ((392, 426), 'h2o_hosts.build_cloud_with_hosts', 'h2o_hosts.build_cloud_with_hosts', ([], {}), '()\n', (424, 426), False, 'import h2o, h2o_cmd, h2o_hosts, h2o_import as h2i\n')]
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from six.moves.urllib import parse
from keystone.common import dependency
#from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LE
from keystone.openstack.common import versionutils
from keystone import token
from keystone.token import provider
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@dependency.requires('assignment_api', 'catalog_api', 'identity_api',
'resource_api', 'role_api')
class V3TokenDataHelper(object):
"""Token data helper."""
def __init__(self):
# Keep __init__ around to ensure dependency injection works.
super(V3TokenDataHelper, self).__init__()
def _get_filtered_domain(self, domain_id):
domain_ref = self.resource_api.get_domain(domain_id)
return {'id': domain_ref['id'], 'name': domain_ref['name']}
def _get_filtered_project(self, project_id):
project_ref = self.resource_api.get_project(project_id)
filtered_project = {
'id': project_ref['id'],
'name': project_ref['name']}
filtered_project['domain'] = self._get_filtered_domain(
project_ref['domain_id'])
return filtered_project
def _populate_scope(self, token_data, domain_id, project_id):
if 'domain' in token_data or 'project' in token_data:
# scope already exist, no need to populate it again
return
if domain_id:
token_data['domain'] = self._get_filtered_domain(domain_id)
if project_id:
token_data['project'] = self._get_filtered_project(project_id)
def _get_roles_for_user(self, user_id, domain_id, project_id):
roles = []
if domain_id:
roles = self.assignment_api.get_roles_for_user_and_domain(
user_id, domain_id)
if project_id:
roles = self.assignment_api.get_roles_for_user_and_project(
user_id, project_id)
return [self.role_api.get_role(role_id) for role_id in roles]
def _populate_roles_for_groups(self, group_ids,
project_id=None, domain_id=None,
user_id=None):
def _check_roles(roles, user_id, project_id, domain_id):
# User was granted roles so simply exit this function.
if roles:
return
if project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': project_id}
elif domain_id:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': domain_id}
# Since no roles were found a user is not authorized to
# perform any operations. Raise an exception with
# appropriate error message.
raise exception.Unauthorized(msg)
roles = self.assignment_api.get_roles_for_groups(group_ids,
project_id,
domain_id)
_check_roles(roles, user_id, project_id, domain_id)
return roles
def _populate_user(self, token_data, user_id):
if 'user' in token_data:
# no need to repopulate user if it already exists
return
user_ref = self.identity_api.get_user(user_id)
filtered_user = {
'id': user_ref['id'],
'name': user_ref['name'],
'domain': self._get_filtered_domain(user_ref['domain_id'])}
token_data['user'] = filtered_user
def _populate_roles(self, token_data, user_id, domain_id, project_id,
access_token):
if 'roles' in token_data:
# no need to repopulate roles
return
if access_token:
filtered_roles = []
authed_role_ids = jsonutils.loads(access_token['role_ids'])
all_roles = self.role_api.list_roles()
for role in all_roles:
for authed_role in authed_role_ids:
if authed_role == role['id']:
filtered_roles.append({'id': role['id'],
'name': role['name']})
token_data['roles'] = filtered_roles
return
token_user_id = user_id
token_project_id = project_id
token_domain_id = domain_id
if token_domain_id or token_project_id:
roles = self._get_roles_for_user(token_user_id,
token_domain_id,
token_project_id)
filtered_roles = []
for role in roles:
filtered_roles.append({'id': role['id'],
'name': role['name']})
# user has no project or domain roles, therefore access denied
if not filtered_roles:
if token_project_id:
msg = _('User %(user_id)s has no access '
'to project %(project_id)s') % {
'user_id': user_id,
'project_id': token_project_id}
else:
msg = _('User %(user_id)s has no access '
'to domain %(domain_id)s') % {
'user_id': user_id,
'domain_id': token_domain_id}
LOG.debug(msg)
raise exception.Unauthorized(msg)
token_data['roles'] = filtered_roles
def _populate_service_catalog(self, token_data, user_id,
domain_id, project_id):
if 'catalog' in token_data:
# no need to repopulate service catalog
return
if project_id or domain_id:
service_catalog = self.catalog_api.get_v3_catalog(
user_id, project_id)
token_data['catalog'] = service_catalog
def _populate_token_dates(self, token_data, expires=None,
issued_at=None):
if not expires:
expires = provider.default_expire_time()
if not isinstance(expires, six.string_types):
expires = timeutils.isotime(expires, subsecond=True)
token_data['expires_at'] = expires
token_data['issued_at'] = (issued_at or
timeutils.isotime(subsecond=True))
def _populate_audit_info(self, token_data, audit_info=None):
if audit_info is None or isinstance(audit_info, six.string_types):
token_data['audit_ids'] = provider.audit_info(audit_info)
elif isinstance(audit_info, list):
token_data['audit_ids'] = audit_info
else:
msg = (_('Invalid audit info data type: %(data)s (%(type)s)') %
{'data': audit_info, 'type': type(audit_info)})
LOG.error(msg)
raise exception.UnexpectedError(msg)
def get_token_data(self, user_id, method_names, extras=None,
domain_id=None, project_id=None, expires=None,
token=None, include_catalog=True,
bind=None, access_token=None, issued_at=None,
audit_info=None):
if extras is None:
extras = {}
if extras:
versionutils.deprecated(
what='passing token data with "extras"',
as_of=versionutils.deprecated.KILO,
in_favor_of='well-defined APIs')
token_data = {'methods': method_names,
'extras': extras}
# We've probably already written these to the token
if token:
for x in ('roles', 'user', 'catalog', 'project', 'domain'):
if x in token:
token_data[x] = token[x]
if bind:
token_data['bind'] = bind
self._populate_scope(token_data, domain_id, project_id)
self._populate_user(token_data, user_id)
self._populate_roles(token_data, user_id, domain_id, project_id,
access_token)
self._populate_audit_info(token_data, audit_info)
if include_catalog:
self._populate_service_catalog(token_data, user_id, domain_id,
project_id)
self._populate_token_dates(token_data, expires=expires,
issued_at=issued_at)
return {'token': token_data}
@dependency.requires('catalog_api', 'identity_api', 'resource_api', 'role_api')
class BaseProvider(provider.Provider):
def __init__(self, *args, **kwargs):
super(BaseProvider, self).__init__(*args, **kwargs)
self.v3_token_data_helper = V3TokenDataHelper()
def get_token_version(self, token_data):
if token_data and isinstance(token_data, dict):
if 'token_version' in token_data:
if token_data['token_version'] in token.provider.VERSIONS:
return token_data['token_version']
# FIXME(morganfainberg): deprecate the following logic in future
# revisions. It is better to just specify the token_version in
# the token_data itself. This way we can support future versions
# that might have the same fields.
if 'token' in token_data and 'methods' in token_data['token']:
return token.provider.V3
raise exception.UnsupportedTokenVersionException()
def issue_v3_token(self, user_id, method_names, expires_at=None,
project_id=None, domain_id=None, auth_context=None,
metadata_ref=None, include_catalog=True,
parent_audit_id=None):
token_ref = None
access_token = None
token_data = self.v3_token_data_helper.get_token_data(
user_id,
method_names,
auth_context.get('extras') if auth_context else None,
domain_id=domain_id,
project_id=project_id,
expires=expires_at,
bind=auth_context.get('bind') if auth_context else None,
token=token_ref,
include_catalog=include_catalog,
access_token=access_token,
audit_info=parent_audit_id)
token_id = self._get_token_id(token_data)
return token_id, token_data
def _verify_token_ref(self, token_ref):
"""Verify and return the given token_ref."""
if not token_ref:
raise exception.Unauthorized()
return token_ref
def _assert_default_domain(self, token_ref):
"""Make sure we are operating on default domain only."""
if (token_ref.get('token_data') and
self.get_token_version(token_ref.get('token_data')) ==
token.provider.V3):
# this is a V3 token
msg = _('Non-default domain is not supported')
# user in a non-default is prohibited
if (token_ref['token_data']['token']['user']['domain']['id'] !=
CONF.identity.admin_domain_id):
raise exception.Unauthorized(msg)
# domain scoping is prohibited
if token_ref['token_data']['token'].get('domain'):
raise exception.Unauthorized(
_('Domain scoped token is not supported'))
# project in non-default domain is prohibited
if token_ref['token_data']['token'].get('project'):
project = token_ref['token_data']['token']['project']
project_domain_id = project['domain']['id']
# scoped to project in non-default domain is prohibited
if project_domain_id != CONF.identity.admin_domain_id:
raise exception.Unauthorized(msg)
metadata_ref = token_ref['metadata']
def validate_v3_token(self, token_ref):
# FIXME(gyee): performance or correctness? Should we return the
# cached token or reconstruct it? Obviously if we are going with
# the cached token, any role, project, or domain name changes
# will not be reflected. One may argue that with PKI tokens,
# we are essentially doing cached token validation anyway.
# Lets go with the cached token strategy. Since token
# management layer is now pluggable, one can always provide
# their own implementation to suit their needs.
token_data = token_ref.get('token_data')
if not token_data or 'token' not in token_data:
# token ref is created by V2 API
project_id = None
project_ref = token_ref.get('tenant')
if project_ref:
project_id = project_ref['id']
issued_at = token_ref['token_data']['access']['token']['issued_at']
audit = token_ref['token_data']['access']['token'].get('audit_ids')
token_data = self.v3_token_data_helper.get_token_data(
token_ref['user']['id'],
['password', '<PASSWORD>'],
project_id=project_id,
bind=token_ref.get('bind'),
expires=token_ref['expires'],
issued_at=issued_at,
audit_info=audit)
return token_data
|
[
"oslo_log.log.getLogger",
"keystone.exception.UnexpectedError",
"keystone.exception.Unauthorized",
"keystone.common.dependency.requires",
"keystone.token.provider.audit_info",
"keystone.openstack.common.versionutils.deprecated",
"oslo_utils.timeutils.isotime",
"keystone.token.provider.default_expire_time",
"oslo_serialization.jsonutils.loads",
"keystone.exception.UnsupportedTokenVersionException",
"keystone.i18n._"
] |
[((1026, 1049), 'oslo_log.log.getLogger', 'log.getLogger', (['__name__'], {}), '(__name__)\n', (1039, 1049), False, 'from oslo_log import log\n'), ((1069, 1169), 'keystone.common.dependency.requires', 'dependency.requires', (['"""assignment_api"""', '"""catalog_api"""', '"""identity_api"""', '"""resource_api"""', '"""role_api"""'], {}), "('assignment_api', 'catalog_api', 'identity_api',\n 'resource_api', 'role_api')\n", (1088, 1169), False, 'from keystone.common import dependency\n'), ((9537, 9615), 'keystone.common.dependency.requires', 'dependency.requires', (['"""catalog_api"""', '"""identity_api"""', '"""resource_api"""', '"""role_api"""'], {}), "('catalog_api', 'identity_api', 'resource_api', 'role_api')\n", (9556, 9615), False, 'from keystone.common import dependency\n'), ((10496, 10540), 'keystone.exception.UnsupportedTokenVersionException', 'exception.UnsupportedTokenVersionException', ([], {}), '()\n', (10538, 10540), False, 'from keystone import exception\n'), ((3768, 3795), 'keystone.exception.Unauthorized', 'exception.Unauthorized', (['msg'], {}), '(msg)\n', (3790, 3795), False, 'from keystone import exception\n'), ((4815, 4856), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (["access_token['role_ids']"], {}), "(access_token['role_ids'])\n", (4830, 4856), False, 'from oslo_serialization import jsonutils\n'), ((7145, 7175), 'keystone.token.provider.default_expire_time', 'provider.default_expire_time', ([], {}), '()\n', (7173, 7175), False, 'from keystone.token import provider\n'), ((7252, 7294), 'oslo_utils.timeutils.isotime', 'timeutils.isotime', (['expires'], {'subsecond': '(True)'}), '(expires, subsecond=True)\n', (7269, 7294), False, 'from oslo_utils import timeutils\n'), ((7421, 7454), 'oslo_utils.timeutils.isotime', 'timeutils.isotime', ([], {'subsecond': '(True)'}), '(subsecond=True)\n', (7438, 7454), False, 'from oslo_utils import timeutils\n'), ((7635, 7666), 'keystone.token.provider.audit_info', 'provider.audit_info', 
(['audit_info'], {}), '(audit_info)\n', (7654, 7666), False, 'from keystone.token import provider\n'), ((8377, 8515), 'keystone.openstack.common.versionutils.deprecated', 'versionutils.deprecated', ([], {'what': '"""passing token data with "extras\\""""', 'as_of': 'versionutils.deprecated.KILO', 'in_favor_of': '"""well-defined APIs"""'}), '(what=\'passing token data with "extras"\', as_of=\n versionutils.deprecated.KILO, in_favor_of=\'well-defined APIs\')\n', (8400, 8515), False, 'from keystone.openstack.common import versionutils\n'), ((11579, 11603), 'keystone.exception.Unauthorized', 'exception.Unauthorized', ([], {}), '()\n', (11601, 11603), False, 'from keystone import exception\n'), ((11946, 11986), 'keystone.i18n._', '_', (['"""Non-default domain is not supported"""'], {}), "('Non-default domain is not supported')\n", (11947, 11986), False, 'from keystone.i18n import _, _LE\n'), ((6494, 6521), 'keystone.exception.Unauthorized', 'exception.Unauthorized', (['msg'], {}), '(msg)\n', (6516, 6521), False, 'from keystone import exception\n'), ((7961, 7991), 'keystone.exception.UnexpectedError', 'exception.UnexpectedError', (['msg'], {}), '(msg)\n', (7986, 7991), False, 'from keystone import exception\n'), ((12187, 12214), 'keystone.exception.Unauthorized', 'exception.Unauthorized', (['msg'], {}), '(msg)\n', (12209, 12214), False, 'from keystone import exception\n'), ((3143, 3204), 'keystone.i18n._', '_', (['"""User %(user_id)s has no access to project %(project_id)s"""'], {}), "('User %(user_id)s has no access to project %(project_id)s')\n", (3144, 3204), False, 'from keystone.i18n import _, _LE\n'), ((7792, 7846), 'keystone.i18n._', '_', (['"""Invalid audit info data type: %(data)s (%(type)s)"""'], {}), "('Invalid audit info data type: %(data)s (%(type)s)')\n", (7793, 7846), False, 'from keystone.i18n import _, _LE\n'), ((12387, 12428), 'keystone.i18n._', '_', (['"""Domain scoped token is not supported"""'], {}), "('Domain scoped token is not supported')\n", 
(12388, 12428), False, 'from keystone.i18n import _, _LE\n'), ((12851, 12878), 'keystone.exception.Unauthorized', 'exception.Unauthorized', (['msg'], {}), '(msg)\n', (12873, 12878), False, 'from keystone import exception\n'), ((3388, 3447), 'keystone.i18n._', '_', (['"""User %(user_id)s has no access to domain %(domain_id)s"""'], {}), "('User %(user_id)s has no access to domain %(domain_id)s')\n", (3389, 3447), False, 'from keystone.i18n import _, _LE\n'), ((5971, 6032), 'keystone.i18n._', '_', (['"""User %(user_id)s has no access to project %(project_id)s"""'], {}), "('User %(user_id)s has no access to project %(project_id)s')\n", (5972, 6032), False, 'from keystone.i18n import _, _LE\n'), ((6232, 6291), 'keystone.i18n._', '_', (['"""User %(user_id)s has no access to domain %(domain_id)s"""'], {}), "('User %(user_id)s has no access to domain %(domain_id)s')\n", (6233, 6291), False, 'from keystone.i18n import _, _LE\n')]
|
from __future__ import division
import numpy as np
from pdb import set_trace
class Counter:
def __init__(self, before, after, indx):
self.indx = indx
self.actual = before
self.predicted = after
self.TP, self.TN, self.FP, self.FN = 0, 0, 0, 0
for a, b in zip(self.actual, self.predicted):
if a == indx and b == indx:
self.TP += 1
elif a == b and a != indx:
self.TN += 1
elif a != indx and b == indx:
self.FP += 1
elif a == indx and b != indx:
self.FN += 1
elif a != indx and b != indx:
pass
def stats(self):
try:
Sen = self.TP / (self.TP + self.FN)
Spec = self.TN / (self.TN + self.FP)
Prec = self.TP / (self.TP + self.FP)
# Acc = (self.TP + self.TN) / (self.TP + self.FN + self.TN + self.FP)
F1 = 2 * (Prec * Sen) / (Prec + Sen)
G = np.sqrt(Sen * Spec)
# ED = np.sqrt(0.6*(1-Sen)**2+0.3*(1-Spec)**2)
ED = 2 * Sen * Spec / (Sen + Spec)
return Sen * 100, (1 - Spec) * 100, Prec * 100, Sen * 100, F1 * 100, ED * 100, G * 100
except ZeroDivisionError:
return 0, 0, 0, 0, 0, 0, 0
class ABCD:
""" Statistics Stuff, confusion matrix, all that jazz...
"""
def __init__(self, before, after):
self.actual = before
self.predicted = after
def __call__(self):
uniques = set(self.actual)
for u in list(uniques):
yield Counter(self.actual, self.predicted, indx=u)
|
[
"numpy.sqrt"
] |
[((1004, 1023), 'numpy.sqrt', 'np.sqrt', (['(Sen * Spec)'], {}), '(Sen * Spec)\n', (1011, 1023), True, 'import numpy as np\n')]
|
"""
forms.py
Web forms based on Flask-WTForms
See: http://flask.pocoo.org/docs/patterns/wtforms/
http://wtforms.simplecodes.com/
"""
from flaskext import wtf
from flaskext.wtf import validators
from wtforms.ext.appengine.ndb import model_form
from .models import SchoolModel
class ClassicExampleForm(wtf.Form):
example_name = wtf.TextField('Name', validators=[validators.Required()])
example_description = wtf.TextAreaField('Description', validators=[validators.Required()])
# App Engine ndb model form example
ExampleForm = model_form(SchoolModel, wtf.Form, field_args={
'example_name': dict(validators=[validators.Required()]),
'example_description': dict(validators=[validators.Required()]),
})
|
[
"flaskext.wtf.validators.Required"
] |
[((376, 397), 'flaskext.wtf.validators.Required', 'validators.Required', ([], {}), '()\n', (395, 397), False, 'from flaskext.wtf import validators\n'), ((471, 492), 'flaskext.wtf.validators.Required', 'validators.Required', ([], {}), '()\n', (490, 492), False, 'from flaskext.wtf import validators\n'), ((630, 651), 'flaskext.wtf.validators.Required', 'validators.Required', ([], {}), '()\n', (649, 651), False, 'from flaskext.wtf import validators\n'), ((699, 720), 'flaskext.wtf.validators.Required', 'validators.Required', ([], {}), '()\n', (718, 720), False, 'from flaskext.wtf import validators\n')]
|
"""Blocks of layers to build models.
"""
import tensorflow as tf
from tensorflow.keras import layers as kl
def conv2d_block(filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
activation='relu',
batch_normalization=True,
name='conv2d_block'):
conv = kl.Conv2D(filters,
kernel_size,
strides,
padding,
name=name + '/conv2d')
if batch_normalization:
batch_norm = kl.BatchNormalization(name=name + '/bn')
if callable(activation):
activation = activation
else:
activation = kl.Activation(activation,
name=name + '/activation')
def call(x):
x = conv(x)
if batch_normalization:
x = batch_norm(x)
x = activation(x)
return x
return call
def conv2d_encoder(filter_base,
num_res_levels,
blocks_per_res,
batch_normalization='all',
name='conv2d_encoder',
**conv_kwargs):
filters = [filter_base * 2**i for i in range(num_res_levels)]
if batch_normalization == 'all':
bn = True
activate_bn = True
elif batch_normalization == 'after-first':
bn = False
activate_bn = True
elif batch_normalization == 'none':
bn = False
activate_bn = False
else:
raise ValueError("batch_normalization must be one of ['all', 'after-first', 'none'].")
# Build the layers
layers = []
for f_idx, f in enumerate(filters):
for idx in range(blocks_per_res - 1):
lay = conv2d_block(f,
strides=(1, 1),
batch_normalization=bn,
name=name + '/block_' + str(f_idx) + '_' +
str(idx),
**conv_kwargs)
layers.append(lay)
bn = bn or activate_bn
lay = conv2d_block(f,
strides=(2, 2),
batch_normalization=bn,
name=name + '/block_' + str(f_idx) + '_' +
str(blocks_per_res),
**conv_kwargs)
layers.append(lay)
bn = bn or activate_bn
def call(x):
for l in layers:
x = l(x)
return x
return call
def residual_block(layers):
def call(x):
inp = x
for l in layers:
x = l(x)
return x + inp
return call
def conv2d_residual_block(filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
activation='relu',
batch_normalization=True,
name='conv2d_residual'):
return residual_block([
conv2d_block(filters,
kernel_size,
strides,
padding,
activation,
batch_normalization,
name=name + '/conv_block_0'),
conv2d_block(filters,
kernel_size,
strides,
padding,
'linear',
batch_normalization,
name=name + '/conv_block_1')
])
def conv2d_residual_encoder(num_residual_blocks=16,
activation=None,
name='conv2d_res_encoder'):
if activation is None:
activation = kl.PReLU(shared_axes=[1, 2])
residual_blocks = []
for idx in range(num_residual_blocks):
residual_blocks.append(
conv2d_residual_block(activation=activation,
filters=64,
kernel_size=(3, 3),
name=name + '/res_block_' + str(idx)))
layers = [
conv2d_block(64,
kernel_size=(9, 9),
strides=(1, 1),
batch_normalization=False,
activation=activation,
name=name + '/conv2d_first'),
residual_block(layers=[
*residual_blocks,
conv2d_block(filters=64,
kernel_size=(3, 3),
activation='linear',
name=name + '/conv2d_last')
]),
]
def call(x):
for l in layers:
x = l(x)
return x
return call
def rrdb(filters=64,
inner_filters=32,
res_factor=0.2,
num_blocks=3,
name='rrdb',
**rdb_kwargs):
"""Residual-in-Residual Dense Block
"""
blocks = []
for idx in range(num_blocks):
blocks.append(
residual_dense_block(filters, inner_filters,
name=name + '/rdb_' + str(idx),
**rdb_kwargs))
def call(x):
x_res = x
for b in blocks:
x_res = b(x_res)
return x_res * res_factor + x
return call
def residual_dense_block(
filters=64,
inner_filters=32,
num_conv=5,
res_factor=0.2,
use_bias=True,
activation='relu',
name='rdb'):
"""Residual Dense Block
"""
conv_layers = []
filters_list = [inner_filters] * (num_conv - 1) + [filters]
for idx, f in enumerate(filters_list):
conv_layers.append(
kl.Conv2D(f, (3, 3), (1, 1), 'SAME',
use_bias=use_bias,
name=name + '/conv_' + str(idx)))
# Activation
if callable(activation):
activation = activation
else:
activation = kl.Activation(activation)
def call(x):
x_cat = x
for l in conv_layers[:-1]:
x_conv = activation(l(x_cat))
x_cat = tf.concat([x_cat, x_conv], axis=-1)
return conv_layers[-1](x_cat) * res_factor + x
return call
|
[
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.concat",
"tensorflow.keras.layers.PReLU",
"tensorflow.keras.layers.Activation"
] |
[((367, 439), 'tensorflow.keras.layers.Conv2D', 'kl.Conv2D', (['filters', 'kernel_size', 'strides', 'padding'], {'name': "(name + '/conv2d')"}), "(filters, kernel_size, strides, padding, name=name + '/conv2d')\n", (376, 439), True, 'from tensorflow.keras import layers as kl\n'), ((573, 613), 'tensorflow.keras.layers.BatchNormalization', 'kl.BatchNormalization', ([], {'name': "(name + '/bn')"}), "(name=name + '/bn')\n", (594, 613), True, 'from tensorflow.keras import layers as kl\n'), ((706, 758), 'tensorflow.keras.layers.Activation', 'kl.Activation', (['activation'], {'name': "(name + '/activation')"}), "(activation, name=name + '/activation')\n", (719, 758), True, 'from tensorflow.keras import layers as kl\n'), ((3743, 3771), 'tensorflow.keras.layers.PReLU', 'kl.PReLU', ([], {'shared_axes': '[1, 2]'}), '(shared_axes=[1, 2])\n', (3751, 3771), True, 'from tensorflow.keras import layers as kl\n'), ((5941, 5966), 'tensorflow.keras.layers.Activation', 'kl.Activation', (['activation'], {}), '(activation)\n', (5954, 5966), True, 'from tensorflow.keras import layers as kl\n'), ((6100, 6135), 'tensorflow.concat', 'tf.concat', (['[x_cat, x_conv]'], {'axis': '(-1)'}), '([x_cat, x_conv], axis=-1)\n', (6109, 6135), True, 'import tensorflow as tf\n')]
|
import pytest
import mock
import json
from prometheus_udp_gateway import (
ReceiveMetricProtocol, UDPRegistry, Counter
)
@pytest.fixture()
def udp_registry():
return UDPRegistry(host='invalid', port=0000)
@pytest.fixture()
def counter(udp_registry):
counter = Counter('test_counter', 'Test', registry=udp_registry)
counter._gateway_client.send = mock.MagicMock()
return counter
def test_udp_counter_inc(counter):
counter.inc()
counter._gateway_client.send.assert_called_with(
data={
'name': 'test_counter',
'method': 'inc',
'value': 1
})
def test_udp_counter_inc_value(counter):
counter.inc(2)
counter._gateway_client.send.assert_called_with(
data={
'name': 'test_counter',
'method': 'inc',
'value': 2
})
@pytest.fixture()
def receive_metric_protocol():
receive_metric_protocol = ReceiveMetricProtocol(
log=mock.Mock(),
metrics={
'test_counter': mock.Mock(),
}
)
return receive_metric_protocol
def test_receive_incorrect_format(receive_metric_protocol):
receive_metric_protocol.datagramReceived(b'{test', ('invalid_host', 0000))
receive_metric_protocol._log.error.assert_called()
def test_receive_incorrect_json(receive_metric_protocol):
receive_metric_protocol.datagramReceived(b'{"t": "est"}', ('invalid_host', 0000))
receive_metric_protocol._log.error.assert_called()
def test_receive_incorrect_method(receive_metric_protocol):
receive_metric_protocol.datagramReceived(
json.dumps({'name': 'test_counter', 'method': 'incorrect', 'value': 1}),
('invalid_host', 000)
)
receive_metric_protocol._log.error.assert_called()
def test_receive_counter_inc(receive_metric_protocol):
receive_metric_protocol.datagramReceived(
json.dumps({'name': 'test_counter', 'method': 'inc', 'value': 1}),
('invalid_host', 0000)
)
receive_metric_protocol._metrics['test_counter'].inc.assert_called_with(1)
def test_receive_counter_inc_value(receive_metric_protocol):
receive_metric_protocol.datagramReceived(
json.dumps({'name': 'test_counter', 'method': 'inc', 'value': 5}),
('invalid_host', 0000)
)
receive_metric_protocol._metrics['test_counter'].inc.assert_called_with(5)
|
[
"pytest.fixture",
"json.dumps",
"prometheus_udp_gateway.Counter",
"prometheus_udp_gateway.UDPRegistry",
"mock.Mock",
"mock.MagicMock"
] |
[((140, 156), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (154, 156), False, 'import pytest\n'), ((235, 251), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (249, 251), False, 'import pytest\n'), ((892, 908), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (906, 908), False, 'import pytest\n'), ((190, 225), 'prometheus_udp_gateway.UDPRegistry', 'UDPRegistry', ([], {'host': '"""invalid"""', 'port': '(0)'}), "(host='invalid', port=0)\n", (201, 225), False, 'from prometheus_udp_gateway import ReceiveMetricProtocol, UDPRegistry, Counter\n'), ((295, 349), 'prometheus_udp_gateway.Counter', 'Counter', (['"""test_counter"""', '"""Test"""'], {'registry': 'udp_registry'}), "('test_counter', 'Test', registry=udp_registry)\n", (302, 349), False, 'from prometheus_udp_gateway import ReceiveMetricProtocol, UDPRegistry, Counter\n'), ((386, 402), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (400, 402), False, 'import mock\n'), ((1664, 1735), 'json.dumps', 'json.dumps', (["{'name': 'test_counter', 'method': 'incorrect', 'value': 1}"], {}), "({'name': 'test_counter', 'method': 'incorrect', 'value': 1})\n", (1674, 1735), False, 'import json\n'), ((1947, 2012), 'json.dumps', 'json.dumps', (["{'name': 'test_counter', 'method': 'inc', 'value': 1}"], {}), "({'name': 'test_counter', 'method': 'inc', 'value': 1})\n", (1957, 2012), False, 'import json\n'), ((2255, 2320), 'json.dumps', 'json.dumps', (["{'name': 'test_counter', 'method': 'inc', 'value': 5}"], {}), "({'name': 'test_counter', 'method': 'inc', 'value': 5})\n", (2265, 2320), False, 'import json\n'), ((1008, 1019), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1017, 1019), False, 'import mock\n'), ((1069, 1080), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1078, 1080), False, 'import mock\n')]
|
import json
import random
import networkx as nx
from tiledb.cloud.dag import status as st
def build_graph_node_details(nodes):
"""
:param nodes: List of nodes to get status of
:return: tuple of node_colors and node_text
"""
# Loop over statuses to set color and label.
# If you rerun this cell and the one below you can update the graph
node_colors = []
node_text = []
for node in nodes:
status = node.status
if status == st.Status.NOT_STARTED:
node_text.append("{} - Not Started".format(node.name))
node_colors.append("black")
elif status == st.Status.RUNNING:
node_text.append("{} - Running".format(node.name))
node_colors.append("blue")
elif status == st.Status.COMPLETED:
node_text.append("{} - Completed".format(node.name))
node_colors.append("green")
elif status == st.Status.FAILED:
node_text.append("{} - Failed".format(node.name))
node_colors.append("red")
elif status == st.Status.CANCELLED:
node_text.append("{} - Cancelled".format(node.name))
node_colors.append("yellow")
return (node_colors, node_text)
def update_plotly_graph(nodes, fig=None):
"""
Update a graph based on based node status and figure
:param nodes: list of notes to update
:param fig:
:return:
"""
(node_colors, node_text) = build_graph_node_details(nodes)
if fig is not None:
fig.update_traces(
marker=dict(color=node_colors),
text=node_text,
selector=dict(mode="markers"),
)
def update_tiledb_graph(nodes, edges, node_details, positions, fig):
    """
    Push the current graph state into a TileDB plot widget.

    :param nodes: nodes of graph
    :param edges: edges for graph
    :param node_details: per-node detail mapping
    :param positions: per-node position mapping
    :param fig: TileDB plot widget; nothing happens when None
    :return: None
    """
    if fig is None:
        return
    # The widget consumes the whole graph state as one JSON document.
    payload = {
        "nodes": nodes,
        "edges": edges,
        "node_details": node_details,
        "positions": positions,
    }
    fig.setData(json.dumps(payload))
def hierarchy_pos(
    G, root=None, width=1.0, vert_gap=0.2, vert_loc=0, leaf_vs_root_factor=0.5
):
    """
    Compute node positions for plotting a tree in a hierarchical layout.

    Taken from https://epidemicsonnetworks.readthedocs.io/en/latest/_modules/EoN/auxiliary.html#hierarchy_pos
    Licensed under MIT: https://epidemicsonnetworks.readthedocs.io/en/latest/_downloads/8e9c8138fef49ddba8102fa7799c29d7/license.txt
    Based on Joel's answer at https://stackoverflow.com/a/29597209/2966723,
    but with some modifications.  Included because it may be useful for
    plotting transmission trees and there is currently no networkx equivalent.

    Horizontal space can be allocated to nodes in two basic ways:

    - Top down: a node's ``k`` descendants split its horizontal space
      equally.  This tends to result in overlapping nodes when some have
      many descendants.
    - Bottom up: each leaf node gets equal space, and a node at a higher
      level spans the space of its descendant leaves.  Leaves at shallow
      levels get the same space as leaves deep in the tree.

    Both approaches are used simultaneously, blended by
    ``leaf_vs_root_factor``: ``0`` gives pure bottom up, ``1`` pure top down.

    :Arguments:

    **G** the graph (must be a tree)

    **root** the root node of the tree
    - if the tree is directed and this is not given, the root will be found and used
    - if the tree is directed and this is given, then the positions will be
      just for the descendants of this node.
    - if the tree is undirected and not given, then a random choice will be used.

    **width** horizontal space allocated for this branch - avoids overlap with other branches

    **vert_gap** gap between levels of hierarchy

    **vert_loc** vertical location of root

    **leaf_vs_root_factor** blend between bottom-up (0) and top-down (1) allocation

    xcenter: horizontal location of root
    """
    if not nx.is_tree(G):
        raise TypeError("cannot use hierarchy_pos on a graph that is not a tree")
    if root is None:
        # Pick a root: the topological head for digraphs, otherwise arbitrary.
        if isinstance(G, nx.DiGraph):
            root = next(
                iter(nx.topological_sort(G))
            )  # allows back compatibility with nx version 1.11
        else:
            root = random.choice(list(G.nodes))
    def _hierarchy_pos(
        G,
        root,
        leftmost,
        width,
        leafdx=0.2,
        vert_gap=0.2,
        vert_loc=0,
        xcenter=0.5,
        rootpos=None,
        leafpos=None,
        parent=None,
    ):
        """
        Recursive worker; see hierarchy_pos docstring for most arguments.

        rootpos / leafpos: dicts of positions assigned so far via the
        top-down and bottom-up schemes respectively.
        parent: parent of this branch - only used for undirected graphs,
        to avoid walking back up the tree.
        Returns (rootpos, leafpos, number of leaves in this subtree).
        """
        if rootpos is None:
            rootpos = {root: (xcenter, vert_loc)}
        else:
            rootpos[root] = (xcenter, vert_loc)
        if leafpos is None:
            leafpos = {}
        children = list(G.neighbors(root))
        leaf_count = 0
        if not isinstance(G, nx.DiGraph) and parent is not None:
            children.remove(parent)
        if len(children) != 0:
            # Top-down: split this node's width equally among its children.
            rootdx = width / len(children)
            nextx = xcenter - width / 2 - rootdx / 2
            for child in children:
                nextx += rootdx
                rootpos, leafpos, newleaves = _hierarchy_pos(
                    G,
                    child,
                    leftmost + leaf_count * leafdx,
                    width=rootdx,
                    leafdx=leafdx,
                    vert_gap=vert_gap,
                    vert_loc=vert_loc - vert_gap,
                    xcenter=nextx,
                    rootpos=rootpos,
                    leafpos=leafpos,
                    parent=root,
                )
                leaf_count += newleaves
            # Bottom-up: center this node over the span of its children.
            leftmostchild = min((x for x, y in [leafpos[child] for child in children]))
            rightmostchild = max((x for x, y in [leafpos[child] for child in children]))
            leafpos[root] = ((leftmostchild + rightmostchild) / 2, vert_loc)
        else:
            leaf_count = 1
            leafpos[root] = (leftmost, vert_loc)
        # pos[root] = (leftmost + (leaf_count-1)*dx/2., vert_loc)
        # print(leaf_count)
        return rootpos, leafpos, leaf_count
    xcenter = width / 2.0
    # Count the leaves below root so each leaf gets an equal horizontal slot.
    if isinstance(G, nx.DiGraph):
        leafcount = len(
            [node for node in nx.descendants(G, root) if G.out_degree(node) == 0]
        )
    elif isinstance(G, nx.Graph):
        leafcount = len(
            [
                node
                for node in nx.node_connected_component(G, root)
                if G.degree(node) == 1 and node != root
            ]
        )
    # NOTE(review): leafcount would be unbound if G were neither DiGraph nor
    # Graph; nx.is_tree above effectively guarantees a networkx graph type.
    rootpos, leafpos, leaf_count = _hierarchy_pos(
        G,
        root,
        0,
        width,
        leafdx=width * 1.0 / leafcount,
        vert_gap=vert_gap,
        vert_loc=vert_loc,
        xcenter=xcenter,
    )
    # Blend the bottom-up (leafpos) and top-down (rootpos) x coordinates.
    pos = {}
    for node in rootpos:
        pos[node] = (
            leaf_vs_root_factor * leafpos[node][0]
            + (1 - leaf_vs_root_factor) * rootpos[node][0],
            leafpos[node][1],
        )
    # pos = {node:(leaf_vs_root_factor*x1+(1-leaf_vs_root_factor)*x2, y1) for ((x1,y1), (x2,y2)) in (leafpos[node], rootpos[node]) for node in rootpos}
    # Rescale x so the layout spans exactly ``width``.
    xmax = max(x for x, y in pos.values())
    for node in pos:
        pos[node] = (pos[node][0] * width / xmax, pos[node][1])
    return pos
def build_visualization_positions(network):
    """
    Build the positional spacing of all nodes (markers) of ``network``.

    Prefers graphviz ``dot`` (via pydot), which produces the most
    aesthetically pleasing trees, and falls back to the pure-python
    :func:`hierarchy_pos` so users are not required to install graphviz.

    :param network: networkx graph (expected to be a tree for the fallback)
    :return: mapping of node -> (x, y) position
    """
    try:
        # First try to use pydot and dot, as it produces the nicest layout.
        from networkx.drawing.nx_pydot import pydot_layout
        return pydot_layout(network, prog="dot")
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still propagate; any pydot/graphviz failure (missing package,
        # missing ``dot`` binary) triggers the python fallback.
        return hierarchy_pos(network, width=2.0, leaf_vs_root_factor=1.0)
|
[
"networkx.drawing.nx_pydot.pydot_layout",
"networkx.descendants",
"networkx.topological_sort",
"networkx.node_connected_component",
"networkx.is_tree"
] |
[((4470, 4483), 'networkx.is_tree', 'nx.is_tree', (['G'], {}), '(G)\n', (4480, 4483), True, 'import networkx as nx\n'), ((8439, 8472), 'networkx.drawing.nx_pydot.pydot_layout', 'pydot_layout', (['network'], {'prog': '"""dot"""'}), "(network, prog='dot')\n", (8451, 8472), False, 'from networkx.drawing.nx_pydot import pydot_layout\n'), ((4673, 4695), 'networkx.topological_sort', 'nx.topological_sort', (['G'], {}), '(G)\n', (4692, 4695), True, 'import networkx as nx\n'), ((6978, 7001), 'networkx.descendants', 'nx.descendants', (['G', 'root'], {}), '(G, root)\n', (6992, 7001), True, 'import networkx as nx\n'), ((7162, 7198), 'networkx.node_connected_component', 'nx.node_connected_component', (['G', 'root'], {}), '(G, root)\n', (7189, 7198), True, 'import networkx as nx\n')]
|
from __future__ import annotations
import operator
from typing import Any, Collection, Dict, Literal, Optional, Union
import cytoolz
from .. import errors, types
from . import basics
WeightingType = Literal["count", "freq", "binary"]
SpanGroupByType = Literal["lemma", "lemma_", "lower", "lower_", "orth", "orth_"]
TokenGroupByType = Union[SpanGroupByType, Literal["norm", "norm_"]]
def to_bag_of_words(
    doclike: types.DocLike,
    *,
    by: TokenGroupByType = "lemma_",
    weighting: WeightingType = "count",
    **kwargs,
) -> Dict[int, int | float] | Dict[str, int | float]:
    """
    Convert a ``Doc`` or ``Span`` into a bag-of-words mapping.

    Each unique word (grouped per ``by``) is mapped to its absolute count,
    relative frequency, or a binary indicator, depending on ``weighting``.

    Args:
        doclike: spaCy ``Doc`` or ``Span`` to transform.
        by: Token attribute by which tokens are grouped before counting,
            as given by ``getattr(token, by)``: "lemma" (base form without
            inflectional suffixes), "lower" (lowercased text), "norm"
            (normalized text), or "orth" (text exactly as it appears).
            Append an underscore (e.g. "lemma_") to key the bag by strings
            instead of int ids.
        weighting: "count" for absolute numbers of occurrences, "freq" for
            counts normalized by the total token count, or "binary" for
            constant weight 1.
        **kwargs: Passed directly on to :func:`textacy.extract.words()`,
            e.g. ``filter_stops``, ``filter_punct``, ``filter_nums`` to drop
            stop words / punctuation / number-like tokens before counting.

    Returns:
        Mapping of a unique word id or string (depending on ``by``) to its
        weight (depending on ``weighting``).

    Note:
        For "freq" weighting, the resulting frequencies won't (necessarily)
        sum to 1.0: all tokens are used when normalizing counts, but some
        (punctuation, stop words, etc.) may be filtered out of the bag.

    See Also:
        :func:`textacy.extract.words()`
    """
    key_of = operator.attrgetter(by)
    counts = cytoolz.recipes.countby(key_of, basics.words(doclike, **kwargs))
    return _reweight_bag(weighting, counts, doclike)
def to_bag_of_terms(
    doclike: types.DocLike,
    *,
    by: SpanGroupByType = "lemma_",
    weighting: WeightingType = "count",
    ngs: Optional[int | Collection[int] | types.DocLikeToSpans] = None,
    ents: Optional[bool | types.DocLikeToSpans] = None,
    ncs: Optional[bool | types.DocLikeToSpans] = None,
    dedupe: bool = True,
) -> Dict[str, int] | Dict[str, float]:
    """
    Convert a ``Doc`` or ``Span`` into a bag-of-terms mapping, where a
    "term" may be a combination of n-grams, entities, and/or noun chunks.

    Args:
        doclike: spaCy ``Doc`` or ``Span`` to transform.
        by: Span attribute by which terms are grouped before counting, as
            given by ``getattr(token, by)``: "lemma" (base form without
            inflectional suffixes), "lower" (lowercased text), or "orth"
            (text exactly as it appears).  Append an underscore (e.g.
            "lemma_") to key the bag by strings instead of int ids.
        weighting: "count" for absolute numbers of occurrences, "freq" for
            counts normalized by the total token count, or "binary" for
            constant weight 1.
        ngs: N-gram terms to extract: one or multiple ints (via
            :func:`textacy.extract.ngrams(doclike, n=ngs)`), a callable
            applied to ``doclike``, or None for no n-gram terms.
        ents: Entity terms to extract: True (via
            :func:`textacy.extract.entities(doclike)`), a callable applied
            to ``doclike``, or None for no entity terms.
        ncs: Noun chunk terms to extract: True (via
            :func:`textacy.extract.noun_chunks(doclike)`), a callable
            applied to ``doclike``, or None for no noun chunk terms.
        dedupe: If True, deduplicate spans extracted by multiple types
            (e.g. a span that is both an n-gram and an entity), identified
            by identical (start, stop) indexes in ``doclike``.

    Returns:
        Mapping of a unique term id or string (depending on ``by``) to its
        weight (depending on ``weighting``).

    See Also:
        :func:`textacy.extract.terms()`
    """
    spans = basics.terms(doclike, ngs=ngs, ents=ents, ncs=ncs, dedupe=dedupe)
    # spaCy made some awkward changes in the Span API (harder to get int
    # ids, inconsistent with the Token API), so group by string first ...
    if by.startswith("lower"):
        bot = cytoolz.recipes.countby(lambda span: span.text.lower(), spans)
    else:
        str_attr = by if by.endswith("_") else f"{by}_"
        bot = cytoolz.recipes.countby(operator.attrgetter(str_attr), spans)
    # ... then, if int keys were requested, convert strings back to ids.
    if not by.endswith("_"):
        strings = doclike.vocab.strings
        bot = {strings.add(term_str): weight for term_str, weight in bot.items()}
    return _reweight_bag(weighting, bot, doclike)
def _reweight_bag(
weighting: WeightingType, bag: Dict[Any, int], doclike: types.DocLike
) -> Dict[Any, int] | Dict[Any, float]:
if weighting == "count":
return bag
elif weighting == "freq":
n_tokens = len(doclike)
return {term: weight / n_tokens for term, weight in bag.items()}
elif weighting == "binary":
return {term: 1 for term in bag.keys()}
else:
raise ValueError(
errors.value_invalid_msg("weighting", weighting, {"count", "freq", "binary"})
)
|
[
"operator.attrgetter"
] |
[((2670, 2693), 'operator.attrgetter', 'operator.attrgetter', (['by'], {}), '(by)\n', (2689, 2693), False, 'import operator\n'), ((6124, 6148), 'operator.attrgetter', 'operator.attrgetter', (['by_'], {}), '(by_)\n', (6143, 6148), False, 'import operator\n')]
|
'''
Author : <NAME>
Description :
-------------
The following code lets you click on a set of points and then create a curve that fits the set of points.
In order to execute this code, you need to install bokeh,
'''
from bokeh.io import curdoc
from bokeh.plotting import figure, output_file
from bokeh.layouts import column,row
from bokeh.models import ColumnDataSource
from bokeh.models import PointDrawTool
from bokeh.models import Button
from bokeh.events import DoubleTap
from scipy.spatial import ConvexHull
import numpy as np
from scipy import interpolate
from os import sys
from os import system
print('START OF THE PROGRAM')
# When run as a plain python script (no extra argv supplied by 'bokeh serve'),
# re-launch this file under the bokeh server so the interactive app works.
try:
    print(sys.argv)
    if len(sys.argv) <= 1:
        command = 'bokeh serve --show draggablepoints.py --args d1 d2 d3'
        if system(command) != 0:
            print('Error occured in running the program')
            exit()
except Exception:
    # Narrowed from a bare 'except:', which also swallowed the SystemExit
    # raised by exit() above and printed a second, misleading message.
    print('Error in system command (may be)')
    exit()
# Create a plot
fig = figure( title="CAD/Curves/Curve Fit",
plot_width=800,
plot_height=500,
x_range=(-5, 5),
y_range=(-5, 5)
)
fig.title.text_font_size='24pt'
fig.title.text_color='blue'
# Create some data sources
xydict=dict(x=[],y=[])
psource = ColumnDataSource(data=xydict)
csource = ColumnDataSource(data=xydict)
hsource = ColumnDataSource(data=xydict)
# Create some glyphs
lines = fig.line('x', 'y', source=psource)
vertices = fig.square('x', 'y', source=psource,size=10)
curved = fig.line('x','y',source=csource, color='red')
hullbnd = fig.patch('x','y',
source=hsource,
fill_color='red',
fill_alpha=0.1)
# curve fitting/interpolation
def curvefit():
    """Fit a cubic interpolant through the clicked points into csource."""
    print('Interpolation')
    xs = np.array(psource.data['x'])
    ys = np.array(psource.data['y'])
    # Cubic interpolant through the user-placed vertices, sampled densely.
    spline = interpolate.interp1d(xs, ys, kind='cubic')
    dense_x = np.linspace(xs.min(), xs.max(), 100)
    dense_y = spline(dense_x)
    csource.data['x'] = dense_x.tolist()
    csource.data['y'] = dense_y.tolist()
# constructing a convex hull
def conhull():
    """Compute the convex hull of the clicked points into hsource."""
    points = np.array([psource.data['x'], psource.data['y']]).T
    hull = ConvexHull(points)
    # Close the polygon by appending the first hull vertex at the end.
    boundary = np.append(hull.vertices, hull.vertices[0])
    hsource.data['x'] = points[boundary, 0].tolist()
    hsource.data['y'] = points[boundary, 1].tolist()
# clear all
def clearall():
    """Wipe the points, fitted curve, and hull from every data source."""
    for source in (psource, csource, hsource):
        source.data['x'] = []
        source.data['y'] = []
##################################################################
def callback(event):
curvefit()
conhull()
fig.on_event(DoubleTap,callback)
##################################################################
xbutton=Button(label='Exit')
def xbutton_func():
exit()
xbutton.on_click(xbutton_func)
###################################################################
cfitbutton=Button(label='Curve Fit')
def cfit_func():
curvefit()
cfitbutton.on_click(cfit_func)
#########################################################
hullbutton=Button(label='Show Convex Hull')
def hullbutton_func():
conhull()
hullbutton.on_click(hullbutton_func)
##########################################################
wipebutton=Button(label='Clear all points')
def wipe_func():
clearall()
wipebutton.on_click(wipe_func)
##########################################################
brow1 = row(cfitbutton,hullbutton)
brow2 = row(wipebutton,xbutton)
layout = column(fig,brow1,brow2)
pointdrawtool = PointDrawTool(renderers=[vertices,lines,curved,hullbnd])
fig.add_tools(pointdrawtool)
curdoc().add_root(layout)
'''
Example :
---------
save the
'''
########################################################################
########################################################################
# End of File
|
[
"bokeh.models.ColumnDataSource",
"bokeh.models.PointDrawTool",
"bokeh.plotting.figure",
"bokeh.models.Button",
"os.system",
"numpy.append",
"bokeh.io.curdoc",
"numpy.array",
"bokeh.layouts.column",
"scipy.interpolate.interp1d",
"scipy.spatial.ConvexHull",
"bokeh.layouts.row"
] |
[((1052, 1159), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""CAD/Curves/Curve Fit"""', 'plot_width': '(800)', 'plot_height': '(500)', 'x_range': '(-5, 5)', 'y_range': '(-5, 5)'}), "(title='CAD/Curves/Curve Fit', plot_width=800, plot_height=500,\n x_range=(-5, 5), y_range=(-5, 5))\n", (1058, 1159), False, 'from bokeh.plotting import figure, output_file\n'), ((1360, 1389), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'xydict'}), '(data=xydict)\n', (1376, 1389), False, 'from bokeh.models import ColumnDataSource\n'), ((1401, 1430), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'xydict'}), '(data=xydict)\n', (1417, 1430), False, 'from bokeh.models import ColumnDataSource\n'), ((1442, 1471), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'xydict'}), '(data=xydict)\n', (1458, 1471), False, 'from bokeh.models import ColumnDataSource\n'), ((2941, 2961), 'bokeh.models.Button', 'Button', ([], {'label': '"""Exit"""'}), "(label='Exit')\n", (2947, 2961), False, 'from bokeh.models import Button\n'), ((3114, 3139), 'bokeh.models.Button', 'Button', ([], {'label': '"""Curve Fit"""'}), "(label='Curve Fit')\n", (3120, 3139), False, 'from bokeh.models import Button\n'), ((3289, 3321), 'bokeh.models.Button', 'Button', ([], {'label': '"""Show Convex Hull"""'}), "(label='Show Convex Hull')\n", (3295, 3321), False, 'from bokeh.models import Button\n'), ((3481, 3513), 'bokeh.models.Button', 'Button', ([], {'label': '"""Clear all points"""'}), "(label='Clear all points')\n", (3487, 3513), False, 'from bokeh.models import Button\n'), ((3671, 3698), 'bokeh.layouts.row', 'row', (['cfitbutton', 'hullbutton'], {}), '(cfitbutton, hullbutton)\n', (3674, 3698), False, 'from bokeh.layouts import column, row\n'), ((3707, 3731), 'bokeh.layouts.row', 'row', (['wipebutton', 'xbutton'], {}), '(wipebutton, xbutton)\n', (3710, 3731), False, 'from bokeh.layouts import column, row\n'), ((3741, 3766), 'bokeh.layouts.column', 'column', 
(['fig', 'brow1', 'brow2'], {}), '(fig, brow1, brow2)\n', (3747, 3766), False, 'from bokeh.layouts import column, row\n'), ((3784, 3843), 'bokeh.models.PointDrawTool', 'PointDrawTool', ([], {'renderers': '[vertices, lines, curved, hullbnd]'}), '(renderers=[vertices, lines, curved, hullbnd])\n', (3797, 3843), False, 'from bokeh.models import PointDrawTool\n'), ((1914, 1941), 'numpy.array', 'np.array', (["psource.data['x']"], {}), "(psource.data['x'])\n", (1922, 1941), True, 'import numpy as np\n'), ((1952, 1979), 'numpy.array', 'np.array', (["psource.data['y']"], {}), "(psource.data['y'])\n", (1960, 1979), True, 'import numpy as np\n'), ((1987, 2033), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['xarr', 'yarr'], {'kind': '"""cubic"""'}), "(xarr, yarr, kind='cubic')\n", (2007, 2033), False, 'from scipy import interpolate\n'), ((2242, 2269), 'numpy.array', 'np.array', (["psource.data['x']"], {}), "(psource.data['x'])\n", (2250, 2269), True, 'import numpy as np\n'), ((2280, 2307), 'numpy.array', 'np.array', (["psource.data['y']"], {}), "(psource.data['y'])\n", (2288, 2307), True, 'import numpy as np\n'), ((2316, 2338), 'numpy.array', 'np.array', (['[xarr, yarr]'], {}), '([xarr, yarr])\n', (2324, 2338), True, 'import numpy as np\n'), ((2348, 2364), 'scipy.spatial.ConvexHull', 'ConvexHull', (['pt.T'], {}), '(pt.T)\n', (2358, 2364), False, 'from scipy.spatial import ConvexHull\n'), ((2374, 2416), 'numpy.append', 'np.append', (['hull.vertices', 'hull.vertices[0]'], {}), '(hull.vertices, hull.vertices[0])\n', (2383, 2416), True, 'import numpy as np\n'), ((3876, 3884), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (3882, 3884), False, 'from bokeh.io import curdoc\n'), ((797, 812), 'os.system', 'system', (['command'], {}), '(command)\n', (803, 812), False, 'from os import system\n')]
|
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Github organization collaborators fetcher."""
import json
from arboretum.common.constants import GH_ALL_COLLABORATORS
from compliance.evidence import DAY, RawEvidence, raw_evidence
from compliance.fetch import ComplianceFetcher
from compliance.utils.data_parse import get_sha256_hash
from compliance.utils.services.github import Github
class GithubOrgCollaboratorsFetcher(ComplianceFetcher):
    """Fetch collaborators from GH organization repositories."""
    @classmethod
    def setUpClass(cls):
        """Initialize the fetcher object with configuration settings."""
        # Cache of Github API clients keyed by host, so each GH/GHE host
        # is only set up once across all configured orgs.
        cls.gh_pool = {}
        return cls
    def fetch_gh_org_collaborators(self):
        """Fetch collaborators from GH organization repositories.

        For every configured org and collaborator affiliation, store the
        per-repository collaborator lists as raw JSON evidence.
        """
        for config in self.config.get('org.permissions.org_integrity.orgs'):
            # config['url'] has the form https://<host>/<org>
            host, org = config['url'].rsplit('/', 1)
            for aff in config.get('collaborator_types', GH_ALL_COLLABORATORS):
                # Evidence file name is made unique per org URL (short hash).
                url_hash = get_sha256_hash([config['url']], 10)
                json_file = f'gh_{aff}_collaborators_{url_hash}.json'
                path = ['permissions', json_file]
                description = (
                    f'{aff.title()} collaborators of the {org} GH org'
                )
                self.config.add_evidences(
                    [RawEvidence(path[1], path[0], DAY, description)]
                )
                # NOTE(review): presumably ``evidence`` is falsy when the
                # locker copy is still current - confirm raw_evidence
                # semantics; fetching only happens when it is truthy.
                with raw_evidence(self.locker, '/'.join(path)) as evidence:
                    if evidence:
                        if host not in self.gh_pool:
                            self.gh_pool[host] = Github(base_url=host)
                        # Without an explicit repo list, enumerate every
                        # repo of the org via the paginated API.
                        if not config.get('repos'):
                            repos = self.gh_pool[host].paginate_api(
                                f'orgs/{org}/repos'
                            )
                            config['repos'] = [repo['name'] for repo in repos]
                        collabs = {}
                        for repo in config['repos']:
                            collabs_url = f'repos/{org}/{repo}/collaborators'
                            collabs[repo] = self.gh_pool[host].paginate_api(
                                collabs_url, affiliation=aff
                            )
                        evidence.set_content(json.dumps(collabs))
|
[
"compliance.utils.data_parse.get_sha256_hash",
"compliance.utils.services.github.Github",
"compliance.evidence.RawEvidence",
"json.dumps"
] |
[((1607, 1643), 'compliance.utils.data_parse.get_sha256_hash', 'get_sha256_hash', (["[config['url']]", '(10)'], {}), "([config['url']], 10)\n", (1622, 1643), False, 'from compliance.utils.data_parse import get_sha256_hash\n'), ((1949, 1996), 'compliance.evidence.RawEvidence', 'RawEvidence', (['path[1]', 'path[0]', 'DAY', 'description'], {}), '(path[1], path[0], DAY, description)\n', (1960, 1996), False, 'from compliance.evidence import DAY, RawEvidence, raw_evidence\n'), ((2227, 2248), 'compliance.utils.services.github.Github', 'Github', ([], {'base_url': 'host'}), '(base_url=host)\n', (2233, 2248), False, 'from compliance.utils.services.github import Github\n'), ((2912, 2931), 'json.dumps', 'json.dumps', (['collabs'], {}), '(collabs)\n', (2922, 2931), False, 'import json\n')]
|
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'Copyright (c) 2021, AdW Project'
import numpy as np
from scipy.stats import kstest
from copulas import get_instance
def select_univariate(X, candidates, margin_fit_method='AIC'):
    """
    Select the best-fitting univariate distribution for ``X`` among ``candidates``.

    Each candidate model is fitted to the data and scored either with the
    Akaike Information Criterion (the default) or the Kolmogorov-Smirnov
    statistic; the candidate with the lowest score wins.

    :param X: 1-d sample of observations.
    :param candidates: iterable of copulas univariate models to try.
    :param margin_fit_method: 'AIC' (default) for AIC scoring; any other
        value selects the KS statistic.
    :return: a fresh instance of the best-scoring model (via get_instance).
    :raises ValueError: if no candidate distribution could be fitted.
    """
    best_measure = np.inf
    best_model = None
    for model in candidates:
        try:
            instance = get_instance(model)
            fitted_params = instance.fit(X)
            if margin_fit_method == 'AIC':
                measure = fitting_with_aic(X, instance, fitted_params)
            else:
                measure = fitting_with_ks(X, instance)
            # (Removed a leftover debug print of every candidate's score.)
            if measure < best_measure:
                best_measure = measure
                best_model = model
        except ValueError:
            # Distribution not supported by this data; skip it.
            pass
    if best_model is None:
        # Previously fell through to get_instance(None) with a confusing error.
        raise ValueError('No candidate distribution could be fitted to the data.')
    return get_instance(best_model)
def fitting_with_aic(X, instance, fitted_params):
    """Score a fitted distribution with the Akaike Information Criterion.

    AIC = 2*k - 2*log(L), where k is the number of fitted parameters and
    log(L) is the total log-likelihood of the sample under the model.
    """
    num_params = len(fitted_params)
    log_densities = instance.log_probability_density(X)
    # Mask NaN/inf densities so degenerate points don't poison the sum.
    log_likelihood = np.ma.masked_invalid(log_densities).sum()
    return 2 * num_params - 2 * log_likelihood
def fitting_with_ks(X, instance):
    """Score a fitted distribution with the Kolmogorov-Smirnov statistic
    (distance between the sample and the model's cdf; lower is better)."""
    statistic, _pvalue = kstest(X, instance.cdf)
    return statistic
|
[
"scipy.stats.kstest",
"copulas.get_instance"
] |
[((864, 888), 'copulas.get_instance', 'get_instance', (['best_model'], {}), '(best_model)\n', (876, 888), False, 'from copulas import get_instance\n'), ((1168, 1191), 'scipy.stats.kstest', 'kstest', (['X', 'instance.cdf'], {}), '(X, instance.cdf)\n', (1174, 1191), False, 'from scipy.stats import kstest\n'), ((357, 376), 'copulas.get_instance', 'get_instance', (['model'], {}), '(model)\n', (369, 376), False, 'from copulas import get_instance\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from .GetDataAvailability import GetDataAvailability
import DateTimeTools as TT
months = ['J','F','M','A','M','J','J','A','S','O','N','D']
def PlotDataAvailability(Stations,Date,fig=None,maps=[1,1,0,0]):
	"""
	Plot a colored availability grid (stations vs. time) for a set of
	magnetometer stations.

	Inputs
	======
	Stations : list of station codes passed to GetDataAvailability.
	Date : date (or date range) forwarded to GetDataAvailability.
	fig : existing matplotlib figure/pyplot module to draw into; a new
		figure is created when None.
	maps : [xmaps, ymaps, xmap, ymap] subplot grid position.
		NOTE(review): mutable default is only read here, so it is safe.

	Returns
	=======
	ax : the matplotlib axes the grid was drawn on.
	"""
	#get the data availability for each station
	# NOTE(review): e appears to be a per-day availability value in [0,1]
	# (scale below is [0,1]) - confirm in GetDataAvailability.
	ns = len(Stations)
	for i in range(0,ns):
		d,e = GetDataAvailability(Stations[i],Date)
		if i == 0:
			nd = d.size
			x = d
			grid = np.zeros((ns,nd),dtype='float32')
		grid[i] = np.float32(e)
	#Get the x-axis and y-axis (cell edges for pcolormesh)
	xe = np.arange(nd+1)*1.0
	ye = np.arange(ns+1)*1.0
	xg,yg = np.meshgrid(xe,ye)
	#get axis limits
	xlim = [0,nd]
	ylim = [0,ns]
	#get all of the years and months
	yr,mn,dy = TT.DateSplit(x)
	#determine where the ticks go: first of each month / first of each year
	mtick = np.where((dy == 1))[0]
	ytick = np.where((dy == 1) & (mn == 1))[0]
	if ytick.size > 2:
		#use yearly ticks
		xticks = xe[ytick]
		xticklabels = ['{:04d}'.format(yr[yt]) for yt in ytick]
	else:
		#use monthly ticks; January also carries the year on a second line
		xticks = xe[mtick]
		xticklabels = []
		for i in range(0,mtick.size):
			if mn[mtick[i]] == 1:
				tmp = '{:s}\n{:04d}'.format(months[mn[mtick[i]]-1],yr[mtick[i]])
			else:
				tmp = '{:s}'.format(months[mn[mtick[i]]-1])
			xticklabels.append(tmp)
	yticks = ye
	yticklabels = ['']*(ns+1)
	#get the scale (availability fraction 0..1)
	scale = [0.0,1.0]
	#set norm
	norm = colors.Normalize(vmin=scale[0],vmax=scale[1])
	if fig is None:
		fig = plt
		fig.figure()
	#fig may be the pyplot module (has 'Axes') or an existing axes object
	if hasattr(fig,'Axes'):
		ax = fig.subplot2grid((maps[1],maps[0]),(maps[3],maps[2]))
	else:
		ax = fig
	sm = ax.pcolormesh(xg,yg,grid,cmap='RdYlGn',norm=norm,zorder=1.0)
	#set limits
	ax.set_xlim(xlim)
	ax.set_ylim(ylim)
	#set ticks
	ax.xaxis.set_ticks(xticks)
	ax.xaxis.set_ticklabels(xticklabels)
	ax.yaxis.set_ticks(yticks)
	ax.yaxis.set_ticklabels(yticklabels)
	#label each row with its station code, just left of the axes
	for i in range(0,ns):
		ax.text(-0.03,(0.5 + i)/ns,Stations[i].upper(),ha='center',va='center',transform=ax.transAxes)
	#plot a grid
	#horizontal lines between stations
	ax.hlines(ye,xlim[0],xlim[1],color=[0.0,0.0,0.0],zorder=4)
	#vertical lines every year
	ax.vlines(xe[ytick],ylim[0],ylim[1],color=[0.0,0.0,0.0],zorder=4,lw=2.0)
	if ytick.size <= 5:
		#vertical lines every month
		ax.vlines(xe[mtick],ylim[0],ylim[1],color=[0.0,0.0,0.0],zorder=4,linestyle=':')
	return ax
|
[
"numpy.meshgrid",
"matplotlib.colors.Normalize",
"numpy.float32",
"numpy.zeros",
"numpy.where",
"numpy.arange",
"DateTimeTools.DateSplit"
] |
[((684, 703), 'numpy.meshgrid', 'np.meshgrid', (['xe', 'ye'], {}), '(xe, ye)\n', (695, 703), True, 'import numpy as np\n'), ((800, 815), 'DateTimeTools.DateSplit', 'TT.DateSplit', (['x'], {}), '(x)\n', (812, 815), True, 'import DateTimeTools as TT\n'), ((1404, 1450), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': 'scale[0]', 'vmax': 'scale[1]'}), '(vmin=scale[0], vmax=scale[1])\n', (1420, 1450), True, 'import matplotlib.colors as colors\n'), ((578, 591), 'numpy.float32', 'np.float32', (['e'], {}), '(e)\n', (588, 591), True, 'import numpy as np\n'), ((628, 645), 'numpy.arange', 'np.arange', (['(nd + 1)'], {}), '(nd + 1)\n', (637, 645), True, 'import numpy as np\n'), ((654, 671), 'numpy.arange', 'np.arange', (['(ns + 1)'], {}), '(ns + 1)\n', (663, 671), True, 'import numpy as np\n'), ((859, 876), 'numpy.where', 'np.where', (['(dy == 1)'], {}), '(dy == 1)\n', (867, 876), True, 'import numpy as np\n'), ((891, 922), 'numpy.where', 'np.where', (['((dy == 1) & (mn == 1))'], {}), '((dy == 1) & (mn == 1))\n', (899, 922), True, 'import numpy as np\n'), ((532, 567), 'numpy.zeros', 'np.zeros', (['(ns, nd)'], {'dtype': '"""float32"""'}), "((ns, nd), dtype='float32')\n", (540, 567), True, 'import numpy as np\n')]
|
'''
This creates a 3D pyramid pattern of blocks.
@author: MKC
'''
from karelcraft.karelcraft import *
import random
TEXTURES = ('grass', 'stone', 'brick', 'dirt', 'lava', 'rose',
'dlsu', 'diamond', 'emerald', 'gold', 'obsidian',
'leaves', 'sand', 'wood', 'stonebrick', 'sponge', 'snow')
def turn_around():
    """Rotate Karel 180 degrees (two left turns)."""
    for _ in range(2):
        turn_left()
def turn_right():
    """Rotate Karel 90 degrees clockwise (three left turns)."""
    for _ in range(3):
        turn_left()
def outer_layer():
    """Lay a ring of one randomly chosen texture around the world's perimeter."""
    texture = random.choice(TEXTURES)
    # Keep building until we run into the block we placed first (loop closed).
    while front_is_clear() and no_block_present():
        put_block(texture)
        move()
        # At a wall: turn to follow the next side of the perimeter.
        if front_is_blocked():
            turn_left()
def move_inward():
    '''
    pre: Karel at initial position (facing East) with colored perimeter
    post: Karel moves into the inner layer
    '''
    # Step one cell diagonally inward (forward, then sideways), ending
    # facing the original direction again.
    move()
    turn_left()
    move()
    turn_right()
def step_back():
    '''
    Move one step back
    '''
    # Reverse, step, and face forward again (net effect: one cell backward).
    turn_around()
    move()
    turn_around()
def inner_layers():
    """Build successive inner rings, each one texture and one block taller,
    producing the 3D pyramid (num blocks are stacked on each ring cell)."""
    num = 1
    while no_block_present():
        num += 1
        texture = random.choice(TEXTURES)
        # One pass per side of the current (square) ring.
        for _ in range(4):
            while no_block_present():
                # Stack 'num' blocks on this cell: inner rings are taller.
                for _ in range(num):
                    put_block(texture)
                move()
            if block_present():
                # Bumped into an already-built column: back off, turn corner.
                step_back()
                turn_left()
        if front_is_clear():
            move()
def main():
    """Build the pyramid: perimeter ring first, then the stacked inner rings."""
    outer_layer()
    move_inward()
    inner_layers()
if __name__ == "__main__":
    # Run Karel in a 7x7 world.
    run_karel_program('7x7')
|
[
"random.choice"
] |
[((470, 493), 'random.choice', 'random.choice', (['TEXTURES'], {}), '(TEXTURES)\n', (483, 493), False, 'import random\n'), ((1053, 1076), 'random.choice', 'random.choice', (['TEXTURES'], {}), '(TEXTURES)\n', (1066, 1076), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
"""
Created on 23/11/17
Author : <NAME>
Project definitions
"""
import os
import getpass
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord
import astropy.units as u
from dustmaps.config import config
from dustmaps import sfd
# Per-user project root: the project is developed on two known machines.
if getpass.getuser() == "kadu":
    home = "/home/kadu/Dropbox/ngc1487"
elif getpass.getuser() == "luisabuzzo":
    home = "/home/luisabuzzo/Work/Master/NGC1487"
else:
    # Fail with a clear message instead of the NameError the old silent
    # fall-through produced for unknown users.
    raise RuntimeError(
        "Unknown user '{}': add your project root here.".format(getpass.getuser())
    )
data_dir = os.path.join(home, "data")
config['data_dir'] = os.path.join(data_dir, "dustmaps")
if not os.path.exists(config["data_dir"]):  # Just to run once
    # Schlafly & Finkbeiner (2011), an updated version of the popular
    # Schlegel, Finkbeiner & Davis (1998) dust maps.
    sfd.fetch()
obs = ["cube1", "cube2"]
# Constants
D = 10.1  # Distance to the center of the Hydra I cluster in Mpc
DL = 12.2  # Luminosity distance
velscale = 50.  # Set velocity scale for pPXF related routines
V = 848.0  # km/s
# Properties of the system (galaxy center coordinates)
ra0 = 58.942083 * u.degree
dec0 = -42.368056 * u.degree
# Color excess E(B-V) from the SFD dust map at the galaxy position
coords = SkyCoord(ra0, dec0)
sfq = sfd.SFDQuery()
ebv = sfq(coords)
Rv = 3.1  # Constant in our galaxy
Av = ebv * Rv
# Matplotlib settings.
# Fixed: ``plt.style.context(...)`` returns a context manager and does
# nothing outside a ``with`` block; ``plt.style.use`` actually applies it.
plt.style.use("seaborn-paper")
plt.rcParams["text.usetex"] = True
plt.rcParams["font.family"] = "serif"
plt.rcParams['font.serif'] = 'Computer Modern'
plt.rcParams["xtick.direction"] = "in"
plt.rcParams["ytick.direction"] = "in"
plt.rcParams["xtick.minor.visible"] = True
plt.rcParams["ytick.minor.visible"] = True
plt.rcParams["xtick.top"] = True
plt.rcParams["ytick.right"] = True
def get_field_files(observations):
    """Return the names of the image and cube associated with a given field.

    Parameters
    ----------
    observations : str
        Name of the observation run, either "cube1" or "cube2".

    Returns
    -------
    (img, cube) : tuple of str
        Full paths to the image and the data cube. If the underscore
        version of the cube name does not exist on disk, the colon
        variant of both names is returned instead.

    Raises
    ------
    ValueError
        If ``observations`` is not a known observation name.
    """
    # Known observations and their (image, cube) file names.
    fnames = {"cube1": ("ADP.2017-11-20T16_23_13.682.fits",
                        "ADP.2017-11-20T16_23_13.681.fits"),
              "cube2": ("ADP.2017-11-20T16_23_13.729.fits",
                        "ADP.2017-11-20T16_23_13.728.fits")}
    if observations not in fnames:
        # Validate up front so callers get a clear error instead of the
        # NameError the previous fall-through produced.
        raise ValueError("Unknown observation: {}".format(observations))
    img, cube = fnames[observations]
    wdir = os.path.join(home, "data/MUSE")
    cube = os.path.join(wdir, cube)
    img = os.path.join(wdir, img)
    if not os.path.exists(cube):
        # Some downloads keep colons instead of underscores in the
        # names; fall back to that variant.
        cube = cube.replace("_", ":")
        img = img.replace("_", ":")
    return img, cube
# Emission lines used in the projects
def get_emission_lines():
    """Return the emission lines used in the project.

    Returns
    -------
    tuple of (str, float)
        Pairs of line identifier and rest-frame wavelength (in Angstrom,
        judging by the values). Note: the previous docstring said
        "dictionaries", but the function returns a tuple of 2-tuples.
    """
    lines = (("Hbeta_4861", 4861.333), ("OIII_4959", 4958.91),
             ("OIII_5007", 5006.84), ("NII_6550", 6549.86),
             ("Halpha_6565", 6564.61), ("NII_6585", 6585.27),
             ("SII_6718", 6718.29), ("SII_6733", 6732.67))
    return lines
|
[
"getpass.getuser",
"os.path.join",
"dustmaps.sfd.SFDQuery",
"os.path.exists",
"matplotlib.pyplot.style.context",
"dustmaps.sfd.fetch",
"astropy.coordinates.SkyCoord"
] |
[((453, 479), 'os.path.join', 'os.path.join', (['home', '"""data"""'], {}), "(home, 'data')\n", (465, 479), False, 'import os\n'), ((502, 536), 'os.path.join', 'os.path.join', (['data_dir', '"""dustmaps"""'], {}), "(data_dir, 'dustmaps')\n", (514, 536), False, 'import os\n'), ((1113, 1132), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra0', 'dec0'], {}), '(ra0, dec0)\n', (1121, 1132), False, 'from astropy.coordinates import SkyCoord\n'), ((1139, 1153), 'dustmaps.sfd.SFDQuery', 'sfd.SFDQuery', ([], {}), '()\n', (1151, 1153), False, 'from dustmaps import sfd\n'), ((1244, 1278), 'matplotlib.pyplot.style.context', 'plt.style.context', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (1261, 1278), True, 'import matplotlib.pyplot as plt\n'), ((282, 299), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (297, 299), False, 'import getpass\n'), ((544, 578), 'os.path.exists', 'os.path.exists', (["config['data_dir']"], {}), "(config['data_dir'])\n", (558, 578), False, 'import os\n'), ((617, 628), 'dustmaps.sfd.fetch', 'sfd.fetch', ([], {}), '()\n', (626, 628), False, 'from dustmaps import sfd\n'), ((1765, 1796), 'os.path.join', 'os.path.join', (['home', '"""data/MUSE"""'], {}), "(home, 'data/MUSE')\n", (1777, 1796), False, 'import os\n'), ((2072, 2096), 'os.path.join', 'os.path.join', (['wdir', 'cube'], {}), '(wdir, cube)\n', (2084, 2096), False, 'import os\n'), ((2107, 2130), 'os.path.join', 'os.path.join', (['wdir', 'img'], {}), '(wdir, img)\n', (2119, 2130), False, 'import os\n'), ((356, 373), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (371, 373), False, 'import getpass\n'), ((2142, 2162), 'os.path.exists', 'os.path.exists', (['cube'], {}), '(cube)\n', (2156, 2162), False, 'import os\n')]
|
from ..molecule import Molecule
import path
class Linear(path.Path):
    """A linear interpolator that generates ``nsteps - 2`` new molecules
    between ``initial`` and ``final`` (the endpoints themselves are reused).
    """

    def __init__(self, initial, final, nsteps=10):
        """Build the linearly interpolated path.

        Parameters
        ----------
        initial, final : Molecule
            End points of the path; their coordinates define the segment.
        nsteps : int, optional
            Total number of beads on the path, endpoints included.

        Raises
        ------
        TypeError
            If ``nsteps`` is not an integer.
        ValueError
            If ``nsteps`` is smaller than 2 (both endpoints are always
            part of the path; the previous code divided by zero for
            ``nsteps == 1``).
        """
        path.Path.__init__(self)
        # Validate explicitly instead of with ``assert`` so the checks
        # survive ``python -O`` and raise meaningful exception types.
        if not isinstance(nsteps, int):
            raise TypeError("nsteps must be an int, got %r" % type(nsteps))
        if nsteps < 2:
            raise ValueError("nsteps must be >= 2, got %d" % nsteps)
        self._molecules = [initial]
        ci = initial.getCoordinates()
        cf = final.getCoordinates()
        delta = (cf - ci) / (nsteps - 1)
        # Only generate the inner beads; the endpoints are the originals.
        for k in range(1, nsteps - 1):
            m2 = Molecule.fromMolecule(initial)
            m2.setCoordinates(ci + k * delta)
            self._molecules.append(m2)
        self._molecules.append(final)
        assert self.getNumBeads() == nsteps
|
[
"path.Path.__init__"
] |
[((201, 225), 'path.Path.__init__', 'path.Path.__init__', (['self'], {}), '(self)\n', (219, 225), False, 'import path\n')]
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import six.moves.urllib.parse as urlparse
import glance.db as db_api
from glance.i18n import _LE, _LW
from glance import scrubber
# Logger for this module.
LOG = logging.getLogger(__name__)
# Global configuration object shared across glance modules.
CONF = cfg.CONF
# URI schemes never accepted for external image locations; see
# validate_external_location() below (LP bugs #942118, #1334196).
RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config'])
def safe_delete_from_backend(context, image_id, location):
    """
    Given a location, delete an image from the store and
    update location status to db.

    This function tries to handle all known exceptions which might be
    raised by those calls on store and DB modules in its implementation.

    :param context: The request context
    :param image_id: The image identifier
    :param location: The image location entry
    :returns: The result of ``store_api.delete_from_backend`` on success,
        ``None`` when a handled store exception occurred.
    """
    try:
        ret = store_api.delete_from_backend(location['url'], context=context)
        location['status'] = 'deleted'
        if 'id' in location:
            # Only locations already persisted to the DB carry an 'id';
            # mark that DB row as deleted too.
            db_api.get_api().image_location_delete(context, image_id,
                                                  location['id'], 'deleted')
        return ret
    except store_api.NotFound:
        # The data is already gone from the store; warn and carry on.
        msg = _LW('Failed to delete image %s in store from URI') % image_id
        LOG.warn(msg)
    except store_api.StoreDeleteNotSupported as e:
        LOG.warn(encodeutils.exception_to_unicode(e))
    except store_api.UnsupportedBackend:
        # Report the concrete exception class name for diagnostics.
        exc_type = sys.exc_info()[0].__name__
        msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s') %
               dict(image_id=image_id, exc=exc_type))
        LOG.error(msg)
def schedule_delayed_delete_from_backend(context, image_id, location):
    """
    Given a location, schedule the deletion of an image location and
    update location status to db.

    :param context: The request context
    :param image_id: The image identifier
    :param location: The image location entry
    :returns: The result of ``db_queue.add_location`` (truthy when the
        location was queued for deletion).
    """
    db_queue = scrubber.get_scrub_queue()
    if not CONF.use_user_token:
        # NOTE(review): when user tokens are not used the queued entry is
        # stored without the request context — presumably the scrubber
        # then acts with its own credentials; confirm against scrubber.
        context = None
    ret = db_queue.add_location(image_id, location)
    if ret:
        location['status'] = 'pending_delete'
        if 'id' in location:
            # NOTE(zhiyan): A newly added image location entry will have
            # no 'id' field since it has not been saved to the DB yet.
            db_api.get_api().image_location_delete(context, image_id,
                                                  location['id'],
                                                  'pending_delete')
        else:
            db_api.get_api().image_location_add(context, image_id, location)
    return ret
def delete_image_location_from_backend(context, image_id, location):
    """
    Given a location, immediately or schedule the deletion of an image
    location and update location status to db.

    :param context: The request context
    :param image_id: The image identifier
    :param location: The image location entry
    """
    scheduled = False
    if CONF.delayed_delete:
        scheduled = schedule_delayed_delete_from_backend(
            context, image_id, location)
    if scheduled:
        return
    # NOTE(zhiyan) If image metadata has not been saved to DB
    # such as uploading process failure then we can't use
    # location status mechanism to support image pending delete.
    safe_delete_from_backend(context, image_id, location)
def validate_external_location(uri):
    """
    Check whether the URI of an external image location is acceptable.

    Only non-local store types are OK, i.e. S3, Swift, HTTP. Note the
    absence of 'file://' for security reasons, see LP bug #942118,
    1400966; 'swift+config://' is also absent for security reasons,
    see LP bug #1334196.

    :param uri: The URI of the external image location.
    :returns: True when the URI scheme may be used, False otherwise.
    """
    if not uri:
        return False

    # TODO(zhiyan): This function could be moved to glance_store.
    # TODO(gm): Use a whitelist of allowed schemes
    scheme = urlparse.urlparse(uri).scheme
    known = scheme in store_api.get_known_schemes()
    restricted = scheme in RESTRICTED_URI_SCHEMAS
    return known and not restricted
|
[
"glance.i18n._LW",
"glance.scrubber.get_scrub_queue",
"oslo_log.log.getLogger",
"oslo_utils.encodeutils.exception_to_unicode",
"glance.i18n._LE",
"glance.db.get_api",
"glance_store.delete_from_backend",
"glance_store.get_known_schemes",
"sys.exc_info",
"six.moves.urllib.parse.urlparse"
] |
[((887, 914), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (904, 914), True, 'from oslo_log import log as logging\n'), ((2585, 2611), 'glance.scrubber.get_scrub_queue', 'scrubber.get_scrub_queue', ([], {}), '()\n', (2609, 2611), False, 'from glance import scrubber\n'), ((1471, 1534), 'glance_store.delete_from_backend', 'store_api.delete_from_backend', (["location['url']"], {'context': 'context'}), "(location['url'], context=context)\n", (1500, 1534), True, 'import glance_store as store_api\n'), ((4688, 4710), 'six.moves.urllib.parse.urlparse', 'urlparse.urlparse', (['uri'], {}), '(uri)\n', (4705, 4710), True, 'import six.moves.urllib.parse as urlparse\n'), ((4740, 4769), 'glance_store.get_known_schemes', 'store_api.get_known_schemes', ([], {}), '()\n', (4767, 4769), True, 'import glance_store as store_api\n'), ((1815, 1865), 'glance.i18n._LW', '_LW', (['"""Failed to delete image %s in store from URI"""'], {}), "('Failed to delete image %s in store from URI')\n", (1818, 1865), False, 'from glance.i18n import _LE, _LW\n'), ((1967, 2002), 'oslo_utils.encodeutils.exception_to_unicode', 'encodeutils.exception_to_unicode', (['e'], {}), '(e)\n', (1999, 2002), False, 'from oslo_utils import encodeutils\n'), ((2106, 2168), 'glance.i18n._LE', '_LE', (['"""Failed to delete image %(image_id)s from store: %(exc)s"""'], {}), "('Failed to delete image %(image_id)s from store: %(exc)s')\n", (2109, 2168), False, 'from glance.i18n import _LE, _LW\n'), ((1615, 1631), 'glance.db.get_api', 'db_api.get_api', ([], {}), '()\n', (1629, 1631), True, 'import glance.db as db_api\n'), ((2064, 2078), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (2076, 2078), False, 'import sys\n'), ((2951, 2967), 'glance.db.get_api', 'db_api.get_api', ([], {}), '()\n', (2965, 2967), True, 'import glance.db as db_api\n'), ((3171, 3187), 'glance.db.get_api', 'db_api.get_api', ([], {}), '()\n', (3185, 3187), True, 'import glance.db as db_api\n')]
|
# steps to preprocess squad files
# 1. read squad json file, extract all context, write to file, one context per line
# 2. use corenlp to process the above file, write to a new annotated file
# 3. rea the annotated json file; for each context, create a vector of len(#words in context), indicate the sentence
# idx of that word (sentence segmentation)
# 4. read the DrQA annotated squad file, add the sentence segmentation info into the data structure
# 5. use the answer token index vector for each question in the DrQA annotated squad file, tag each word in the context
# to be either A (answer word) or O (not answer word)
# 6. loop through each word in the context, tag each word to be either U (upper case) or L (lower case)
#
# 7. select an appropriate truncation level. could be sentence level, or 2 sentence, or N sentence, all the way up to
# paragraph (for input context truncation), based on
# 8. based on the truncation level, truncate all context related vectors based on inch sentence index the answer appears
# and how many sentences to select (all based on the sentence index information produced from step 4). these vectors
# are: ner, pos, case tag, answer position info, context itself.
# 8. for the selected DrQA annotated file, output the following files:
# a) lower case space seperated question sequence
# b) ner
# c) pos
# d) case tag
# e) answer position info
# f) context
# g) answer text
# set the level of truncation
import argparse
from pdb import set_trace
import codecs
from copy import deepcopy
import json
from os import listdir
from os.path import isfile, join
from os import mkdir
# ---- command-line arguments -------------------------------------------
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('-num_sents', default='all',
                    help='number of sentences to select for the document')
parser.add_argument('-fname', default='content_txt_3.txt',
                    help='file to process')
parser.add_argument('-subject', type=str)
args = parser.parse_args()
num_sents = args.num_sents
subject = args.subject
# 'all' stays a string sentinel; any other value is a sentence count.
if num_sents != 'all':
    num_sents = int(num_sents)
fname = args.fname
# set_trace()
# first load the patterns
with open('ans.patterns.txt', 'r') as f:
    patterns = json.load(f)['data']
# load the terms file (a JSON dict mapping some key -> list of terms)
with open('/home/jack/Documents/openstaxTextbook/'+subject+'/terms.txt') as f:
    terms = json.load(f)
# Flatten the per-key term lists into one list of tokenised terms.
allTerms = []
for key in terms.keys():
    allTerms += terms[key]
terms = allTerms
terms = [term.split() for term in terms]
# 3. load the DrQA corenlp_tokenizer processed OS textbook data
mypath='/home/jack/Documents/openstaxTextbook/'+subject+'/sent_by_mo/corenlp.processed/test/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
# Create the per-truncation-level output directory; a pre-existing
# directory is the expected (and harmless) failure here.
try:
    mkdir(mypath + 'ans.augmented/'+str(num_sents))
except OSError:
    # Narrowed from a bare ``except``: only swallow OS-level failures
    # (typically "directory already exists"), not KeyboardInterrupt,
    # SystemExit, or genuine programming errors.
    pass
from math import floor
for fname in onlyfiles:
    # ---- read one corenlp-annotated file: one JSON object per line ----
    data = []
    try:
        for line in codecs.open(mypath+fname, 'r', 'utf-8'):
            data.append(json.loads(line))
    except:
        # NOTE(review): bare except -- any unreadable/garbled file is
        # silently skipped (and so would be KeyboardInterrupt here).
        continue
    filtered = data
    # select truncation level
    cat = num_sents
    catData = []
    # Slide a window of ``cat`` sentences over the file; each window is
    # merged into one pseudo-paragraph (document/lemma/sentIdx/pos/ner
    # lists concatenated).  NOTE(review): this arithmetic assumes
    # num_sents was given as an integer; with the default '-num_sents
    # all' the ``cat/2.0`` below raises a TypeError -- confirm intended.
    for i in range(floor(cat/2.0), len(data)-floor(cat/2.0)):
        newData = dict.fromkeys(data[0].keys())
        document = []
        lemma = []
        sentIdx = []
        pos = []
        ner = []
        for j in range(i-floor(cat/2.0), i+floor(cat/2.0)+1):
            # set_trace()
            document += data[j]['document']
            lemma += data[j]['lemma']
            sentIdx += data[j]['sentIdx']
            pos += data[j]['pos']
            ner += data[j]['ner']
        newData['document'] = document
        newData['lemma'] = lemma
        newData['sentIdx'] = sentIdx
        newData['pos'] = pos
        newData['ner'] = ner
        catData.append(newData)
    print('number of paragraphs with ' + str(num_sents) + ' sentences per paragraph: ' + str(len(catData)))
    # use some heuristics to find answers
    # if multiple answers are found, duplicate this current datapoint
    print('start processing file: ' + fname)
    dataAns = []
    for dataIdx in range(len(catData)):
        ner = catData[dataIdx]['ner']
        pos = catData[dataIdx]['pos']
        ansInds = [] # list of lists of answer locations
        ansIdxs = []
        # for idx in range(len(ner)):
        #     for pattern in patterns:
        #         if idx+len(pattern[0])<len(ner) and pos[idx:idx+len(pattern[0])]==pattern[0] and ner[idx:idx+len(pattern[1])]==pattern[1]:
        #             ansIdxs.append(list(range(idx,idx+len(pattern[0]))))
        # set_trace()
        # # simplify
        # new_ansIdxs = []
        # for idx1 in range(len(ansIdxs)):
        #     item = ansIdxs[idx1]
        #     Add = True
        #     for idx2 in range(len(ansIdxs)):
        #         if idx1!=idx2 and set(item).issubset(set(ansIdxs[idx2])):
        #             Add = False
        #     if Add:
        #         new_ansIdxs.append(item)
        # ansIdxs = new_ansIdxs
        # find answers using special ner tags and terms
        # Heuristic 1: each maximal run of identical non-'O' NER tags
        # becomes one candidate answer span.
        idx = 0
        while idx < len(ner):
            # using ner
            if ner[idx] != 'O':
                k = idx+1
                while k < len(ner):
                    if ner[k] == ner[idx]:
                        k += 1
                    else:
                        break
                ansIdxs.append(list(range(idx,k)))
                idx = k
            else:
                idx += 1
        # find answers using terms
        # Heuristic 2: every case-insensitive occurrence of a glossary
        # term becomes a candidate span.  NOTE(review): when idx+i runs
        # past the end of the paragraph the mismatch test is skipped, so
        # a term overhanging the paragraph end still "matches" -- likely
        # a bug; confirm intended behaviour.
        for idx in range(len(ner)):
            for term in terms:
                Add = True
                for i in range(len(term)):
                    if idx+i<len(ner) and catData[dataIdx]['document'][idx+i].lower() != term[i]:
                        Add = False
                if Add:
                    ansIdxs.append(list(range(idx,idx+len(term))))
        # set_trace()
        # simplify
        new_ansIdxs = []
        for ansIdx in ansIdxs:
            # do not include duplicate answer indices
            if ansIdx not in new_ansIdxs:
                new_ansIdxs.append(ansIdx)
        ansIdxs = new_ansIdxs
        # print(' found ' + str(len(ansIdxs)) + ' answers in this paragraph.')
        # Convert each candidate span into a per-token tag vector:
        # 'A' inside the answer span, '-' everywhere else.
        for idx in range(len(ansIdxs)):
            ansInd = ['-'] * len(ner)
            for j in ansIdxs[idx]:
                ansInd[j] = 'A'
            ansInds.append(ansInd)
        # Duplicate the paragraph once per candidate answer.
        for idx in range(len(ansInds)):
            ansInd = ansInds[idx]
            newData = deepcopy(catData[dataIdx])
            newData['ansInd'] = ansInd
            dataAns.append(newData)
    # # simplify
    # newDataAns = []
    # answers = []
    # for ex in dataAns:
    #     # extract answer
    #     ans = []
    #     for idx in range(len(ex['ansInd'])):
    #         if ex['ansInd'][idx] == 'A':
    #             ans.append(ex['document'][idx])
    #     if ans not in answers:
    #         answers.append(ans)
    #         newDataAns.append(ex)
    # dataAns = newDataAns
    print('finding answer completed.')
    # set_trace()
    # get case info
    # Tag each token 'U' (starts with/contains upper case) or 'L'
    # (lower case or non-alphabetic).
    for i in range(len(dataAns)):
        doc_case = []
        for w in dataAns[i]['document']:
            if w.isalpha():
                if w.islower():
                    doc_case.append('L')
                else:
                    doc_case.append('U')
            else:
                doc_case.append('L')
        dataAns[i]['doc_case'] = doc_case
    # set_trace()
    # append case, pos, ner, ansInd as features to context
    # Output format: one example per line, tokens joined by spaces, each
    # token carrying its features separated by the '│' character.
    out_file = mypath + 'ans.augmented/'+str(num_sents)+'/' + fname
    with open(out_file, 'wb') as f:
        for ex in dataAns:
            line = u' '.join([ex['document'][idx].replace(' ', '').lower() + '│' + ex['doc_case'][idx] + '│' +
                              ex['pos'][idx] + '│' + ex['ner'][idx] + '│' + ex['ansInd'][idx]
                              for idx in range(len(ex['document']))]).encode('utf-8').strip()
            f.write(line + u'\n'.encode('utf-8'))
    f.close()  # redundant: the with-block already closed the file
    # content content and answer
    # Second output: "answer tokens ||| lower-cased context tokens".
    out_file = mypath + 'ans.augmented/'+str(num_sents)+'/contentOnly' + fname
    with open(out_file, 'wb') as f:
        for ex in dataAns:
            ans = []
            for idx in range(len(ex['ansInd'])):
                if ex['ansInd'][idx] == 'A':
                    ans.append(ex['document'][idx])
            line = u' '.join(ans + [' ||| '] + [ex['document'][idx].replace(' ', '').lower()
                                                for idx in range(len(ex['document']))]).encode('utf-8').strip()
            f.write(line + u'\n'.encode('utf-8'))
    f.close()  # redundant: the with-block already closed the file
    print('----------------------------------')
|
[
"copy.deepcopy",
"json.load",
"codecs.open",
"argparse.ArgumentParser",
"json.loads",
"math.floor",
"os.path.join",
"os.listdir"
] |
[((1665, 1726), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (1688, 1726), False, 'import argparse\n'), ((2378, 2390), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2387, 2390), False, 'import json\n'), ((2243, 2255), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2252, 2255), False, 'import json\n'), ((2699, 2714), 'os.listdir', 'listdir', (['mypath'], {}), '(mypath)\n', (2706, 2714), False, 'from os import listdir\n'), ((2911, 2952), 'codecs.open', 'codecs.open', (['(mypath + fname)', '"""r"""', '"""utf-8"""'], {}), "(mypath + fname, 'r', 'utf-8')\n", (2922, 2952), False, 'import codecs\n'), ((3131, 3147), 'math.floor', 'floor', (['(cat / 2.0)'], {}), '(cat / 2.0)\n', (3136, 3147), False, 'from math import floor\n'), ((2725, 2740), 'os.path.join', 'join', (['mypath', 'f'], {}), '(mypath, f)\n', (2729, 2740), False, 'from os.path import isfile, join\n'), ((3157, 3173), 'math.floor', 'floor', (['(cat / 2.0)'], {}), '(cat / 2.0)\n', (3162, 3173), False, 'from math import floor\n'), ((6535, 6561), 'copy.deepcopy', 'deepcopy', (['catData[dataIdx]'], {}), '(catData[dataIdx])\n', (6543, 6561), False, 'from copy import deepcopy\n'), ((2976, 2992), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2986, 2992), False, 'import json\n'), ((3343, 3359), 'math.floor', 'floor', (['(cat / 2.0)'], {}), '(cat / 2.0)\n', (3348, 3359), False, 'from math import floor\n'), ((3361, 3377), 'math.floor', 'floor', (['(cat / 2.0)'], {}), '(cat / 2.0)\n', (3366, 3377), False, 'from math import floor\n')]
|
# https://pytorchnlp.readthedocs.io/en/latest/_modules/torchnlp/nn/attention.html
import torch
import torch.nn as nn
class Attention(nn.Module):
    """Attention over a ``context`` driven by a ``query``.

    **Thank you** to IBM for their initial implementation of
    :class:`Attention`. Here is their `License
    <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.

    Args:
        dimensions (int): Dimensionality of the query and context.
        attention_type (str, optional): How to compute the attention score:

            * dot: :math:`score(H_j,q) = H_j^T q`
            * general: :math:`score(H_j, q) = H_j^T W_a q`
    """

    def __init__(self, dimensions, attention_type='general'):
        super(Attention, self).__init__()

        if attention_type not in ('dot', 'general'):
            raise ValueError('Invalid attention type selected.')
        self.attention_type = attention_type
        # The 'general' variant learns a bilinear map W_a applied to the
        # query before the dot product; 'dot' needs no extra parameters.
        if attention_type == 'general':
            self.linear_in = nn.Linear(dimensions, dimensions, bias=False)

        self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)
        self.softmax = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

    def forward(self, query, context):
        """Attend to ``context`` using ``query``.

        Args:
            query (torch.FloatTensor [batch, output_len, dimensions]):
                Sequence of queries to query the context.
            context (torch.FloatTensor [batch, query_len, dimensions]):
                Data over which to apply the attention mechanism.

        Returns:
            tuple:
                * **output** [batch, output_len, dimensions]: attended
                  features.
                * **weights** [batch, output_len, query_len]: attention
                  weights.
        """
        batch, out_len, dims = query.size()
        ctx_len = context.size(1)

        if self.attention_type == 'general':
            # Apply W_a to every query vector: flatten, project, restore.
            flat = query.reshape(batch * out_len, dims)
            query = self.linear_in(flat).reshape(batch, out_len, dims)

        # TODO: Include mask on PADDING_INDEX?
        # Raw scores: (batch, out_len, dims) x (batch, dims, ctx_len)
        # -> (batch, out_len, ctx_len).
        scores = torch.bmm(query, context.transpose(1, 2).contiguous())

        # Normalise the scores over the context positions.
        weights = self.softmax(scores.view(batch * out_len, ctx_len))
        weights = weights.view(batch, out_len, ctx_len)

        # Weighted sum of context vectors: (batch, out_len, dims).
        mix = torch.bmm(weights, context)

        # Project [mix; query] back down to ``dims`` and squash.
        combined = torch.cat((mix, query), dim=2)
        combined = combined.view(batch * out_len, 2 * dims)
        output = self.linear_out(combined).view(batch, out_len, dims)
        output = self.tanh(output)

        return output, weights
|
[
"torch.bmm",
"torch.nn.Tanh",
"torch.cat",
"torch.nn.Softmax",
"torch.nn.Linear"
] |
[((1421, 1470), 'torch.nn.Linear', 'nn.Linear', (['(dimensions * 2)', 'dimensions'], {'bias': '(False)'}), '(dimensions * 2, dimensions, bias=False)\n', (1430, 1470), True, 'import torch.nn as nn\n'), ((1494, 1512), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (1504, 1512), True, 'import torch.nn as nn\n'), ((1533, 1542), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1540, 1542), True, 'import torch.nn as nn\n'), ((3317, 3354), 'torch.bmm', 'torch.bmm', (['attention_weights', 'context'], {}), '(attention_weights, context)\n', (3326, 3354), False, 'import torch\n'), ((3435, 3465), 'torch.cat', 'torch.cat', (['(mix, query)'], {'dim': '(2)'}), '((mix, query), dim=2)\n', (3444, 3465), False, 'import torch\n'), ((1348, 1393), 'torch.nn.Linear', 'nn.Linear', (['dimensions', 'dimensions'], {'bias': '(False)'}), '(dimensions, dimensions, bias=False)\n', (1357, 1393), True, 'import torch.nn as nn\n')]
|
from app.common.adapter.repositories.sql import db
from ....domain.models import Author, Tag, TagId, Article, ArticleId
# --- 'tag' table -------------------------------------------------------
# The raw integer PK column is exposed under the attribute key '__id' so
# that the public mapped attribute 'id' can be the TagId composite below.
tag = db.Table(
    'tag',
    db.Column('id', db.BigInteger, primary_key=True, unique=True, key='__id'),
    db.Column('name', db.String(50), unique=True)
)
# Teach the mapper how to decompose a TagId value object into its column
# values (required by db.composite).
TagId.__composite_values__ = lambda self: (self.value,)
# Classical (imperative) mapping of the Tag domain model onto the table.
db.mapper(Tag, tag, properties={
    'id': db.composite(TagId, tag.c.__id),
})
# --- 'article' table ---------------------------------------------------
article = db.Table(
    'article',
    db.Column('id', db.BigInteger, primary_key=True, unique=True, key='__id'),
    db.Column('title', db.String(100)),
    db.Column('content', db.Text),
    # Author is stored as two columns and mapped as one composite value.
    db.Column('author_id', db.BigInteger, key='__author_id'),
    db.Column('author_name', db.String(50), key='__author_name'),
    db.Column('created_at', db.DateTime(timezone=True)),
    db.Column('updated_at', db.DateTime(timezone=True)),
    db.Column('deleted_at', db.DateTime(timezone=True)),
)
ArticleId.__composite_values__ = lambda self: (self.value,)
Author.__composite_values__ = lambda self: (self.id, self.name)
# --- many-to-many link table between tags and articles -----------------
tag_article_association = db.Table(
    'tag_article_association',
    db.Column('tag_id', db.BigInteger, db.ForeignKey('tag.__id')),
    db.Column('article_id', db.BigInteger, db.ForeignKey('article.__id'))
)
# Map Article with its composite id, composite author and the tag
# collection via the association table.
db.mapper(Article, article, properties={
    'id': db.composite(ArticleId, article.c.__id),
    'author': db.composite(
        Author, article.c.__author_id, article.c.__author_name),
    'tags': db.relationship(Tag, secondary=tag_article_association)
})
|
[
"app.common.adapter.repositories.sql.db.ForeignKey",
"app.common.adapter.repositories.sql.db.DateTime",
"app.common.adapter.repositories.sql.db.String",
"app.common.adapter.repositories.sql.db.relationship",
"app.common.adapter.repositories.sql.db.composite",
"app.common.adapter.repositories.sql.db.Column"
] |
[((152, 225), 'app.common.adapter.repositories.sql.db.Column', 'db.Column', (['"""id"""', 'db.BigInteger'], {'primary_key': '(True)', 'unique': '(True)', 'key': '"""__id"""'}), "('id', db.BigInteger, primary_key=True, unique=True, key='__id')\n", (161, 225), False, 'from app.common.adapter.repositories.sql import db\n'), ((456, 529), 'app.common.adapter.repositories.sql.db.Column', 'db.Column', (['"""id"""', 'db.BigInteger'], {'primary_key': '(True)', 'unique': '(True)', 'key': '"""__id"""'}), "('id', db.BigInteger, primary_key=True, unique=True, key='__id')\n", (465, 529), False, 'from app.common.adapter.repositories.sql import db\n'), ((575, 604), 'app.common.adapter.repositories.sql.db.Column', 'db.Column', (['"""content"""', 'db.Text'], {}), "('content', db.Text)\n", (584, 604), False, 'from app.common.adapter.repositories.sql import db\n'), ((610, 666), 'app.common.adapter.repositories.sql.db.Column', 'db.Column', (['"""author_id"""', 'db.BigInteger'], {'key': '"""__author_id"""'}), "('author_id', db.BigInteger, key='__author_id')\n", (619, 666), False, 'from app.common.adapter.repositories.sql import db\n'), ((249, 262), 'app.common.adapter.repositories.sql.db.String', 'db.String', (['(50)'], {}), '(50)\n', (258, 262), False, 'from app.common.adapter.repositories.sql import db\n'), ((554, 568), 'app.common.adapter.repositories.sql.db.String', 'db.String', (['(100)'], {}), '(100)\n', (563, 568), False, 'from app.common.adapter.repositories.sql import db\n'), ((697, 710), 'app.common.adapter.repositories.sql.db.String', 'db.String', (['(50)'], {}), '(50)\n', (706, 710), False, 'from app.common.adapter.repositories.sql import db\n'), ((762, 788), 'app.common.adapter.repositories.sql.db.DateTime', 'db.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (773, 788), False, 'from app.common.adapter.repositories.sql import db\n'), ((819, 845), 'app.common.adapter.repositories.sql.db.DateTime', 'db.DateTime', ([], {'timezone': '(True)'}), 
'(timezone=True)\n', (830, 845), False, 'from app.common.adapter.repositories.sql import db\n'), ((876, 902), 'app.common.adapter.repositories.sql.db.DateTime', 'db.DateTime', ([], {'timezone': '(True)'}), '(timezone=True)\n', (887, 902), False, 'from app.common.adapter.repositories.sql import db\n'), ((1139, 1164), 'app.common.adapter.repositories.sql.db.ForeignKey', 'db.ForeignKey', (['"""tag.__id"""'], {}), "('tag.__id')\n", (1152, 1164), False, 'from app.common.adapter.repositories.sql import db\n'), ((1210, 1239), 'app.common.adapter.repositories.sql.db.ForeignKey', 'db.ForeignKey', (['"""article.__id"""'], {}), "('article.__id')\n", (1223, 1239), False, 'from app.common.adapter.repositories.sql import db\n'), ((380, 411), 'app.common.adapter.repositories.sql.db.composite', 'db.composite', (['TagId', 'tag.c.__id'], {}), '(TagId, tag.c.__id)\n', (392, 411), False, 'from app.common.adapter.repositories.sql import db\n'), ((1295, 1334), 'app.common.adapter.repositories.sql.db.composite', 'db.composite', (['ArticleId', 'article.c.__id'], {}), '(ArticleId, article.c.__id)\n', (1307, 1334), False, 'from app.common.adapter.repositories.sql import db\n'), ((1350, 1418), 'app.common.adapter.repositories.sql.db.composite', 'db.composite', (['Author', 'article.c.__author_id', 'article.c.__author_name'], {}), '(Author, article.c.__author_id, article.c.__author_name)\n', (1362, 1418), False, 'from app.common.adapter.repositories.sql import db\n'), ((1441, 1496), 'app.common.adapter.repositories.sql.db.relationship', 'db.relationship', (['Tag'], {'secondary': 'tag_article_association'}), '(Tag, secondary=tag_article_association)\n', (1456, 1496), False, 'from app.common.adapter.repositories.sql import db\n')]
|
# this script is based on ./examples/cars segmentation (camvid).ipynb
# ========== loading data ==========
'''
For this example, we will use PASCAL2012 dataset. It is a set of:
- train images + instance segmentation masks
- validation images + instance segmentation masks
'''
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0' # Will use only the first GPU device
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from skimage.transform import resize
from skimage.morphology import dilation, thin, binary_erosion
from scipy.ndimage import measurements
from tqdm import tqdm
import math
# Dataset layout (standard PASCAL VOC 2012 folder structure).
DATA_DIR = './data/voc/VOC2012/'
x_dir = os.path.join(DATA_DIR, 'JPEGImages')              # input images
y_dir = os.path.join(DATA_DIR, 'SegmentationObject')      # instance masks
cls_dir = os.path.join(DATA_DIR, 'SegmentationClass')     # class masks
train_ids = os.path.join(DATA_DIR, 'ImageSets/Segmentation/train.txt')
valid_ids = os.path.join(DATA_DIR, 'ImageSets/Segmentation/val.txt')
# some useful constants
dim = (256, 256) # resize all images to dim=(256, 256)
background_centerness_const = 0 # gt val to indicate the pixel is a background
# ========== visualization helper ==========
def viz_centerness(y, id, folder_name):
    """Save two overlay figures for one example.

    The first figure overlays the predicted centerness scores on the
    ground-truth instance image; the second overlays an eroded+dilated
    version of the instance mask instead.

    Args:
        y: array of size (1, H, W) holding the centerness scores.
        id: image identifier; the last three characters are stripped to
            recover the ground-truth file name.
        folder_name: output directory (relative to the CWD).
    """
    def _save_overlay(score_map, background, out_path):
        # One figure: score map in reversed grayscale, ground-truth image
        # blended on top, axes hidden, saved at high resolution.
        plt.figure(figsize=(20, 20))
        plt.xticks([])
        plt.yticks([])
        plt.imshow(score_map[:, :, None], cmap='gray_r', interpolation='nearest')
        plt.imshow(background, interpolation='nearest', alpha=0.4)
        plt.savefig(out_path, dpi=300, bbox_inches='tight')
        plt.close()

    gt_path = os.path.join(DATA_DIR, 'SegmentationObject', id[:-3]+'.png')
    # Ground truth, used as a translucent background in both figures.
    gt_img = plt.imread(gt_path)
    gt_img = np.where(gt_img == 0, 255, gt_img)  # background pixels -> white
    background = resize(gt_img, dim)
    mask = np.array(Image.open(gt_path).resize(dim, resample=Image.NEAREST), dtype=int)
    mask = np.where(mask == 255, 0, mask)  # void label 255 -> background
    scores = y.squeeze()
    # Figure 1: centerness scores.
    _save_overlay(scores, background, './' + folder_name + '/' + id + '.png')
    # Figure 2: morphologically cleaned (eroded then dilated) object mask.
    modified_mask = np.zeros(mask.shape)
    for obj_label in np.unique(mask):
        if obj_label == 0:
            continue
        obj = np.array(mask == obj_label, dtype=int)
        obj = binary_erosion(obj)
        obj = dilation(obj)
        modified_mask = modified_mask + obj
    _save_overlay(modified_mask, background, './' + folder_name + '/' + id[:-3] + '.png')
# ========== data loader ==========
'''
Writing helper class for data extraction, tranformation and preprocessing
https://pytorch.org/docs/stable/data
'''
from torch.utils.data import DataLoader
from torch.utils.data import Dataset as BaseDataset
class Dataset(BaseDataset):
"""PASCAL Dataset. Read images, apply augmentation and preprocessing transformations.
Args:
images_dir (str): path to images folder
masks_dir (str): path to segmentation masks folder
img_ids (str): path to the file containing image ids
augmentation (albumentations.Compose): data transfromation pipeline
(e.g. flip, scale, etc.)
preprocessing (albumentations.Compose): data preprocessing
(e.g. noralization, shape manipulation, etc.)
"""
def __init__(
self,
images_dir,
masks_dir,
img_ids,
augmentation=None,
preprocessing=None,
):
with open(img_ids, 'r') as f:
self.ids = [x.strip() for x in f.readlines()]
print(img_ids + ': ' + str(len(self.ids)) + ' Images')
self.images_fps = [os.path.join(images_dir, image_id + '.jpg') for image_id in self.ids]
self.masks_fps = [os.path.join(masks_dir, image_id + '.png') for image_id in self.ids]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __getitem__(self, i):
# read data
image = np.array(Image.open(self.images_fps[i]).resize(dim, resample=Image.NEAREST))
mask = np.array(Image.open(self.masks_fps[i]).resize(dim, resample=Image.NEAREST), dtype=int)
assert image.shape[:-1] == mask.shape
# concat xy position info on image (RGBXY)
# xx, yy = np.arange(image.shape[0]), np.arange(image.shape[1])
# grid_x, grid_y = np.meshgrid(xx, yy, indexing='ij')
# image = np.concatenate((image, grid_x[:,:,None], grid_y[:,:,None]), axis=-1).astype('float')
mask = np.where(mask == 255, 0, mask) # convert the void pixels to background
# print(np.unique(mask))
centerness = np.zeros(mask.shape)
weight = np.ones(mask.shape)
eps = 0.0001
for label in np.unique(mask):
if label == 0:
continue
# perform morphological thinning and dilation
cur_mask = (mask == label)
cur_mask = binary_erosion(cur_mask)
cur_mask = dilation(cur_mask)
if np.count_nonzero(cur_mask) == 0: # avoid empty object after erosion
cur_mask = (mask == label)
# compute the center coordinate by average all coordinates in the current object
xx, yy = np.arange(mask.shape[0]), np.arange(mask.shape[1])
grid_x, grid_y = np.meshgrid(xx, yy, indexing='ij')
grid_x = np.where(cur_mask == 1, grid_x, 0)
grid_y = np.where(cur_mask == 1, grid_y, 0)
center_x, center_y = np.sum(grid_x) / np.count_nonzero(grid_x), np.sum(grid_y) / np.count_nonzero(grid_y)
# assign center-ness score (between 0 and 1) to each pixel of 'label' based on distance to center
# const / distance
x_sqr_diff = np.square(np.absolute(grid_x - center_x))
y_sqr_diff = np.square(np.absolute(grid_y - center_y))
dist = np.sqrt(x_sqr_diff + y_sqr_diff) + eps # prevent division by 0
scores = np.minimum(1, np.divide(10, dist))
scores = np.where(mask == label, scores, 0)
# uniformly spread more weight to smaller objects (at least weight of a radius-10 circle)
if np.sum(scores) >= 314:
centerness = np.where(mask == label, scores, centerness)
else:
inc = (314 - np.sum(scores)) / np.sum(mask == label)
centerness = np.where(mask == label, scores + inc, centerness)
# update weight on pixels of current instance, used in loss computation
# weight = np.where(mask == label, 10000 / np.sum(mask == label), weight)
# weight = np.where(mask == label, (100 / np.sum(mask == label))**2, weight)
weight = np.where(mask == label, 100 / math.sqrt(np.sum(mask == label)), weight)
# sanity check
assert np.all(centerness >= np.zeros(centerness.shape))
assert np.all(centerness[mask == 0] == 0)
assert np.all(centerness[mask != 0] > 0)
centerness = centerness[:,:,None].astype('float')
weight = weight[:,:,None].astype('float')
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=image, mask=centerness)
image, centerness = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=image, mask=centerness)
image, centerness = sample['image'], sample['mask']
return image, centerness, weight.transpose(2, 0, 1).astype('float32')
def __len__(self):
return len(self.ids)
# look at the data we have
# dataset = Dataset(x_dir, y_dir, train_ids)
# image, centerness = dataset[14] # get some sample
# with open(train_ids, 'r') as f:
# ids = [x.strip() for x in f.readlines()]
# print(ids[14])
# viz_centerness(centerness, 'plot2', '.')
# ========== preprocess image ==========
import albumentations as albu
def to_tensor(x, **kwargs):
return x.transpose(2, 0, 1).astype('float32')
def get_preprocessing(preprocessing_fn):
"""Construct preprocessing transform
Args:
preprocessing_fn (callbale): data normalization function
(can be specific for each pretrained neural network)
Return:
transform: albumentations.Compose
"""
_transform = [
albu.Lambda(image=preprocessing_fn),
albu.Lambda(image=to_tensor, mask=to_tensor),
]
return albu.Compose(_transform)
# ========== create model and train ==========
import torch
import segmentation_models_pytorch as smp
ENCODER = 'se_resnext50_32x4d'
ENCODER_WEIGHTS = 'imagenet'
pascal_class = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] # 20 classes (excluding background)
ACTIVATION = 'sigmoid' # for centerness head
DEVICE = 'cuda'
# create segmentation model with pretrained encoder
model = smp.FPN(
encoder_name=ENCODER,
encoder_weights=ENCODER_WEIGHTS,
classes=1, # centerness (1 output channels)
activation=ACTIVATION,
)
preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
train_dataset = Dataset(
x_dir,
y_dir,
train_ids,
preprocessing=get_preprocessing(preprocessing_fn),
)
valid_dataset = Dataset(
x_dir,
y_dir,
valid_ids,
preprocessing=get_preprocessing(preprocessing_fn),
)
train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=6)
valid_loader = DataLoader(valid_dataset, batch_size=8, shuffle=False, num_workers=6)
# Dice/F1 score - https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
# IoU/Jaccard score - https://en.wikipedia.org/wiki/Jaccard_index
loss = smp.utils.losses.Weighted_MSELoss(ignore_val=background_centerness_const) # smp.utils.losses.L1Loss(ignore_val=background_centerness_const) # total loss computed on object pixels
metrics = [
smp.utils.metrics.L1_centerness_object(), # per pixel score
]
optimizer = torch.optim.Adam([
dict(params=model.parameters(), lr=0.0001),
])
# create epoch runners
# it is a simple loop of iterating over dataloader`s samples
train_epoch = smp.utils.train.TrainEpoch(
model,
loss=loss,
metrics=metrics,
optimizer=optimizer,
device=DEVICE,
verbose=True,
)
valid_epoch = smp.utils.train.ValidEpoch(
model,
loss=loss,
metrics=metrics,
device=DEVICE,
verbose=True,
)
# # train model for 20 epochs
# min_score = 100000
# '''plot the training and validation losses
# sanity check if they are decreasing over epochs
# '''
# train_loss, valid_loss = [], []
# l1_centerness = []
# epochs = range(0,20)
# for i in range(0,20):
# print('\nEpoch: {}'.format(i))
# train_logs = train_epoch.run(train_loader)
# valid_logs = valid_epoch.run(valid_loader)
# train_loss.append(train_logs['weighted_mse_loss'])
# valid_loss.append(valid_logs['weighted_mse_loss'])
# l1_centerness.append(valid_logs['L1_centerness_object'])
# # do something (save model, change lr, etc.)
# if valid_logs['L1_centerness_object'] < min_score:
# min_score = valid_logs['L1_centerness_object']
# torch.save(model, './best_model_centerness_weightedL2_invsqrt.pth')
# print('Model saved!')
# # save the plots of training and validation losses
# plt.plot(epochs, train_loss, label='training_loss', color='red')
# plt.plot(epochs, valid_loss, label='validation_loss', color='blue')
# plt.title('loss visualization', fontsize=12)
# plt.legend(loc='upper left')
# plt.xlabel('epochs', fontsize=12)
# plt.ylabel('loss', fontsize=12)
# plt.savefig('./loss.png', dpi=300, bbox_inches='tight')
# plt.close()
# ========== visualize predictions ==========
# load best saved checkpoint
best_model = torch.load('./best_model_centerness_weightedL2_invsqrt.pth')\
# with open(valid_ids, 'r') as f:
# ids = [x.strip() for x in f.readlines()]
# for idx in range(10):
# i = idx # np.random.choice(len(valid_dataset))
# print(ids[i])
# image, gt_mask, weight = valid_dataset[i]
# gt_mask = gt_mask.squeeze()
# x_tensor = torch.from_numpy(image).to(DEVICE).unsqueeze(0)
# centerness = best_model.predict(x_tensor)
# centerness = (centerness.squeeze().cpu().numpy().round())
# # print(np.sum(centerness))
# # print(np.amax(centerness))
# # print(np.amin(centerness))
# viz_centerness(gt_mask, str(ids[i]) + '_gt', 'centerness_val_viz')
# viz_centerness(centerness, str(ids[i]) + '_pr', 'centerness_val_viz')
# with open(train_ids, 'r') as f:
# ids = [x.strip() for x in f.readlines()]
# for idx in [24, 121, 135, 431, 837, 871, 966, 1118, 1294, 1374]:
# i = idx # np.random.choice(len(train_dataset))
# print(ids[i])
# image, gt_mask, weight = train_dataset[i]
# gt_mask = gt_mask.squeeze()
# x_tensor = torch.from_numpy(image).to(DEVICE).unsqueeze(0)
# centerness = best_model.predict(x_tensor)
# centerness = (centerness.squeeze().cpu().numpy().round())
# viz_centerness(gt_mask, str(ids[i]) + '_gt', 'centerness_train_viz')
# viz_centerness(centerness, str(ids[i]) + '_pr', 'centerness_train_viz')
# ========== other evaluations ==========
# percentage of correct centers detected (on validation set)
with open(valid_ids, 'r') as f:
val_ids_arr = [x.strip() for x in f.readlines()]
masks_fps = [os.path.join(y_dir, image_id + '.png') for image_id in val_ids_arr]
semantic_fps = [os.path.join(cls_dir, image_id + '.png') for image_id in val_ids_arr]
total_count = 0
evals = dict()
for i in tqdm(range(len(val_ids_arr))):
image, gt_mask, weight = valid_dataset[i]
x_tensor = torch.from_numpy(image).to(DEVICE).unsqueeze(0)
centerness = best_model.predict(x_tensor)
# 1) threshold centerness scores
centerness = (centerness.squeeze().cpu().numpy().round())
# 2) compute connected components
labeled_array, num_features = measurements.label(centerness)
obj_mask = np.array(Image.open(masks_fps[i]).resize(dim, resample=Image.NEAREST), dtype=int)
cls_mask = np.array(Image.open(semantic_fps[i]).resize(dim, resample=Image.NEAREST), dtype=int)
image = image.transpose(1, 2, 0)
assert image.shape[:-1] == obj_mask.shape
assert image.shape[:-1] == cls_mask.shape
obj_mask = np.where(obj_mask == 255, 0, obj_mask) # convert the void pixels to background
cls_mask = np.where(cls_mask == 255, 0, cls_mask)
# 3) assign connected component to class it has the largest overlap with
for component in np.unique(labeled_array):
if component == 0: continue
largest_overlap = 0
cls_belong = 0
for cur_cls in np.unique(cls_mask):
if cur_cls == 0: continue
overlap = np.count_nonzero(np.logical_and(labeled_array == component, cls_mask == cur_cls))
if overlap > largest_overlap:
largest_overlap = overlap
cls_belong = cur_cls
pos = np.logical_and(labeled_array == component, cls_mask == cls_belong)
labeled_array[labeled_array == component] = 0
labeled_array[pos] = component
# 4) compute how many connected components fall inside each instance
for label in np.unique(obj_mask):
if label == 0:
continue
cur_centerness = np.where(obj_mask == label, labeled_array, 0)
num_components = np.unique(cur_centerness)
num_components = len(num_components[num_components != 0])
if num_components in evals:
evals[num_components] += 1
else:
evals[num_components] = 1
total_count += 1
# 5) compute precentages of connected components per instance
print("====================")
print("total number of instances in val set: " + str(total_count))
for num_components in evals:
percentage = evals[num_components] / total_count
print(str(num_components) + ' connected component(s): ' + str(percentage))
|
[
"numpy.absolute",
"segmentation_models_pytorch.utils.train.ValidEpoch",
"albumentations.Lambda",
"numpy.sum",
"scipy.ndimage.measurements.label",
"numpy.ones",
"segmentation_models_pytorch.utils.train.TrainEpoch",
"matplotlib.pyplot.figure",
"skimage.transform.resize",
"numpy.arange",
"segmentation_models_pytorch.utils.metrics.L1_centerness_object",
"os.path.join",
"numpy.unique",
"skimage.morphology.dilation",
"numpy.meshgrid",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.imshow",
"torch.load",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.close",
"skimage.morphology.binary_erosion",
"matplotlib.pyplot.xticks",
"numpy.divide",
"segmentation_models_pytorch.utils.losses.Weighted_MSELoss",
"segmentation_models_pytorch.FPN",
"numpy.all",
"torch.from_numpy",
"albumentations.Compose",
"numpy.count_nonzero",
"numpy.logical_and",
"numpy.zeros",
"PIL.Image.open",
"numpy.where",
"numpy.array",
"segmentation_models_pytorch.encoders.get_preprocessing_fn",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((702, 738), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""JPEGImages"""'], {}), "(DATA_DIR, 'JPEGImages')\n", (714, 738), False, 'import os\n'), ((747, 791), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""SegmentationObject"""'], {}), "(DATA_DIR, 'SegmentationObject')\n", (759, 791), False, 'import os\n'), ((802, 845), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""SegmentationClass"""'], {}), "(DATA_DIR, 'SegmentationClass')\n", (814, 845), False, 'import os\n'), ((859, 917), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""ImageSets/Segmentation/train.txt"""'], {}), "(DATA_DIR, 'ImageSets/Segmentation/train.txt')\n", (871, 917), False, 'import os\n'), ((930, 986), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""ImageSets/Segmentation/val.txt"""'], {}), "(DATA_DIR, 'ImageSets/Segmentation/val.txt')\n", (942, 986), False, 'import os\n'), ((9628, 9728), 'segmentation_models_pytorch.FPN', 'smp.FPN', ([], {'encoder_name': 'ENCODER', 'encoder_weights': 'ENCODER_WEIGHTS', 'classes': '(1)', 'activation': 'ACTIVATION'}), '(encoder_name=ENCODER, encoder_weights=ENCODER_WEIGHTS, classes=1,\n activation=ACTIVATION)\n', (9635, 9728), True, 'import segmentation_models_pytorch as smp\n'), ((9797, 9856), 'segmentation_models_pytorch.encoders.get_preprocessing_fn', 'smp.encoders.get_preprocessing_fn', (['ENCODER', 'ENCODER_WEIGHTS'], {}), '(ENCODER, ENCODER_WEIGHTS)\n', (9830, 9856), True, 'import segmentation_models_pytorch as smp\n'), ((10117, 10185), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': '(8)', 'shuffle': '(True)', 'num_workers': '(6)'}), '(train_dataset, batch_size=8, shuffle=True, num_workers=6)\n', (10127, 10185), False, 'from torch.utils.data import DataLoader\n'), ((10201, 10270), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_dataset'], {'batch_size': '(8)', 'shuffle': '(False)', 'num_workers': '(6)'}), '(valid_dataset, batch_size=8, shuffle=False, num_workers=6)\n', (10211, 10270), False, 'from 
torch.utils.data import DataLoader\n'), ((10433, 10506), 'segmentation_models_pytorch.utils.losses.Weighted_MSELoss', 'smp.utils.losses.Weighted_MSELoss', ([], {'ignore_val': 'background_centerness_const'}), '(ignore_val=background_centerness_const)\n', (10466, 10506), True, 'import segmentation_models_pytorch as smp\n'), ((10874, 10990), 'segmentation_models_pytorch.utils.train.TrainEpoch', 'smp.utils.train.TrainEpoch', (['model'], {'loss': 'loss', 'metrics': 'metrics', 'optimizer': 'optimizer', 'device': 'DEVICE', 'verbose': '(True)'}), '(model, loss=loss, metrics=metrics, optimizer=\n optimizer, device=DEVICE, verbose=True)\n', (10900, 10990), True, 'import segmentation_models_pytorch as smp\n'), ((11031, 11125), 'segmentation_models_pytorch.utils.train.ValidEpoch', 'smp.utils.train.ValidEpoch', (['model'], {'loss': 'loss', 'metrics': 'metrics', 'device': 'DEVICE', 'verbose': '(True)'}), '(model, loss=loss, metrics=metrics, device=DEVICE,\n verbose=True)\n', (11057, 11125), True, 'import segmentation_models_pytorch as smp\n'), ((12506, 12566), 'torch.load', 'torch.load', (['"""./best_model_centerness_weightedL2_invsqrt.pth"""'], {}), "('./best_model_centerness_weightedL2_invsqrt.pth')\n", (12516, 12566), False, 'import torch\n'), ((1587, 1613), 'numpy.where', 'np.where', (['(im == 0)', '(255)', 'im'], {}), '(im == 0, 255, im)\n', (1595, 1613), True, 'import numpy as np\n'), ((1688, 1703), 'skimage.transform.resize', 'resize', (['im', 'dim'], {}), '(im, dim)\n', (1694, 1703), False, 'from skimage.transform import resize\n'), ((1856, 1886), 'numpy.where', 'np.where', (['(mask == 255)', '(0)', 'mask'], {}), '(mask == 255, 0, mask)\n', (1864, 1886), True, 'import numpy as np\n'), ((1980, 2008), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (1990, 2008), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2029), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2025, 2029), True, 'import 
matplotlib.pyplot as plt\n'), ((2034, 2048), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2044, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2118), 'matplotlib.pyplot.imshow', 'plt.imshow', (['y[:, :, None]'], {'cmap': '"""gray_r"""', 'interpolation': '"""nearest"""'}), "(y[:, :, None], cmap='gray_r', interpolation='nearest')\n", (2063, 2118), True, 'import matplotlib.pyplot as plt\n'), ((2121, 2175), 'matplotlib.pyplot.imshow', 'plt.imshow', (['res_im'], {'interpolation': '"""nearest"""', 'alpha': '(0.4)'}), "(res_im, interpolation='nearest', alpha=0.4)\n", (2131, 2175), True, 'import matplotlib.pyplot as plt\n'), ((2180, 2266), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + folder_name + '/' + id + '.png')"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('./' + folder_name + '/' + id + '.png', dpi=300, bbox_inches=\n 'tight')\n", (2191, 2266), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2277), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2275, 2277), True, 'import matplotlib.pyplot as plt\n'), ((2324, 2344), 'numpy.zeros', 'np.zeros', (['mask.shape'], {}), '(mask.shape)\n', (2332, 2344), True, 'import numpy as np\n'), ((2362, 2377), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (2371, 2377), True, 'import numpy as np\n'), ((2696, 2724), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (2706, 2724), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2745), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (2741, 2745), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2764), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (2760, 2764), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2846), 'matplotlib.pyplot.imshow', 'plt.imshow', (['modified_mask[:, :, None]'], {'cmap': '"""gray_r"""', 'interpolation': '"""nearest"""'}), "(modified_mask[:, :, None], cmap='gray_r', interpolation='nearest')\n", 
(2779, 2846), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2903), 'matplotlib.pyplot.imshow', 'plt.imshow', (['res_im'], {'interpolation': '"""nearest"""', 'alpha': '(0.4)'}), "(res_im, interpolation='nearest', alpha=0.4)\n", (2859, 2903), True, 'import matplotlib.pyplot as plt\n'), ((2908, 2998), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./' + folder_name + '/' + id[:-3] + '.png')"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('./' + folder_name + '/' + id[:-3] + '.png', dpi=300,\n bbox_inches='tight')\n", (2919, 2998), True, 'import matplotlib.pyplot as plt\n'), ((2999, 3010), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3008, 3010), True, 'import matplotlib.pyplot as plt\n'), ((9023, 9047), 'albumentations.Compose', 'albu.Compose', (['_transform'], {}), '(_transform)\n', (9035, 9047), True, 'import albumentations as albu\n'), ((10628, 10668), 'segmentation_models_pytorch.utils.metrics.L1_centerness_object', 'smp.utils.metrics.L1_centerness_object', ([], {}), '()\n', (10666, 10668), True, 'import segmentation_models_pytorch as smp\n'), ((14110, 14148), 'os.path.join', 'os.path.join', (['y_dir', "(image_id + '.png')"], {}), "(y_dir, image_id + '.png')\n", (14122, 14148), False, 'import os\n'), ((14194, 14234), 'os.path.join', 'os.path.join', (['cls_dir', "(image_id + '.png')"], {}), "(cls_dir, image_id + '.png')\n", (14206, 14234), False, 'import os\n'), ((14662, 14692), 'scipy.ndimage.measurements.label', 'measurements.label', (['centerness'], {}), '(centerness)\n', (14680, 14692), False, 'from scipy.ndimage import measurements\n'), ((15040, 15078), 'numpy.where', 'np.where', (['(obj_mask == 255)', '(0)', 'obj_mask'], {}), '(obj_mask == 255, 0, obj_mask)\n', (15048, 15078), True, 'import numpy as np\n'), ((15134, 15172), 'numpy.where', 'np.where', (['(cls_mask == 255)', '(0)', 'cls_mask'], {}), '(cls_mask == 255, 0, cls_mask)\n', (15142, 15172), True, 'import numpy as np\n'), ((15272, 15296), 'numpy.unique', 'np.unique', 
(['labeled_array'], {}), '(labeled_array)\n', (15281, 15296), True, 'import numpy as np\n'), ((15957, 15976), 'numpy.unique', 'np.unique', (['obj_mask'], {}), '(obj_mask)\n', (15966, 15976), True, 'import numpy as np\n'), ((1516, 1578), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""SegmentationObject"""', "(id[:-3] + '.png')"], {}), "(DATA_DIR, 'SegmentationObject', id[:-3] + '.png')\n", (1528, 1578), False, 'import os\n'), ((2496, 2530), 'numpy.array', 'np.array', (['(mask == label)'], {'dtype': 'int'}), '(mask == label, dtype=int)\n', (2504, 2530), True, 'import numpy as np\n'), ((2550, 2574), 'skimage.morphology.binary_erosion', 'binary_erosion', (['cur_mask'], {}), '(cur_mask)\n', (2564, 2574), False, 'from skimage.morphology import dilation, thin, binary_erosion\n'), ((2594, 2612), 'skimage.morphology.dilation', 'dilation', (['cur_mask'], {}), '(cur_mask)\n', (2602, 2612), False, 'from skimage.morphology import dilation, thin, binary_erosion\n'), ((5052, 5082), 'numpy.where', 'np.where', (['(mask == 255)', '(0)', 'mask'], {}), '(mask == 255, 0, mask)\n', (5060, 5082), True, 'import numpy as np\n'), ((5177, 5197), 'numpy.zeros', 'np.zeros', (['mask.shape'], {}), '(mask.shape)\n', (5185, 5197), True, 'import numpy as np\n'), ((5215, 5234), 'numpy.ones', 'np.ones', (['mask.shape'], {}), '(mask.shape)\n', (5222, 5234), True, 'import numpy as np\n'), ((5278, 5293), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (5287, 5293), True, 'import numpy as np\n'), ((7430, 7464), 'numpy.all', 'np.all', (['(centerness[mask == 0] == 0)'], {}), '(centerness[mask == 0] == 0)\n', (7436, 7464), True, 'import numpy as np\n'), ((7480, 7513), 'numpy.all', 'np.all', (['(centerness[mask != 0] > 0)'], {}), '(centerness[mask != 0] > 0)\n', (7486, 7513), True, 'import numpy as np\n'), ((8915, 8950), 'albumentations.Lambda', 'albu.Lambda', ([], {'image': 'preprocessing_fn'}), '(image=preprocessing_fn)\n', (8926, 8950), True, 'import albumentations as albu\n'), ((8960, 9004), 
'albumentations.Lambda', 'albu.Lambda', ([], {'image': 'to_tensor', 'mask': 'to_tensor'}), '(image=to_tensor, mask=to_tensor)\n', (8971, 9004), True, 'import albumentations as albu\n'), ((15408, 15427), 'numpy.unique', 'np.unique', (['cls_mask'], {}), '(cls_mask)\n', (15417, 15427), True, 'import numpy as np\n'), ((15706, 15772), 'numpy.logical_and', 'np.logical_and', (['(labeled_array == component)', '(cls_mask == cls_belong)'], {}), '(labeled_array == component, cls_mask == cls_belong)\n', (15720, 15772), True, 'import numpy as np\n'), ((16047, 16092), 'numpy.where', 'np.where', (['(obj_mask == label)', 'labeled_array', '(0)'], {}), '(obj_mask == label, labeled_array, 0)\n', (16055, 16092), True, 'import numpy as np\n'), ((16118, 16143), 'numpy.unique', 'np.unique', (['cur_centerness'], {}), '(cur_centerness)\n', (16127, 16143), True, 'import numpy as np\n'), ((4184, 4227), 'os.path.join', 'os.path.join', (['images_dir', "(image_id + '.jpg')"], {}), "(images_dir, image_id + '.jpg')\n", (4196, 4227), False, 'import os\n'), ((4280, 4322), 'os.path.join', 'os.path.join', (['masks_dir', "(image_id + '.png')"], {}), "(masks_dir, image_id + '.png')\n", (4292, 4322), False, 'import os\n'), ((5467, 5491), 'skimage.morphology.binary_erosion', 'binary_erosion', (['cur_mask'], {}), '(cur_mask)\n', (5481, 5491), False, 'from skimage.morphology import dilation, thin, binary_erosion\n'), ((5515, 5533), 'skimage.morphology.dilation', 'dilation', (['cur_mask'], {}), '(cur_mask)\n', (5523, 5533), False, 'from skimage.morphology import dilation, thin, binary_erosion\n'), ((5854, 5888), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {'indexing': '"""ij"""'}), "(xx, yy, indexing='ij')\n", (5865, 5888), True, 'import numpy as np\n'), ((5910, 5944), 'numpy.where', 'np.where', (['(cur_mask == 1)', 'grid_x', '(0)'], {}), '(cur_mask == 1, grid_x, 0)\n', (5918, 5944), True, 'import numpy as np\n'), ((5966, 6000), 'numpy.where', 'np.where', (['(cur_mask == 1)', 'grid_y', '(0)'], {}), 
'(cur_mask == 1, grid_y, 0)\n', (5974, 6000), True, 'import numpy as np\n'), ((6553, 6587), 'numpy.where', 'np.where', (['(mask == label)', 'scores', '(0)'], {}), '(mask == label, scores, 0)\n', (6561, 6587), True, 'import numpy as np\n'), ((5549, 5575), 'numpy.count_nonzero', 'np.count_nonzero', (['cur_mask'], {}), '(cur_mask)\n', (5565, 5575), True, 'import numpy as np\n'), ((5774, 5798), 'numpy.arange', 'np.arange', (['mask.shape[0]'], {}), '(mask.shape[0])\n', (5783, 5798), True, 'import numpy as np\n'), ((5800, 5824), 'numpy.arange', 'np.arange', (['mask.shape[1]'], {}), '(mask.shape[1])\n', (5809, 5824), True, 'import numpy as np\n'), ((6295, 6325), 'numpy.absolute', 'np.absolute', (['(grid_x - center_x)'], {}), '(grid_x - center_x)\n', (6306, 6325), True, 'import numpy as np\n'), ((6362, 6392), 'numpy.absolute', 'np.absolute', (['(grid_y - center_y)'], {}), '(grid_y - center_y)\n', (6373, 6392), True, 'import numpy as np\n'), ((6413, 6445), 'numpy.sqrt', 'np.sqrt', (['(x_sqr_diff + y_sqr_diff)'], {}), '(x_sqr_diff + y_sqr_diff)\n', (6420, 6445), True, 'import numpy as np\n'), ((6511, 6530), 'numpy.divide', 'np.divide', (['(10)', 'dist'], {}), '(10, dist)\n', (6520, 6530), True, 'import numpy as np\n'), ((6705, 6719), 'numpy.sum', 'np.sum', (['scores'], {}), '(scores)\n', (6711, 6719), True, 'import numpy as np\n'), ((6757, 6800), 'numpy.where', 'np.where', (['(mask == label)', 'scores', 'centerness'], {}), '(mask == label, scores, centerness)\n', (6765, 6800), True, 'import numpy as np\n'), ((6917, 6966), 'numpy.where', 'np.where', (['(mask == label)', '(scores + inc)', 'centerness'], {}), '(mask == label, scores + inc, centerness)\n', (6925, 6966), True, 'import numpy as np\n'), ((7387, 7413), 'numpy.zeros', 'np.zeros', (['centerness.shape'], {}), '(centerness.shape)\n', (7395, 7413), True, 'import numpy as np\n'), ((14722, 14746), 'PIL.Image.open', 'Image.open', (['masks_fps[i]'], {}), '(masks_fps[i])\n', (14732, 14746), False, 'from PIL import Image\n'), 
((14819, 14846), 'PIL.Image.open', 'Image.open', (['semantic_fps[i]'], {}), '(semantic_fps[i])\n', (14829, 14846), False, 'from PIL import Image\n'), ((15506, 15569), 'numpy.logical_and', 'np.logical_and', (['(labeled_array == component)', '(cls_mask == cur_cls)'], {}), '(labeled_array == component, cls_mask == cur_cls)\n', (15520, 15569), True, 'import numpy as np\n'), ((1735, 1797), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""SegmentationObject"""', "(id[:-3] + '.png')"], {}), "(DATA_DIR, 'SegmentationObject', id[:-3] + '.png')\n", (1747, 1797), False, 'import os\n'), ((4531, 4561), 'PIL.Image.open', 'Image.open', (['self.images_fps[i]'], {}), '(self.images_fps[i])\n', (4541, 4561), False, 'from PIL import Image\n'), ((4623, 4652), 'PIL.Image.open', 'Image.open', (['self.masks_fps[i]'], {}), '(self.masks_fps[i])\n', (4633, 4652), False, 'from PIL import Image\n'), ((6034, 6048), 'numpy.sum', 'np.sum', (['grid_x'], {}), '(grid_x)\n', (6040, 6048), True, 'import numpy as np\n'), ((6051, 6075), 'numpy.count_nonzero', 'np.count_nonzero', (['grid_x'], {}), '(grid_x)\n', (6067, 6075), True, 'import numpy as np\n'), ((6077, 6091), 'numpy.sum', 'np.sum', (['grid_y'], {}), '(grid_y)\n', (6083, 6091), True, 'import numpy as np\n'), ((6094, 6118), 'numpy.count_nonzero', 'np.count_nonzero', (['grid_y'], {}), '(grid_y)\n', (6110, 6118), True, 'import numpy as np\n'), ((6866, 6887), 'numpy.sum', 'np.sum', (['(mask == label)'], {}), '(mask == label)\n', (6872, 6887), True, 'import numpy as np\n'), ((14397, 14420), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (14413, 14420), False, 'import torch\n'), ((6848, 6862), 'numpy.sum', 'np.sum', (['scores'], {}), '(scores)\n', (6854, 6862), True, 'import numpy as np\n'), ((7287, 7308), 'numpy.sum', 'np.sum', (['(mask == label)'], {}), '(mask == label)\n', (7293, 7308), True, 'import numpy as np\n')]
|
import unittest.mock as umock
from argparse import ArgumentTypeError
import numpy as np
import pytest
from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, \
do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image
test_array = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
test_image = read_image("test_images/test_img.png")
args_mock = umock.MagicMock()
# zesvetleni a ztmaveni, TODO udelat aby mock vzdy vracel 50, pro pripad ze bude vic unit testu nez hodnot
args_mock.darken = [50, 50, 50, 50, 80, 70, 80, 10, 80] # TODO mockovat veci v danem testu
args_mock.lighten = [50, 50, 50, 50, 100, 70, 40, 50, 100]
def test_read_image():
assert read_image("test_images/test_img.png").ndim > 0 # jakykoliv non fail vysledek je ok
def test_image_fail():
with pytest.raises(FileNotFoundError):
read_image("nonexistent_file_please_dont_create_file_named_like_this.pls")
def test_do_embossing():
out = do_embossing(test_array)
def test_do_edge_detection():
out = do_edge_detection(test_array)
def test_do_blur_5x5():
out = do_blur_5x5(test_array)
def test_do_blur_3x3():
out = do_blur_3x3(test_array)
def test_do_sharpen():
out = do_sharpen(test_array)
def test_do_bw():
out = do_bw(test_array)
def test_do_inverse():
out = do_inverse(test_array)
assert (out == 254).all()
def test_do_darken():
out = do_darken(test_array, args_mock)
assert np.all(out == 0.5)
def test_do_lighten():
out = do_lighten(test_array, args_mock)
assert np.all(out == 1.5)
def test_do_mirror():
out = do_mirror(test_array)
assert ((test_array == out).all()) # po zrcadleni musi byt identicke
def test_do_rotate():
out = do_rotate(test_array)
assert ((test_array == out).all()) # po rotaci musi byt identicke
def test_percentage_invalid():
a = None
with pytest.raises(ArgumentTypeError):
percentage(-1)
assert (a is None)
def test_percentage_high_value():
a = None
a = percentage(10000000)
assert a == 10000000
def test_percentage_zero():
a = None
a = percentage(0)
assert a == 0
def test_rotation_identity():
# pokud otocime jeden obrazek 4x musi byt stejny jako puvodni
out = do_rotate(test_image)
out = do_rotate(out)
out = do_rotate(out)
final = do_rotate(out)
assert (final == test_image).all()
def test_mirror_identity():
out = do_mirror(test_image)
final = do_mirror(out)
assert (final == test_image).all()
def test_multiple_bw():
# vicero pouzitych bw musi vracet stejnou hodnotu
out = do_bw(test_image)
second_out = do_bw(out)
third_out = do_bw(second_out)
assert np.logical_and((out == second_out).all(), (second_out == third_out).all())
def test_compare_rotate():
out = do_rotate(test_image)
test_input = read_image("test_images/rotate.png")
assert (out == test_input).all()
def test_compare_mirror():
out = do_mirror(test_image)
test_input = read_image("test_images/mirror.png")
assert (out == test_input).all()
def test_compare_inverse():
out = do_inverse(test_image)
test_input = read_image("test_images/inverse.png")
assert (out == test_input).all()
# TODO before saving there are some slight data diference, that causes fail even if images are same
def test_compare_bw():
out = do_bw(test_image)
save_image(out, "test_images/bwOut.png")
output = read_image("test_images/bwOut.png")
test_input = read_image("test_images/bw.png")
assert (output == test_input).all()
# TODO before saving there are some slight data diference, that causes fail even if images are same
def test_compare_lighten():
out = do_lighten(test_image, args_mock)
save_image(out, "test_images/lightenOut.png")
output = read_image("test_images/lightenOut.png")
test_input = read_image("test_images/lighten.png")
assert (output == test_input).all()
# TODO before saving there are some slight data diference, that causes fail even if images are same
def test_compare_darken():
out = do_darken(test_image, args_mock)
save_image(out, "test_images/darkenOut.png")
output = read_image("test_images/darkenOut.png")
test_input = read_image("test_images/darken.png")
assert (output == test_input).all()
def test_argument_chaining_one_convolution():
    # Exercises chaining and mutual compatibility of the filters, and that
    # the tests do not destroy each other's reference files.
    # Only one convolution filter is exercised (they share the same code;
    # only the kernel differs) to keep the runtime down.
    img = do_mirror(test_image)
    img = do_rotate(img)
    img = do_lighten(img, args_mock)
    img = do_inverse(img)
    img = do_darken(img, args_mock)
    img = do_bw(img)
    img = do_sharpen(img)
    # Second pass restarts from the original image.
    img = do_mirror(test_image)
    img = do_rotate(img)
    img = do_lighten(img, args_mock)
    img = do_inverse(img)
    img = do_darken(img, args_mock)
# NOTE: the block below is a module-level string literal, never executed.
# It parks reference-comparison tests for the convolution filters that are
# too slow to run routinely (the Czech note "casove narocne testy" means
# "time-consuming tests").
"""
casove narocne testy
def test_compare_sharpen():
    out = do_sharpen(test_image)
    test_input = read_image("test_images/sharpen.png")
    assert (out == test_input).all()
def test_compare_blur_3x3():
    out = do_blur_3x3(test_image)
    test_input = read_image("test_images/blur3.png")
    assert (out == test_input).all()
def test_compare_blur_5x5():
    out = do_blur_5x5(test_image)
    test_input = read_image("test_images/blur5.png")
    assert (out == test_input).all()
def test_compare_edge_detection():
    out = do_edge_detection(test_image)
    test_input = read_image("test_images/edges.png")
    assert (out == test_input).all()
def test_compare_embossing():
    out = do_embossing(test_image)
    test_input = read_image("test_images/embossing.png")
    assert (out == test_input).all()
"""
|
[
"functions.do_bw",
"functions.do_lighten",
"unittest.mock.MagicMock",
"functions.percentage",
"functions.do_darken",
"functions.do_inverse",
"functions.do_blur_5x5",
"numpy.all",
"functions.do_rotate",
"functions.do_embossing",
"functions.save_image",
"pytest.raises",
"numpy.array",
"functions.do_mirror",
"functions.do_sharpen",
"functions.read_image",
"functions.do_blur_3x3",
"functions.do_edge_detection"
] |
[((316, 359), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n', (324, 359), True, 'import numpy as np\n'), ((374, 412), 'functions.read_image', 'read_image', (['"""test_images/test_img.png"""'], {}), "('test_images/test_img.png')\n", (384, 412), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((426, 443), 'unittest.mock.MagicMock', 'umock.MagicMock', ([], {}), '()\n', (441, 443), True, 'import unittest.mock as umock\n'), ((1011, 1035), 'functions.do_embossing', 'do_embossing', (['test_array'], {}), '(test_array)\n', (1023, 1035), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1078, 1107), 'functions.do_edge_detection', 'do_edge_detection', (['test_array'], {}), '(test_array)\n', (1095, 1107), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1144, 1167), 'functions.do_blur_5x5', 'do_blur_5x5', (['test_array'], {}), '(test_array)\n', (1155, 1167), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1204, 1227), 'functions.do_blur_3x3', 'do_blur_3x3', (['test_array'], {}), '(test_array)\n', (1215, 1227), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1263, 1285), 'functions.do_sharpen', 'do_sharpen', (['test_array'], {}), 
'(test_array)\n', (1273, 1285), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1316, 1333), 'functions.do_bw', 'do_bw', (['test_array'], {}), '(test_array)\n', (1321, 1333), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1369, 1391), 'functions.do_inverse', 'do_inverse', (['test_array'], {}), '(test_array)\n', (1379, 1391), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1456, 1488), 'functions.do_darken', 'do_darken', (['test_array', 'args_mock'], {}), '(test_array, args_mock)\n', (1465, 1488), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1500, 1518), 'numpy.all', 'np.all', (['(out == 0.5)'], {}), '(out == 0.5)\n', (1506, 1518), True, 'import numpy as np\n'), ((1554, 1587), 'functions.do_lighten', 'do_lighten', (['test_array', 'args_mock'], {}), '(test_array, args_mock)\n', (1564, 1587), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1599, 1617), 'numpy.all', 'np.all', (['(out == 1.5)'], {}), '(out == 1.5)\n', (1605, 1617), True, 'import numpy as np\n'), ((1652, 1673), 'functions.do_mirror', 'do_mirror', (['test_array'], {}), '(test_array)\n', (1661, 1673), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, 
do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1782, 1803), 'functions.do_rotate', 'do_rotate', (['test_array'], {}), '(test_array)\n', (1791, 1803), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2067, 2087), 'functions.percentage', 'percentage', (['(10000000)'], {}), '(10000000)\n', (2077, 2087), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2164, 2177), 'functions.percentage', 'percentage', (['(0)'], {}), '(0)\n', (2174, 2177), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2304, 2325), 'functions.do_rotate', 'do_rotate', (['test_image'], {}), '(test_image)\n', (2313, 2325), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2336, 2350), 'functions.do_rotate', 'do_rotate', (['out'], {}), '(out)\n', (2345, 2350), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2361, 2375), 'functions.do_rotate', 'do_rotate', (['out'], {}), '(out)\n', (2370, 2375), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2388, 2402), 'functions.do_rotate', 'do_rotate', (['out'], {}), '(out)\n', (2397, 2402), 
False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2482, 2503), 'functions.do_mirror', 'do_mirror', (['test_image'], {}), '(test_image)\n', (2491, 2503), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2516, 2530), 'functions.do_mirror', 'do_mirror', (['out'], {}), '(out)\n', (2525, 2530), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2660, 2677), 'functions.do_bw', 'do_bw', (['test_image'], {}), '(test_image)\n', (2665, 2677), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2695, 2705), 'functions.do_bw', 'do_bw', (['out'], {}), '(out)\n', (2700, 2705), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2722, 2739), 'functions.do_bw', 'do_bw', (['second_out'], {}), '(second_out)\n', (2727, 2739), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((2866, 2887), 'functions.do_rotate', 'do_rotate', (['test_image'], {}), '(test_image)\n', (2875, 2887), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, 
save_image\n'), ((2905, 2941), 'functions.read_image', 'read_image', (['"""test_images/rotate.png"""'], {}), "('test_images/rotate.png')\n", (2915, 2941), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3018, 3039), 'functions.do_mirror', 'do_mirror', (['test_image'], {}), '(test_image)\n', (3027, 3039), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3057, 3093), 'functions.read_image', 'read_image', (['"""test_images/mirror.png"""'], {}), "('test_images/mirror.png')\n", (3067, 3093), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3171, 3193), 'functions.do_inverse', 'do_inverse', (['test_image'], {}), '(test_image)\n', (3181, 3193), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3211, 3248), 'functions.read_image', 'read_image', (['"""test_images/inverse.png"""'], {}), "('test_images/inverse.png')\n", (3221, 3248), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3421, 3438), 'functions.do_bw', 'do_bw', (['test_image'], {}), '(test_image)\n', (3426, 3438), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3443, 3483), 
'functions.save_image', 'save_image', (['out', '"""test_images/bwOut.png"""'], {}), "(out, 'test_images/bwOut.png')\n", (3453, 3483), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3497, 3532), 'functions.read_image', 'read_image', (['"""test_images/bwOut.png"""'], {}), "('test_images/bwOut.png')\n", (3507, 3532), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3550, 3582), 'functions.read_image', 'read_image', (['"""test_images/bw.png"""'], {}), "('test_images/bw.png')\n", (3560, 3582), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3763, 3796), 'functions.do_lighten', 'do_lighten', (['test_image', 'args_mock'], {}), '(test_image, args_mock)\n', (3773, 3796), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3801, 3846), 'functions.save_image', 'save_image', (['out', '"""test_images/lightenOut.png"""'], {}), "(out, 'test_images/lightenOut.png')\n", (3811, 3846), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3860, 3900), 'functions.read_image', 'read_image', (['"""test_images/lightenOut.png"""'], {}), "('test_images/lightenOut.png')\n", (3870, 3900), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, 
do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((3918, 3955), 'functions.read_image', 'read_image', (['"""test_images/lighten.png"""'], {}), "('test_images/lighten.png')\n", (3928, 3955), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4135, 4167), 'functions.do_darken', 'do_darken', (['test_image', 'args_mock'], {}), '(test_image, args_mock)\n', (4144, 4167), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4172, 4216), 'functions.save_image', 'save_image', (['out', '"""test_images/darkenOut.png"""'], {}), "(out, 'test_images/darkenOut.png')\n", (4182, 4216), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4230, 4269), 'functions.read_image', 'read_image', (['"""test_images/darkenOut.png"""'], {}), "('test_images/darkenOut.png')\n", (4240, 4269), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4287, 4323), 'functions.read_image', 'read_image', (['"""test_images/darken.png"""'], {}), "('test_images/darken.png')\n", (4297, 4323), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4630, 4651), 'functions.do_mirror', 'do_mirror', (['test_image'], {}), '(test_image)\n', (4639, 4651), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, 
do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4662, 4676), 'functions.do_rotate', 'do_rotate', (['out'], {}), '(out)\n', (4671, 4676), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4687, 4713), 'functions.do_lighten', 'do_lighten', (['out', 'args_mock'], {}), '(out, args_mock)\n', (4697, 4713), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4724, 4739), 'functions.do_inverse', 'do_inverse', (['out'], {}), '(out)\n', (4734, 4739), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4750, 4775), 'functions.do_darken', 'do_darken', (['out', 'args_mock'], {}), '(out, args_mock)\n', (4759, 4775), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4786, 4796), 'functions.do_bw', 'do_bw', (['out'], {}), '(out)\n', (4791, 4796), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4807, 4822), 'functions.do_sharpen', 'do_sharpen', (['out'], {}), '(out)\n', (4817, 4822), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4833, 4854), 'functions.do_mirror', 
'do_mirror', (['test_image'], {}), '(test_image)\n', (4842, 4854), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4865, 4879), 'functions.do_rotate', 'do_rotate', (['out'], {}), '(out)\n', (4874, 4879), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4890, 4916), 'functions.do_lighten', 'do_lighten', (['out', 'args_mock'], {}), '(out, args_mock)\n', (4900, 4916), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4927, 4942), 'functions.do_inverse', 'do_inverse', (['out'], {}), '(out)\n', (4937, 4942), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((4953, 4978), 'functions.do_darken', 'do_darken', (['out', 'args_mock'], {}), '(out, args_mock)\n', (4962, 4978), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((857, 889), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (870, 889), False, 'import pytest\n'), ((899, 973), 'functions.read_image', 'read_image', (['"""nonexistent_file_please_dont_create_file_named_like_this.pls"""'], {}), "('nonexistent_file_please_dont_create_file_named_like_this.pls')\n", (909, 973), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, 
do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((1930, 1962), 'pytest.raises', 'pytest.raises', (['ArgumentTypeError'], {}), '(ArgumentTypeError)\n', (1943, 1962), False, 'import pytest\n'), ((1972, 1986), 'functions.percentage', 'percentage', (['(-1)'], {}), '(-1)\n', (1982, 1986), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n'), ((738, 776), 'functions.read_image', 'read_image', (['"""test_images/test_img.png"""'], {}), "('test_images/test_img.png')\n", (748, 776), False, 'from functions import do_embossing, do_edge_detection, do_blur_5x5, do_blur_3x3, do_sharpen, do_bw, do_darken, do_inverse, do_lighten, do_mirror, do_rotate, percentage, read_image, save_image\n')]
|
import math
def equation(x):
    """Height of the unit circle above abscissa *x*: y = sqrt(1 - x**2)."""
    x_squared = math.pow(x, 2)
    return math.sqrt(1 - x_squared)
def mean(big, small):
    """Return the arithmetic mean of the two arguments."""
    return (big + small) / 2
# Estimates the area of a quarter unit circle (pi/4 ~ 0.7854) by averaging a
# rectangle sum over right endpoints ("small") with one that also counts the
# x=0 strip ("big"), for y = sqrt(1 - x^2) on [0, 1].
piece, radius = 0, 1
n = int(input())  # number of strips, read from stdin
small = 0
bottom = []
# Right-hand x coordinate of each strip; every cumulative step is rounded to
# 6 decimal places via a string round-trip, so the grid is only approximately
# uniform.
# NOTE(review): the per-step rounding can push the last grid point slightly
# above 1 for some n, which would make equation() take sqrt of a negative
# number and raise — verify for large n.
for i in range(0,n):
    piece = float("%.6f" % (piece+(radius/n)))
    bottom.append(piece)
print(bottom)
top = []
# Circle height above each grid point.
for i in bottom:
    a = equation(i)
    top.append(a)
#print(top)
width = bottom[0]  # strip width (the first step)
# "big" seeds the sum with the x=0 strip (height = radius) before adding all
# right-endpoint strips; "small" adds right-endpoint strips only.
big = radius * width
for i in top:
    big += i * width
    small += i * width
#print(small)
res = mean(big,small)
print(res)
|
[
"math.pow"
] |
[((51, 65), 'math.pow', 'math.pow', (['x', '(2)'], {}), '(x, 2)\n', (59, 65), False, 'import math\n')]
|
from flask import Flask
from flask_cors import CORS, cross_origin
from conversational_wrapper import ConversationalWrapper
import argparse
# Command-line interface: the only option is the path to the Markdown corpus.
parser = argparse.ArgumentParser()
parser.add_argument('--path', help='The path to your collection of Markdown files.', type=str)
args = parser.parse_args()
# Flask application with permissive CORS so browser front-ends served from
# other origins can call the API.
app = Flask(__name__)
cors = CORS(app)
# Single shared wrapper instance, constructed once at startup from the
# supplied path.
cw = ConversationalWrapper(args.path)
@app.route('/query/<query>')
@cross_origin()
def respond_query(query):
    """Answer a conversational query taken from the URL path segment.

    Returns whatever ``ConversationalWrapper.respond`` produces for *query*
    (the exact shape is defined by the wrapper, not visible here).
    """
    return cw.respond(query)
# Start Flask's built-in development server when executed as a script.
if __name__ == '__main__':
    app.run()
|
[
"argparse.ArgumentParser",
"flask_cors.CORS",
"flask.Flask",
"flask_cors.cross_origin",
"conversational_wrapper.ConversationalWrapper"
] |
[((149, 174), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (172, 174), False, 'import argparse\n'), ((304, 319), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (309, 319), False, 'from flask import Flask\n'), ((327, 336), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (331, 336), False, 'from flask_cors import CORS, cross_origin\n'), ((342, 374), 'conversational_wrapper.ConversationalWrapper', 'ConversationalWrapper', (['args.path'], {}), '(args.path)\n', (363, 374), False, 'from conversational_wrapper import ConversationalWrapper\n'), ((406, 420), 'flask_cors.cross_origin', 'cross_origin', ([], {}), '()\n', (418, 420), False, 'from flask_cors import CORS, cross_origin\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
r"""
A example workflow for task switch.
This example will create four task in single workflow, with three shell task and one switch task. Task switch
have one upstream which we declare explicit with syntax `parent >> switch`, and two downstream automatically
set dependence by switch task by passing parameter `condition`. The graph of this workflow like:
--> switch_child_1
/
parent -> switch ->
\
--> switch_child_2
.
"""
from pydolphinscheduler.core.process_definition import ProcessDefinition
from pydolphinscheduler.tasks.shell import Shell
from pydolphinscheduler.tasks.switch import Branch, Default, Switch, SwitchCondition
with ProcessDefinition(
    name="task_switch_example",
    tenant="tenant_exists",
) as pd:
    parent = Shell(name="parent", command="echo parent")
    # The two possible downstream branches of the switch task.
    switch_child_1 = Shell(name="switch_child_1", command="echo switch_child_1")
    switch_child_2 = Shell(name="switch_child_2", command="echo switch_child_2")
    # Route to switch_child_1 when ${var} > 1, otherwise fall through to the
    # default branch (switch_child_2). The switch wires its downstream tasks
    # itself via this condition, so only the upstream dependency is declared
    # explicitly below.
    switch_condition = SwitchCondition(
        Branch(condition="${var} > 1", task=switch_child_1),
        Default(task=switch_child_2),
    )
    switch = Switch(name="switch", condition=switch_condition)
    parent >> switch
    # Submit the workflow definition to the DolphinScheduler server.
    pd.submit()
|
[
"pydolphinscheduler.tasks.switch.Switch",
"pydolphinscheduler.tasks.switch.Default",
"pydolphinscheduler.tasks.shell.Shell",
"pydolphinscheduler.tasks.switch.Branch",
"pydolphinscheduler.core.process_definition.ProcessDefinition"
] |
[((1510, 1579), 'pydolphinscheduler.core.process_definition.ProcessDefinition', 'ProcessDefinition', ([], {'name': '"""task_switch_example"""', 'tenant': '"""tenant_exists"""'}), "(name='task_switch_example', tenant='tenant_exists')\n", (1527, 1579), False, 'from pydolphinscheduler.core.process_definition import ProcessDefinition\n'), ((1611, 1654), 'pydolphinscheduler.tasks.shell.Shell', 'Shell', ([], {'name': '"""parent"""', 'command': '"""echo parent"""'}), "(name='parent', command='echo parent')\n", (1616, 1654), False, 'from pydolphinscheduler.tasks.shell import Shell\n'), ((1676, 1735), 'pydolphinscheduler.tasks.shell.Shell', 'Shell', ([], {'name': '"""switch_child_1"""', 'command': '"""echo switch_child_1"""'}), "(name='switch_child_1', command='echo switch_child_1')\n", (1681, 1735), False, 'from pydolphinscheduler.tasks.shell import Shell\n'), ((1757, 1816), 'pydolphinscheduler.tasks.shell.Shell', 'Shell', ([], {'name': '"""switch_child_2"""', 'command': '"""echo switch_child_2"""'}), "(name='switch_child_2', command='echo switch_child_2')\n", (1762, 1816), False, 'from pydolphinscheduler.tasks.shell import Shell\n'), ((1976, 2025), 'pydolphinscheduler.tasks.switch.Switch', 'Switch', ([], {'name': '"""switch"""', 'condition': 'switch_condition'}), "(name='switch', condition=switch_condition)\n", (1982, 2025), False, 'from pydolphinscheduler.tasks.switch import Branch, Default, Switch, SwitchCondition\n'), ((1865, 1916), 'pydolphinscheduler.tasks.switch.Branch', 'Branch', ([], {'condition': '"""${var} > 1"""', 'task': 'switch_child_1'}), "(condition='${var} > 1', task=switch_child_1)\n", (1871, 1916), False, 'from pydolphinscheduler.tasks.switch import Branch, Default, Switch, SwitchCondition\n'), ((1926, 1954), 'pydolphinscheduler.tasks.switch.Default', 'Default', ([], {'task': 'switch_child_2'}), '(task=switch_child_2)\n', (1933, 1954), False, 'from pydolphinscheduler.tasks.switch import Branch, Default, Switch, SwitchCondition\n')]
|
# Copyright 2022 eprbell
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import Dict, Optional, Set, Union
from rp2.rp2_error import RP2ValueError
# Configuration file keywords
class Keyword(Enum):
    """Keywords recognized in RP2 configuration and input files.

    Each member's value is the literal (lower-case) string used in the
    files themselves.
    """

    AIRDROP: str = "airdrop"
    ASSET: str = "asset"
    BUY: str = "buy"
    CRYPTO_FEE: str = "crypto_fee"
    CRYPTO_IN: str = "crypto_in"
    CRYPTO_OUT_NO_FEE: str = "crypto_out_no_fee"
    CRYPTO_OUT_WITH_FEE: str = "crypto_out_with_fee"
    CRYPTO_RECEIVED: str = "crypto_received"
    CRYPTO_SENT: str = "crypto_sent"
    DONATE: str = "donate"
    EXCHANGE: str = "exchange"
    FEE: str = "fee"
    FIAT_FEE: str = "fiat_fee"
    FIAT_IN_NO_FEE: str = "fiat_in_no_fee"
    FIAT_IN_WITH_FEE: str = "fiat_in_with_fee"
    FIAT_OUT_NO_FEE: str = "fiat_out_no_fee"
    FIAT_TICKER: str = "fiat_ticker"
    FROM_EXCHANGE: str = "from_exchange"
    FROM_HOLDER: str = "from_holder"
    GIFT: str = "gift"
    HARDFORK: str = "hardfork"
    HISTORICAL_MARKET_DATA: str = "historical_market_data" # Deprecated
    HISTORICAL_PAIR_CONVERTERS: str = "historical_pair_converters"
    HISTORICAL_PRICE_CLOSE: str = "close"
    HISTORICAL_PRICE_HIGH: str = "high"
    HISTORICAL_PRICE_LOW: str = "low"
    HISTORICAL_PRICE_NEAREST: str = "nearest"
    HISTORICAL_PRICE_OPEN: str = "open"
    HOLDER: str = "holder"
    IN: str = "in"
    INCOME: str = "income"
    INTEREST: str = "interest"
    INTRA: str = "intra"
    IN_HEADER: str = "in_header"
    INTRA_HEADER: str = "intra_header"
    IS_SPOT_PRICE_FROM_WEB: str = "is_spot_price_from_web"
    MINING: str = "mining"
    MOVE: str = "move"
    NATIVE_FIAT: str = "native_fiat"
    NOTES: str = "notes"
    OUT: str = "out"
    OUT_HEADER: str = "out_header"
    PLUGIN: str = "plugin"
    RAW_DATA: str = "raw_data"
    SELL: str = "sell"
    SPOT_PRICE: str = "spot_price"
    STAKING: str = "staking"
    TIMESTAMP: str = "timestamp"
    TO_EXCHANGE: str = "to_exchange"
    TO_HOLDER: str = "to_holder"
    TRANSACTION_HINTS: str = "transaction_hints"
    TRANSACTION_TYPE: str = "transaction_type"
    UNIQUE_ID: str = "unique_id"
    # NOTE(review): unlike the other members this one has no str annotation —
    # cosmetic only, the Enum member behaves the same.
    UNKNOWN = "__unknown"
    WAGES: str = "wages"

    @classmethod
    def has_value(cls, value: str) -> bool:
        """Return True if *value* equals some member's string value."""
        return value in _keyword_values

    @classmethod
    def type_check_from_string(cls, keyword: str) -> "Keyword":
        """Convert *keyword* (case-insensitive) to the matching Keyword member.

        Raises:
            RP2ValueError: if *keyword* is not a recognized keyword.
        """
        if not Keyword.has_value(keyword.lower()):
            raise RP2ValueError(f"Invalid keyword: {keyword}")
        return Keyword[keyword.upper()]
# Cached set of all member values, used by Keyword.has_value.
_keyword_values: Set[str] = {item.value for item in Keyword}

# List of supported fiat currencies
_FIAT_SET: Set[str] = {"AUD", "CAD", "CHF", "CNY", "EUR", "GBP", "HKD", "ILS", "INR", "JPY", "KRW", "SEK", "USD"}

# Fields whose values are fiat amounts (see is_fiat_field).
_FIAT_FIELD_SET: Set[str] = {
    Keyword.FIAT_FEE.value,
    Keyword.FIAT_IN_NO_FEE.value,
    Keyword.FIAT_IN_WITH_FEE.value,
    Keyword.FIAT_OUT_NO_FEE.value,
    Keyword.SPOT_PRICE.value,
}

# Fields whose values are crypto amounts (see is_crypto_field).
_CRYPTO_FIELD_SET: Set[str] = {
    Keyword.CRYPTO_FEE.value,
    Keyword.CRYPTO_IN.value,
    Keyword.CRYPTO_OUT_NO_FEE.value,
    Keyword.CRYPTO_OUT_WITH_FEE.value,
    Keyword.CRYPTO_RECEIVED.value,
    Keyword.CRYPTO_SENT.value,
}

# Fields grouped as internal (see is_internal_field); not user transaction data.
_INTERNAL_FIELD_SET: Set[str] = {
    Keyword.FIAT_TICKER.value,
    Keyword.IS_SPOT_PRICE_FROM_WEB.value,
    Keyword.PLUGIN.value,
    Keyword.RAW_DATA.value,
}

# The three transaction directions.
DIRECTION_SET: Set[str] = {
    Keyword.IN.value,
    Keyword.OUT.value,
    Keyword.INTRA.value,
}

# Transaction types allowed for each direction (see is_transaction_type_valid).
DIRECTION_2_TRANSACTION_TYPE_SET: Dict[str, Set[str]] = {
    Keyword.IN.value: {
        Keyword.AIRDROP.value,
        Keyword.BUY.value,
        Keyword.DONATE.value,
        Keyword.GIFT.value,
        Keyword.HARDFORK.value,
        Keyword.INCOME.value,
        Keyword.INTEREST.value,
        Keyword.MINING.value,
        Keyword.STAKING.value,
        Keyword.WAGES.value,
    },
    Keyword.OUT.value: {
        Keyword.DONATE.value,
        Keyword.GIFT.value,
        Keyword.FEE.value,
        Keyword.SELL.value,
    },
    Keyword.INTRA.value: {
        Keyword.MOVE.value,
    },
}

# Accepted historical-price selector keywords (OHLC bar fields plus
# "nearest" — presumably "nearest in time"; semantics defined elsewhere).
HISTORICAL_PRICE_KEYWORD_SET: Set[str] = {
    Keyword.HISTORICAL_PRICE_CLOSE.value,
    Keyword.HISTORICAL_PRICE_HIGH.value,
    Keyword.HISTORICAL_PRICE_LOW.value,
    Keyword.HISTORICAL_PRICE_NEAREST.value,
    Keyword.HISTORICAL_PRICE_OPEN.value,
}

# Configuration sections reserved by RP2 itself (see is_builtin_section_name).
BUILTIN_CONFIGURATION_SECTIONS: Set[str] = {
    Keyword.TRANSACTION_HINTS.value,
    Keyword.IN_HEADER.value,
    Keyword.OUT_HEADER.value,
    Keyword.INTRA_HEADER.value,
}

# Default column index for each field of the in/out/intra spreadsheet headers.
# NOTE(review): index 5 is unused in the in/out tables — presumably reserved;
# verify against the spreadsheet layout.
DEFAULT_CONFIGURATION: Dict[str, Union[Dict[str, int], Dict[str, str]]] = {
    Keyword.IN_HEADER.value: {
        Keyword.TIMESTAMP.value: 0,
        Keyword.ASSET.value: 1,
        Keyword.EXCHANGE.value: 2,
        Keyword.HOLDER.value: 3,
        Keyword.TRANSACTION_TYPE.value: 4,
        Keyword.SPOT_PRICE.value: 6,
        Keyword.CRYPTO_IN.value: 7,
        Keyword.CRYPTO_FEE.value: 8,
        Keyword.FIAT_IN_NO_FEE.value: 9,
        Keyword.FIAT_IN_WITH_FEE.value: 10,
        Keyword.FIAT_FEE.value: 11,
        Keyword.UNIQUE_ID.value: 12,
        Keyword.NOTES.value: 13,
    },
    Keyword.OUT_HEADER.value: {
        Keyword.TIMESTAMP.value: 0,
        Keyword.ASSET.value: 1,
        Keyword.EXCHANGE.value: 2,
        Keyword.HOLDER.value: 3,
        Keyword.TRANSACTION_TYPE.value: 4,
        Keyword.SPOT_PRICE.value: 6,
        Keyword.CRYPTO_OUT_NO_FEE.value: 7,
        Keyword.CRYPTO_FEE.value: 8,
        Keyword.CRYPTO_OUT_WITH_FEE.value: 9,
        Keyword.FIAT_OUT_NO_FEE.value: 10,
        Keyword.FIAT_FEE.value: 11,
        Keyword.UNIQUE_ID.value: 12,
        Keyword.NOTES.value: 13,
    },
    Keyword.INTRA_HEADER.value: {
        Keyword.TIMESTAMP.value: 0,
        Keyword.ASSET.value: 1,
        Keyword.FROM_EXCHANGE.value: 2,
        Keyword.FROM_HOLDER.value: 3,
        Keyword.TO_EXCHANGE.value: 4,
        Keyword.TO_HOLDER.value: 5,
        Keyword.SPOT_PRICE.value: 6,
        Keyword.CRYPTO_SENT.value: 7,
        Keyword.CRYPTO_RECEIVED.value: 8,
        Keyword.UNIQUE_ID.value: 12,
        Keyword.NOTES.value: 13,
    },
}
def is_builtin_section_name(section_name: str) -> bool:
    """Return True if *section_name* is one of the reserved built-in sections."""
    return section_name in BUILTIN_CONFIGURATION_SECTIONS
def is_fiat_field(field: str) -> bool:
    """Return True if *field* belongs to the module's fiat field set."""
    return field in _FIAT_FIELD_SET
def is_crypto_field(field: str) -> bool:
    """Return True if *field* belongs to the module's crypto field set."""
    return field in _CRYPTO_FIELD_SET
def is_internal_field(field: str) -> bool:
    """Return True if *field* belongs to the module's internal field set."""
    return field in _INTERNAL_FIELD_SET
def is_fiat(currency: str) -> bool:
    """Return True if *currency* is in the module's fiat currency set."""
    return currency in _FIAT_SET
def is_unknown(value: str) -> bool:
    """Return True if *value* equals the UNKNOWN keyword."""
    return value == Keyword.UNKNOWN.value
def is_unknown_or_none(value: Optional[str]) -> bool:
    """Return True if *value* is the UNKNOWN keyword or None."""
    return value in {Keyword.UNKNOWN.value, None}
def is_transaction_type_valid(direction: str, transaction_type: str) -> bool:
    """Return True if *transaction_type* (case-insensitive) is allowed for *direction*.

    Raises KeyError if *direction* is not a key of DIRECTION_2_TRANSACTION_TYPE_SET.
    """
    return transaction_type.lower() in DIRECTION_2_TRANSACTION_TYPE_SET[direction]
|
[
"rp2.rp2_error.RP2ValueError"
] |
[((2930, 2974), 'rp2.rp2_error.RP2ValueError', 'RP2ValueError', (['f"""Invalid keyword: {keyword}"""'], {}), "(f'Invalid keyword: {keyword}')\n", (2943, 2974), False, 'from rp2.rp2_error import RP2ValueError\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
sys.path.append("../")
from nmutant_util.utils_file import get_data_file
from nmutant_data.data import get_data
from nmutant_util.utils_imgproc import deprocess_image_1
FLAGS = flags.FLAGS
def ass(datasets, model, samples_path):
    """Compute the average structural similarity (SSIM) between adversarial
    samples and their corresponding original test images.

    :param datasets: dataset name, e.g. 'mnist' or 'cifar10'
    :param model: model name (currently unused inside this function)
    :param samples_path: directory holding the adversarial sample files
    :return: mean SSIM over all loaded samples
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    X_test = [deprocess_image_1(np.asarray([image])) for image in X_test]
    [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)
    # Rescale samples back to 0-255 and drop the leading batch dimension;
    # cifar10 keeps its channel axis, other datasets are treated as 2-D.
    if datasets=='cifar10':
        image_list = [(img*255).reshape(img.shape[0], img.shape[1], img.shape[2]) for img in image_list]
    else:
        image_list = [(img*255).reshape(img.shape[0], img.shape[1]) for img in image_list]
    result = 0
    for i in range(len(image_list)):
        # The 4th-from-last '_'-separated token of the sample file name is
        # taken to be the index of the originating test image — TODO confirm
        # this matches the sample generator's naming scheme.
        index = int(image_files[i].split('_')[-4])
        result = result + ssim(np.asarray(image_list[i]), np.asarray(X_test[index]))
    result = result / len(image_list)
    print('average structural similarity is %.4f' % (result))
    return result
def ssim(adv, ori):
    """Return the structural similarity (SSIM) index of two images.

    3-D (RGB) inputs are first collapsed to luminance with 0.3/0.59/0.11
    channel weights; the index is then computed globally over the flattened
    pixels as the product of the luminance, contrast and structure terms
    (all three exponents are 1).
    """
    # Collapse RGB to a single luminance channel before comparing.
    if adv.ndim == 3:
        luma = np.asarray([0.3, 0.59, 0.11])
        adv = (adv * luma).sum(axis=2)
        ori = (ori * luma).sum(axis=2)
    x = adv.reshape(-1)
    y = ori.reshape(-1)
    # Stabilising constants from the standard SSIM definition (L = 255).
    c1 = (0.01 * 255) ** 2
    c2 = (0.03 * 255) ** 2
    c3 = c2 / 2
    mean_x, mean_y = x.mean(), y.mean()
    # Sample (ddof=1) standard deviations and covariance.
    std_x = x.std(ddof=1)
    std_y = y.std(ddof=1)
    cov_xy = sum((x - mean_x) * (y - mean_y)) / (len(x) - 1)
    luminance = (2 * mean_x * mean_y + c1) / (mean_x ** 2 + mean_y ** 2 + c1)
    contrast = (2 * std_x * std_y + c2) / (std_x ** 2 + std_y ** 2 + c2)
    structure = (cov_xy + c3) / (std_x * std_y + c3)
    return luminance * contrast * structure
def main(argv=None):
    """Entry point used by tf.app.run: run ass() on the FLAGS-configured inputs."""
    # `argv` is unused; tf.app.run passes it by convention.
    ass(datasets = FLAGS.datasets,
        model=FLAGS.model,
        samples_path=FLAGS.samples)
if __name__ == '__main__':
    # Command-line configuration consumed by main() through FLAGS.
    flags.DEFINE_string('datasets', 'mnist', 'The target datasets.')
    flags.DEFINE_string('model', 'lenet1', 'The name of model')
    flags.DEFINE_string('samples', '../mt_result/cifar10_jsma/adv_jsma', 'The path to load samples.')
    tf.app.run()
|
[
"sys.path.append",
"tensorflow.python.platform.flags.DEFINE_string",
"tensorflow.reset_default_graph",
"nmutant_util.utils_file.get_data_file",
"numpy.asarray",
"nmutant_data.data.get_data",
"tensorflow.app.run"
] |
[((251, 273), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (266, 273), False, 'import sys\n'), ((577, 601), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (599, 601), True, 'import tensorflow as tf\n'), ((641, 659), 'nmutant_data.data.get_data', 'get_data', (['datasets'], {}), '(datasets)\n', (649, 659), False, 'from nmutant_data.data import get_data\n'), ((798, 825), 'nmutant_util.utils_file.get_data_file', 'get_data_file', (['samples_path'], {}), '(samples_path)\n', (811, 825), False, 'from nmutant_util.utils_file import get_data_file\n'), ((2338, 2402), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""datasets"""', '"""mnist"""', '"""The target datasets."""'], {}), "('datasets', 'mnist', 'The target datasets.')\n", (2357, 2402), False, 'from tensorflow.python.platform import flags\n'), ((2407, 2466), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model"""', '"""lenet1"""', '"""The name of model"""'], {}), "('model', 'lenet1', 'The name of model')\n", (2426, 2466), False, 'from tensorflow.python.platform import flags\n'), ((2471, 2572), 'tensorflow.python.platform.flags.DEFINE_string', 'flags.DEFINE_string', (['"""samples"""', '"""../mt_result/cifar10_jsma/adv_jsma"""', '"""The path to load samples."""'], {}), "('samples', '../mt_result/cifar10_jsma/adv_jsma',\n 'The path to load samples.')\n", (2490, 2572), False, 'from tensorflow.python.platform import flags\n'), ((2574, 2586), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (2584, 2586), True, 'import tensorflow as tf\n'), ((692, 711), 'numpy.asarray', 'np.asarray', (['[image]'], {}), '([image])\n', (702, 711), True, 'import numpy as np\n'), ((1461, 1490), 'numpy.asarray', 'np.asarray', (['[0.3, 0.59, 0.11]'], {}), '([0.3, 0.59, 0.11])\n', (1471, 1490), True, 'import numpy as np\n'), ((1542, 1571), 'numpy.asarray', 'np.asarray', (['[0.3, 0.59, 0.11]'], {}), '([0.3, 0.59, 0.11])\n', 
(1552, 1571), True, 'import numpy as np\n'), ((1194, 1219), 'numpy.asarray', 'np.asarray', (['image_list[i]'], {}), '(image_list[i])\n', (1204, 1219), True, 'import numpy as np\n'), ((1221, 1246), 'numpy.asarray', 'np.asarray', (['X_test[index]'], {}), '(X_test[index])\n', (1231, 1246), True, 'import numpy as np\n')]
|
from tensorflow.keras.layers import (Input, Reshape, Dense, Conv2D, Layer,
BatchNormalization, UpSampling2D,
Dropout, Flatten, Conv2DTranspose,
)
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
from models.basemodel import BaseModel, DataLoader, np, datetime, plt
from functools import partial
class RandomWeightAverage(Layer):
    """Keras layer that blends two image batches with per-sample random weights.

    Used by the WGAN-GP gradient penalty: each sample is a random convex
    combination of a real and a fake image.
    """

    def __init__(self, batch_size):
        super().__init__()
        self.batch_size = batch_size

    def get_config(self):
        """Return the layer config (base config plus batch_size) for serialization."""
        base = super().get_config()
        return {**base, 'batch_size': self.batch_size}

    def call(self, inputs):
        """Blend inputs[0] and inputs[1] with one uniform weight per sample."""
        weight = K.random_uniform((self.batch_size, 1, 1, 1))
        return weight * inputs[0] + (1 - weight) * inputs[1]
class WGANGP(BaseModel):
    """Wasserstein GAN with gradient penalty (WGAN-GP), built on BaseModel.

    Builds a generator, a critic ("discriminator"), a critic training model
    with the gradient-penalty term, and a combined model for generator
    updates; `train` runs the alternating optimisation loop.
    """
    def __init__(self,
                 input_dim = (28, 28, 1),
                 di_conv_filters = [64, 128, 512, 1024],
                 di_conv_kernels = [5, 5, 5, 5],
                 di_conv_strides = [2, 2, 2, 1],
                 di_dropout = None,
                 di_active = 'leakyrelu',
                 di_lr = 0.00005,
                 ge_initial_size = (7, 7, 64),
                 ge_upsample = [2, 2, 1, 1],
                 ge_conv_filters = [512, 256, 128, 1],
                 ge_conv_kernels = [5, 5, 5, 5],
                 ge_conv_strides = [1, 1, 1, 1],
                 ge_batch_norm = 0.8,
                 ge_dropout = None,
                 ge_active = 'leakyrelu',
                 ge_lr = 0.00005,
                 optimizer = 'rmsprop',
                 beta1 = 0.5,
                 z_dim = 100,
                 weight_init = (0, 0.02),
                 grad_weights = 10,
                 start_epoch = 0,
                 k = 5,
                 loader = None,
                 data_name = 'mnist',
                 ID = 0,
                 color = 'gray',
                 batch_size = 64
                 ):
        # NOTE(review): the list-typed defaults above are mutable default
        # arguments shared across calls — safe only while never mutated.
        self.input_dim = input_dim
        self.di_conv_filters = di_conv_filters
        self.di_conv_kernels = di_conv_kernels
        self.di_conv_strides = di_conv_strides
        self.di_dropout = di_dropout
        self.di_active = di_active
        self.di_lr = di_lr
        self.ge_initial_size = ge_initial_size
        self.ge_upsample = ge_upsample
        self.ge_conv_filters = ge_conv_filters
        self.ge_conv_kernels = ge_conv_kernels
        self.ge_conv_strides = ge_conv_strides
        self.ge_batch_norm = ge_batch_norm
        self.ge_dropout = ge_dropout
        self.ge_active = ge_active
        self.ge_lr = ge_lr
        self.opt = optimizer
        self.beta1 = beta1
        self.z_dim = z_dim
        if weight_init is None:
            weight_init = (0, 1)
        # weight_init is a (mean, stddev) pair for all conv/dense kernels.
        self.weight_init = RandomNormal(mean=weight_init[0],
                                        stddev=weight_init[1])
        self.grad_weights = grad_weights
        self.epochs = start_epoch
        self.k = k
        self.data_name = data_name
        # NOTE(review): when a non-None `loader` is passed, self.loader is
        # never assigned here — TODO confirm BaseModel.__init__ sets it.
        if loader is None:
            self.loader = DataLoader(data_name, ID, shape=input_dim, color=color, section='WGANGP')
        self.color = color
        self.batch_size = batch_size
        self.di_len = len(di_conv_filters)
        self.ge_len = len(ge_conv_filters)
        self.di_sample = []
        super().__init__()
        self.build_gengerator()
        self.build_discriminator()
        self.build_adversarial()
    def wassarstein(self, real, pred):
        """Wasserstein loss: -mean(real * pred). (Name typo kept for compatibility.)"""
        return -K.mean(real * pred)
    def gradinet_penalty_loss(self, real, pred, samples):
        """Gradient-penalty term: mean squared deviation of the critic's
        gradient L2-norm (w.r.t. the interpolated *samples*) from 1."""
        grads = K.gradients(pred, samples)[0]
        # L2 norm per sample, summing over all non-batch axes.
        grad_l2_norm = K.sqrt(K.sum(K.square(grads), axis=np.arange(1, len(grads.shape))))
        grad_penalty = K.square(1 - grad_l2_norm)
        return K.mean(grad_penalty)
    def build_gengerator(self):
        """Build self.generator: z (z_dim,) -> image of shape input_dim."""
        ge_input = Input(shape=(self.z_dim, ), name='Generator_input')
        x = ge_input
        # Project the latent vector to the initial spatial volume.
        x = Dense(np.prod(self.ge_initial_size),
                  kernel_initializer=self.weight_init
                  )(x)
        if self.ge_batch_norm is not None:
            x = BatchNormalization(momentum=self.ge_batch_norm)(x)
        x = self.activation(self.ge_active)(x)
        x = Reshape(target_shape=self.ge_initial_size)(x)
        if self.ge_dropout is not None:
            x = Dropout(self.ge_dropout)(x)
        for i in range(self.ge_len):
            # Upsample marker 2 selects UpSampling2D + Conv2D; anything else
            # uses a transposed convolution instead.
            if self.ge_upsample[i] == 2:
                x = UpSampling2D()(x)
                x = Conv2D(
                    filters=self.ge_conv_filters[i],
                    kernel_size=self.ge_conv_kernels[i],
                    strides=self.ge_conv_strides[i],
                    padding='same',
                    name='ge_conv_{}'.format(i),
                    kernel_initializer=self.weight_init
                )(x)
            else:
                x = Conv2DTranspose(
                    filters=self.ge_conv_filters[i],
                    kernel_size=self.ge_conv_kernels[i],
                    strides=self.ge_conv_strides[i],
                    padding='same',
                    kernel_initializer=self.weight_init,
                    name='ge_conv2Trans_{}'.format(i)
                )(x)
            if i < self.ge_len - 1:
                if self.ge_batch_norm is not None:
                    x = BatchNormalization(momentum=self.ge_batch_norm)(x)
                x = self.activation(self.ge_active)(x)
            else:
                # Final layer: tanh output, so pixels land in [-1, 1].
                x = self.activation('tanh')(x)
        self.generator = Model(ge_input, x)
    def build_discriminator(self):
        """Build self.discriminator (the critic): image -> unbounded scalar score."""
        di_input = Input(shape=self.input_dim, name='Discriminator_input')
        x = di_input
        for i in range(self.di_len):
            x = Conv2D(
                filters=self.di_conv_filters[i],
                kernel_size=self.di_conv_kernels[i],
                strides=self.di_conv_strides[i],
                padding='same',
                kernel_initializer=self.weight_init,
                name='di_conv_{}'.format(i)
            )(x)
            x = self.activation(self.di_active)(x)
            if self.di_dropout is not None:
                x = Dropout(self.di_dropout)(x)
        x = Flatten()(x)
        # No activation: WGAN critics output a raw score, not a probability.
        x = Dense(1, activation=None, kernel_initializer=self.weight_init)(x)
        self.discriminator = Model(di_input, x)
    def build_adversarial(self):
        """Wire the critic training model (with gradient penalty) and the
        combined generator training model, then register all models."""
        # Freeze the generator while compiling the critic's training model.
        self.trainable(self.generator, False)
        real_imgs = Input(shape=self.input_dim, )
        z = Input(shape=(self.z_dim, ), )
        fake_imgs = self.generator(z)
        real = self.discriminator(real_imgs)
        fake = self.discriminator(fake_imgs)
        # Random interpolates between real and fake, scored by the critic,
        # feed the gradient-penalty loss below.
        samples_imgs = RandomWeightAverage(self.batch_size)([real_imgs, fake_imgs])
        samples = self.discriminator(samples_imgs)
        gp_lss = partial(self.gradinet_penalty_loss,
                         samples=samples)
        # Keras requires a __name__ on loss callables built via partial.
        gp_lss.__name__ = 'gradient_penalty'
        self.discriminator_model = Model(inputs=[real_imgs, z],
                                         outputs=[real, fake, samples])
        self.discriminator_model.compile(
            loss=[self.wassarstein, self.wassarstein, gp_lss],
            optimizer=self.optimizer(self.di_lr),
            loss_weights=[1, 1, self.grad_weights],
            experimental_run_tf_function=False
        )
        # Now freeze the critic and train only the generator in `combined`.
        self.trainable(self.generator, True)
        self.trainable(self.discriminator, False)
        combined_input = Input(shape=(self.z_dim, ))
        combined_output = self.discriminator(self.generator(combined_input))
        self.combined = Model(combined_input, combined_output)
        self.combined.compile(
            optimizer=self.optimizer(self.ge_lr),
            loss=self.wassarstein,
            metrics=['accuracy'],
            experimental_run_tf_function=False
        )
        self.trainable(self.discriminator, True)
        self.models.setdefault('Discriminator', self.discriminator)
        self.models.setdefault('Generator', self.generator)
        self.models.setdefault('Whole_Discriminator', self.discriminator_model)
        self.models.setdefault('Combined', self.combined)
    def train_generator(self):
        """One generator update: fool the critic into scoring noise-images as real."""
        real = np.ones((self.batch_size, 1))
        z = np.random.normal(0, 1, (self.batch_size, self.z_dim))
        return self.combined.train_on_batch(z, real)
    def train_discriminator(self, x_train, clip):
        """One critic update on a random real batch plus a fresh fake batch.

        Returns (total, real, fake, penalty) losses.
        NOTE(review): `clip` is unused — weight clipping is replaced by the
        gradient penalty in WGAN-GP; the parameter is kept for interface
        compatibility.
        """
        real = np.ones((self.batch_size, 1))
        fake = -np.ones((self.batch_size, 1))
        # Target for the gradient-penalty output (penalty is pushed to 0).
        zeros = np.zeros((self.batch_size, 1))
        idcs = np.random.randint(0, x_train.shape[0], self.batch_size)
        real_imgs = x_train[idcs]
        z = np.random.normal(0, 1, (self.batch_size, self.z_dim))
        di_tot, di_real, di_fake, di_mid = self.discriminator_model.train_on_batch([real_imgs, z], [real, fake, zeros])
        return di_tot, di_real, di_fake, di_mid
    def train(self,
              max_epochs,
              show_every_n,
              clip=0.01,
              train_data=None,
              train_batch=False):
        """Alternating training loop: `k` critic updates per generator update.

        With train_batch=True, batches come from self.loader; otherwise
        *train_data* supplies the full training array(s) directly.
        """
        s = datetime.now()
        if train_batch:
            for epoch in range(self.epochs, max_epochs):
                for batch_i, (x_train, y_train) in enumerate(self.loader.load_batch()):
                    # Critic is trained k times for every generator step.
                    for k in range(self.k):
                        di = self.train_discriminator(x_train, clip)
                    ge = self.train_generator()
                    self.di_real_lss.append(di[1])
                    self.di_fake_lss.append(di[2])
                    self.di_lss.append(di[0])
                    self.di_sample.append(di[3])
                    self.ge_lss.append(ge[0])
                if epoch % show_every_n == 0:
                    elapsed_time = datetime.now() - s
                    print ('[Epochs %d/%d] [Batch %d/%d] [D Total %.4f Real %.4f Fake %.4f Sam %.4f] [G %.4f] %s' %
                           (epoch, max_epochs,
                            batch_i, self.loader.n_batches,
                            di[0], di[1], di[2], di[3], ge[0], elapsed_time))
                    self.show_img(self.generator, self.z_dim, 'sample_{}.png'.format(epoch), color='gray')
                    self.save_weights(file_name='weights_{}.h5'.format(epoch))
                    self.save_models(epoch=epoch)
            self.show_img(self.generator, self.z_dim, 'sample_last.png', color='gray')
            self.save_models()
            self.plot_loss()
        else:
            if len(train_data) == 1:
                x_train = train_data[0]
            else:
                # NOTE(review): labels `t_train` are unpacked but never used.
                x_train, t_train = train_data
            for epoch in range(max_epochs):
                for j in range(self.k):
                    di = self.train_discriminator(x_train, clip)
                ge = self.train_generator()
                self.di_real_lss.append(di[1])
                self.di_fake_lss.append(di[2])
                self.di_lss.append(di[0])
                self.di_sample.append(di[3])
                self.ge_lss.append(ge[0])
                if epoch % show_every_n == 0:
                    elapsed_time = datetime.now() - s
                    print ('[Epochs %d/%d] [D Total %.4f Real %.4f Fake %.4f Sam %.4f] [G %.4f] Time %s' %
                           (epoch, max_epochs,
                            di[0], di[1], di[2], di[3], ge[0], elapsed_time))
                    self.show_img(self.generator, self.z_dim, file_name='sample_{}.png'.format(epoch), color='gray')
                    self.save_weights(file_name='weights_{}.h5'.format(epoch))
                    self.save_models(epoch=epoch)
            self.show_img(self.generator, self.z_dim, 'sample_last.png', color='gray')
            self.save_models()
            self.plot_loss()
    def plot_loss(self):
        """Plot the recorded critic/generator loss histories in one figure.

        NOTE(review): `self.di_acc` is not assigned anywhere in this class —
        presumably defined by BaseModel; confirm before relying on panel 5.
        """
        fig = plt.figure(figsize=(150, 100))
        ax1 = fig.add_subplot(231)
        ax1.set_xlim([0, len(self.di_real_lss)])
        ax1.set_title('Discriminator Real Loss')
        ax1.set_xlabel('Epochs')
        ax1.set_ylabel('Loss')
        ax1.plot(len(self.di_real_lss), self.di_real_lss)
        ax2 = fig.add_subplot(232)
        ax2.set_xlim([0, len(self.di_fake_lss)])
        ax2.set_title('Discriminator Fake Loss')
        ax2.set_xlabel('Epochs')
        ax2.set_ylabel('Loss')
        ax2.plot(len(self.di_fake_lss), self.di_fake_lss)
        ax3 = fig.add_subplot(233)
        ax3.set_xlim([0, len(self.di_lss)])
        ax3.set_title('Discriminator Loss')
        ax3.set_xlabel('Epochs')
        ax3.set_ylabel('Loss')
        ax3.plot(len(self.di_lss), self.di_lss)
        ax4 = fig.add_subplot(234)
        ax4.set_xlim([0, len(self.ge_lss)])
        ax4.set_title('Generator Loss')
        ax4.set_xlabel('Epochs')
        ax4.set_ylabel('Loss')
        ax4.plot(len(self.ge_lss), self.ge_lss)
        ax5 = fig.add_subplot(235)
        ax5.set_xlim([0, len(self.di_acc)])
        ax5.set_ylim([0, 100])
        ax5.set_title('Discriminator Accuracy')
        ax5.set_xlabel('Epochs')
        ax5.set_ylabel('Accuracy')
        ax5.plot(len(self.di_acc), self.di_acc)
        plt.show()
        plt.cla()
        plt.clf()
|
[
"tensorflow.python.framework.ops.disable_eager_execution",
"models.basemodel.np.ones",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"models.basemodel.DataLoader",
"models.basemodel.plt.clf",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.BatchNormalization",
"models.basemodel.plt.figure",
"models.basemodel.np.random.normal",
"tensorflow.keras.backend.random_uniform",
"tensorflow.keras.backend.gradients",
"models.basemodel.np.prod",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.UpSampling2D",
"models.basemodel.np.zeros",
"functools.partial",
"models.basemodel.plt.show",
"tensorflow.keras.layers.Dropout",
"models.basemodel.datetime.now",
"models.basemodel.plt.cla",
"tensorflow.keras.models.Model",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.backend.square",
"tensorflow.keras.backend.mean",
"models.basemodel.np.random.randint"
] |
[((464, 489), 'tensorflow.python.framework.ops.disable_eager_execution', 'disable_eager_execution', ([], {}), '()\n', (487, 489), False, 'from tensorflow.python.framework.ops import disable_eager_execution\n'), ((963, 1007), 'tensorflow.keras.backend.random_uniform', 'K.random_uniform', (['(self.batch_size, 1, 1, 1)'], {}), '((self.batch_size, 1, 1, 1))\n', (979, 1007), True, 'from tensorflow.keras import backend as K\n'), ((3060, 3116), 'tensorflow.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': 'weight_init[0]', 'stddev': 'weight_init[1]'}), '(mean=weight_init[0], stddev=weight_init[1])\n', (3072, 3116), False, 'from tensorflow.keras.initializers import RandomNormal\n'), ((4017, 4043), 'tensorflow.keras.backend.square', 'K.square', (['(1 - grad_l2_norm)'], {}), '(1 - grad_l2_norm)\n', (4025, 4043), True, 'from tensorflow.keras import backend as K\n'), ((4060, 4080), 'tensorflow.keras.backend.mean', 'K.mean', (['grad_penalty'], {}), '(grad_penalty)\n', (4066, 4080), True, 'from tensorflow.keras import backend as K\n'), ((4133, 4183), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.z_dim,)', 'name': '"""Generator_input"""'}), "(shape=(self.z_dim,), name='Generator_input')\n", (4138, 4183), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((5807, 5825), 'tensorflow.keras.models.Model', 'Model', (['ge_input', 'x'], {}), '(ge_input, x)\n', (5812, 5825), False, 'from tensorflow.keras.models import Model\n'), ((5881, 5936), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.input_dim', 'name': '"""Discriminator_input"""'}), "(shape=self.input_dim, name='Discriminator_input')\n", (5886, 5936), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((6608, 6626), 'tensorflow.keras.models.Model', 'Model', (['di_input', 'x'], 
{}), '(di_input, x)\n', (6613, 6626), False, 'from tensorflow.keras.models import Model\n'), ((6729, 6756), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.input_dim'}), '(shape=self.input_dim)\n', (6734, 6756), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((6771, 6797), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.z_dim,)'}), '(shape=(self.z_dim,))\n', (6776, 6797), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((7084, 7136), 'functools.partial', 'partial', (['self.gradinet_penalty_loss'], {'samples': 'samples'}), '(self.gradinet_penalty_loss, samples=samples)\n', (7091, 7136), False, 'from functools import partial\n'), ((7243, 7302), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[real_imgs, z]', 'outputs': '[real, fake, samples]'}), '(inputs=[real_imgs, z], outputs=[real, fake, samples])\n', (7248, 7302), False, 'from tensorflow.keras.models import Model\n'), ((7734, 7760), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.z_dim,)'}), '(shape=(self.z_dim,))\n', (7739, 7760), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((7863, 7901), 'tensorflow.keras.models.Model', 'Model', (['combined_input', 'combined_output'], {}), '(combined_input, combined_output)\n', (7868, 7901), False, 'from tensorflow.keras.models import Model\n'), ((8477, 8506), 'models.basemodel.np.ones', 'np.ones', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (8484, 8506), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((8519, 8572), 'models.basemodel.np.random.normal', 'np.random.normal', (['(0)', '(1)', '(self.batch_size, self.z_dim)'], {}), '(0, 1, 
(self.batch_size, self.z_dim))\n', (8535, 8572), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((8693, 8722), 'models.basemodel.np.ones', 'np.ones', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (8700, 8722), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((8785, 8815), 'models.basemodel.np.zeros', 'np.zeros', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (8793, 8815), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((8832, 8887), 'models.basemodel.np.random.randint', 'np.random.randint', (['(0)', 'x_train.shape[0]', 'self.batch_size'], {}), '(0, x_train.shape[0], self.batch_size)\n', (8849, 8887), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((8934, 8987), 'models.basemodel.np.random.normal', 'np.random.normal', (['(0)', '(1)', '(self.batch_size, self.z_dim)'], {}), '(0, 1, (self.batch_size, self.z_dim))\n', (8950, 8987), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((9335, 9349), 'models.basemodel.datetime.now', 'datetime.now', ([], {}), '()\n', (9347, 9349), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((12058, 12088), 'models.basemodel.plt.figure', 'plt.figure', ([], {'figsize': '(150, 100)'}), '(figsize=(150, 100))\n', (12068, 12088), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((13353, 13363), 'models.basemodel.plt.show', 'plt.show', ([], {}), '()\n', (13361, 13363), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((13372, 13381), 'models.basemodel.plt.cla', 'plt.cla', ([], {}), '()\n', (13379, 13381), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((13390, 13399), 'models.basemodel.plt.clf', 'plt.clf', ([], {}), '()\n', (13397, 13399), False, 'from models.basemodel import BaseModel, DataLoader, np, 
datetime, plt\n'), ((3341, 3414), 'models.basemodel.DataLoader', 'DataLoader', (['data_name', 'ID'], {'shape': 'input_dim', 'color': 'color', 'section': '"""WGANGP"""'}), "(data_name, ID, shape=input_dim, color=color, section='WGANGP')\n", (3351, 3414), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((3778, 3797), 'tensorflow.keras.backend.mean', 'K.mean', (['(real * pred)'], {}), '(real * pred)\n', (3784, 3797), True, 'from tensorflow.keras import backend as K\n'), ((3873, 3899), 'tensorflow.keras.backend.gradients', 'K.gradients', (['pred', 'samples'], {}), '(pred, samples)\n', (3884, 3899), True, 'from tensorflow.keras import backend as K\n'), ((4503, 4545), 'tensorflow.keras.layers.Reshape', 'Reshape', ([], {'target_shape': 'self.ge_initial_size'}), '(target_shape=self.ge_initial_size)\n', (4510, 4545), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((6487, 6496), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6494, 6496), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((6512, 6574), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': 'None', 'kernel_initializer': 'self.weight_init'}), '(1, activation=None, kernel_initializer=self.weight_init)\n', (6517, 6574), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((8739, 8768), 'models.basemodel.np.ones', 'np.ones', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (8746, 8768), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((3939, 3954), 'tensorflow.keras.backend.square', 'K.square', (['grads'], {}), '(grads)\n', (3947, 3954), True, 'from tensorflow.keras import backend as K\n'), 
((4225, 4254), 'models.basemodel.np.prod', 'np.prod', (['self.ge_initial_size'], {}), '(self.ge_initial_size)\n', (4232, 4254), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((4392, 4439), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': 'self.ge_batch_norm'}), '(momentum=self.ge_batch_norm)\n', (4410, 4439), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((4606, 4630), 'tensorflow.keras.layers.Dropout', 'Dropout', (['self.ge_dropout'], {}), '(self.ge_dropout)\n', (4613, 4630), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((4733, 4747), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {}), '()\n', (4745, 4747), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((6438, 6462), 'tensorflow.keras.layers.Dropout', 'Dropout', (['self.di_dropout'], {}), '(self.di_dropout)\n', (6445, 6462), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((5610, 5657), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': 'self.ge_batch_norm'}), '(momentum=self.ge_batch_norm)\n', (5628, 5657), False, 'from tensorflow.keras.layers import Input, Reshape, Dense, Conv2D, Layer, BatchNormalization, UpSampling2D, Dropout, Flatten, Conv2DTranspose\n'), ((10007, 10021), 'models.basemodel.datetime.now', 'datetime.now', ([], {}), '()\n', (10019, 10021), False, 'from models.basemodel import BaseModel, DataLoader, np, datetime, plt\n'), ((11372, 11386), 'models.basemodel.datetime.now', 'datetime.now', ([], {}), '()\n', (11384, 11386), False, 'from models.basemodel 
import BaseModel, DataLoader, np, datetime, plt\n')]
|
from datetime import datetime
import traceback
import numpy as np
import face_recognition as fr
import glob
import datetime
import os
from stat import *
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import cv2
import matplotlib.pyplot as plt
import time
import sys
import re
import dlib
N_CLUSTERS = 8
BATCH_SIZE = 32
UPSAMPLE = 1
FRAME_START = 400
FRAME_END = 1300
FORMAT = '%-20s: %s'
def isDir(path):
    """Return True when *path* names a directory.

    Raises OSError when *path* does not exist (unlike os.path.isdir).
    """
    return S_ISDIR(os.stat(path).st_mode)
def extractFaces(frame_paths):
    """Detect and encode every face in the given frame images.

    Frames are accumulated into batches of BATCH_SIZE and sent through
    face_recognition's batched face detector; the final partial batch is
    flushed on the last frame.

    Returns a tuple (face_encodings, enc_to_loc) where enc_to_loc[i] is a
    dict {'frame': global frame index, 'loc': bounding box} for the
    encoding face_encodings[i].
    """
    n = len(frame_paths)
    face_encodings = []
    enc_to_loc = []
    frame_batch = []
    for frame_counter in range(n):
        frame = fr.load_image_file(frame_paths[frame_counter])
        frame_batch.append(frame)
        # Only flush when the batch is full, or on the very last frame.
        if frame_counter != n - 1 and len(frame_batch) != BATCH_SIZE:
            continue
        loc_batch = fr.batch_face_locations(frame_batch, number_of_times_to_upsample=UPSAMPLE)
        for frame_number_in_batch, curr_locations in enumerate(loc_batch):
            # Map the position inside this batch back to the global frame index.
            curr_frame_number = frame_counter + 1 - len(frame_batch) + frame_number_in_batch
            curr_frame_path = frame_paths[curr_frame_number]
            curr_frame = frame_batch[frame_number_in_batch]
            m = ('%-20s %-6d %-3d' % (curr_frame_path, curr_frame_number, len(curr_locations)))
            print(FORMAT % ('proc_frame', m))
            if len(curr_locations) == 0:
                continue
            curr_encodings = fr.face_encodings(curr_frame, known_face_locations=curr_locations)
            for k in range(len(curr_encodings)):
                enc = curr_encodings[k]
                loc = curr_locations[k]
                enc_to_loc.append({'frame': curr_frame_number, 'loc': loc})
                face_encodings.append(enc)
        frame_batch = []
    return (face_encodings, enc_to_loc)
def detectSpeaker(frame_paths, face_encodings, enc_to_loc, vid_name):
    """Cluster the face encodings and pick the speaker as the most frequent face.

    The encoding closest to the centre of the largest KMeans cluster is
    taken as the speaker; an annotated frame (<vid_name>.jpg) and the
    cluster centre (<vid_name>.npy) are written to the current directory.

    Returns the index of the chosen encoding, or None when there are fewer
    encodings than N_CLUSTERS.
    """
    print(FORMAT % ('cluster_inp', len(face_encodings)))
    if len(face_encodings) < N_CLUSTERS:
        return
    enc_arr = np.asanyarray(face_encodings)
    k_means = KMeans(n_clusters=N_CLUSTERS).fit(enc_arr)
    preds = k_means.predict(enc_arr)
    # Per-sample distances to each of the N_CLUSTERS centres.
    dists = k_means.transform(enc_arr)
    # NOTE(review): this maps the argmax over np.unique's counts straight to
    # a cluster label — correct only while every label 0..N_CLUSTERS-1
    # occurs at least once (np.unique returns sorted unique values).
    largest_cluster = np.argmax(np.unique(preds, return_counts=True)[1])
    closest_to_center = np.argmin(dists[:, largest_cluster])
    face_loc = enc_to_loc[closest_to_center]
    top, right, bottom, left = face_loc['loc']
    frame_number = face_loc['frame']
    speaker_frame_path = frame_paths[frame_number]
    speaker_cluster_center = k_means.cluster_centers_[largest_cluster, :]
    # NOTE(review): dists has one row per encoding, so this is the TOTAL
    # number of encodings, not the size of the speaker's cluster.
    speaker_cluster_size = dists[:, largest_cluster].shape[0]
    print(FORMAT % ('speaker_clsize', '%d' % (speaker_cluster_size)))
    print(FORMAT % ('speaker', '%s -> (%d, %d, %d, %d)' % \
        (speaker_frame_path, top, right, bottom, left)))
    im = cv2.imread(speaker_frame_path)
    cv2.rectangle(im, (left, top), (right, bottom), (0, 255, 0), 3)
    cv2.imwrite(vid_name + '.jpg', im)
    np.save(vid_name + '.npy', speaker_cluster_center)
    return closest_to_center
def clipPaths(paths):
    """Order frame paths by their numeric '<n>.jpg' suffix (1-based) and
    return only the frames in the [FRAME_START, FRAME_END) window."""
    frame_no = re.compile(r'([0-9]+)\.jpg$')
    ordered = ['' for _ in paths]
    for frame_path in paths:
        # Frame number n goes to slot n-1; paths are assumed to be numbered 1..len(paths).
        position = int(frame_no.search(frame_path).group(1))
        ordered[position - 1] = frame_path
    return ordered[FRAME_START:FRAME_END]
# Horizontal rule used to delimit per-video sections in the log output.
# (Idiom fix: string repetition instead of joining a list of characters.)
sep = '=' * 70
def handlePaths(paths):
    """Run the full speaker-detection pipeline on each frame directory in *paths*."""
    for path in paths:
        # Skip anything that is not a directory of extracted frames.
        if not isDir(path):
            continue
        tic = time.time()
        print(FORMAT % ('start_path', path))
        frame_paths = clipPaths(glob.glob(path + '/*'))
        face_encodings, enc_to_loc = extractFaces(frame_paths)
        # Output artifacts are named after the last path component.
        vid_name = re.sub(r'^.+\/', '', path)
        face_idx = detectSpeaker(frame_paths, face_encodings, enc_to_loc, vid_name)
        toc = time.time()
        print(FORMAT % ('duration', '%.5f s' % (toc-tic)))
        print(FORMAT % ('done', sep))
# Logger to give output to console as well as a file simultaneously.
class Logger(object):
    """Tee writer: mirrors everything written to stdout into a log file."""

    def __init__(self):
        self.terminal = sys.stdout
        # Timestamped file name, e.g. detect_speaker_2020-01-01_12.00.00.000000.log
        day = str(datetime.datetime.now().date())
        clock = str(datetime.datetime.now().time())
        log_name = 'detect_speaker_' + day + '_' + clock.replace(':', '.') + '.log'
        self.log = open(log_name, "a")

    def write(self, message):
        """Write *message* both to the real stdout and to the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """No-op flush kept for Python 3 file-object compatibility."""
        # Writes go straight through to both sinks, so nothing to flush here.
        pass
# Route both stdout and stderr through the tee logger for the whole run.
sys.stdout = Logger()
sys.stderr = sys.stdout
if __name__ == '__main__':
    # Usage: script.py <frames_dir> [<frames_dir> ...]
    if len(sys.argv) < 2:
        print('Incorrect usage. Needs paths to speaker frames directories.')
        sys.exit(1)
    paths = sys.argv[1:]
    # Log the CUDA devices dlib sees before starting the heavy work.
    print(FORMAT % ('all_devices', str(dlib.cuda.get_num_devices())))
    print(FORMAT % ('gpu_device', str(dlib.cuda.get_device())))
    try:
        handlePaths(paths)
    except Exception as e:
        # Log the full traceback (stdout/stderr are teed into the log file).
        print(FORMAT % ('error', traceback.format_exc()))
|
[
"numpy.argmin",
"cv2.rectangle",
"glob.glob",
"numpy.unique",
"cv2.imwrite",
"face_recognition.face_encodings",
"sklearn.cluster.KMeans",
"traceback.format_exc",
"datetime.datetime.now",
"re.sub",
"numpy.save",
"os.stat",
"face_recognition.batch_face_locations",
"dlib.cuda.get_device",
"sys.exit",
"re.compile",
"numpy.asanyarray",
"dlib.cuda.get_num_devices",
"time.time",
"cv2.imread",
"face_recognition.load_image_file"
] |
[((2125, 2154), 'numpy.asanyarray', 'np.asanyarray', (['face_encodings'], {}), '(face_encodings)\n', (2138, 2154), True, 'import numpy as np\n'), ((2392, 2428), 'numpy.argmin', 'np.argmin', (['dists[:, largest_cluster]'], {}), '(dists[:, largest_cluster])\n', (2401, 2428), True, 'import numpy as np\n'), ((2963, 2993), 'cv2.imread', 'cv2.imread', (['speaker_frame_path'], {}), '(speaker_frame_path)\n', (2973, 2993), False, 'import cv2\n'), ((2999, 3062), 'cv2.rectangle', 'cv2.rectangle', (['im', '(left, top)', '(right, bottom)', '(0, 255, 0)', '(3)'], {}), '(im, (left, top), (right, bottom), (0, 255, 0), 3)\n', (3012, 3062), False, 'import cv2\n'), ((3068, 3102), 'cv2.imwrite', 'cv2.imwrite', (["(vid_name + '.jpg')", 'im'], {}), "(vid_name + '.jpg', im)\n", (3079, 3102), False, 'import cv2\n'), ((3108, 3158), 'numpy.save', 'np.save', (["(vid_name + '.npy')", 'speaker_cluster_center'], {}), "(vid_name + '.npy', speaker_cluster_center)\n", (3115, 3158), True, 'import numpy as np\n'), ((3227, 3256), 're.compile', 're.compile', (['"""([0-9]+)\\\\.jpg$"""'], {}), "('([0-9]+)\\\\.jpg$')\n", (3237, 3256), False, 'import re\n'), ((472, 485), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (479, 485), False, 'import os\n'), ((704, 750), 'face_recognition.load_image_file', 'fr.load_image_file', (['frame_paths[frame_counter]'], {}), '(frame_paths[frame_counter])\n', (722, 750), True, 'import face_recognition as fr\n'), ((904, 978), 'face_recognition.batch_face_locations', 'fr.batch_face_locations', (['frame_batch'], {'number_of_times_to_upsample': 'UPSAMPLE'}), '(frame_batch, number_of_times_to_upsample=UPSAMPLE)\n', (927, 978), True, 'import face_recognition as fr\n'), ((3630, 3641), 'time.time', 'time.time', ([], {}), '()\n', (3639, 3641), False, 'import time\n'), ((3829, 3855), 're.sub', 're.sub', (['"""^.+\\\\/"""', '""""""', 'path'], {}), "('^.+\\\\/', '', path)\n", (3835, 3855), False, 'import re\n'), ((3956, 3967), 'time.time', 'time.time', ([], {}), '()\n', (3965, 
3967), False, 'import time\n'), ((4989, 5000), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4997, 5000), False, 'import sys\n'), ((1522, 1588), 'face_recognition.face_encodings', 'fr.face_encodings', (['curr_frame'], {'known_face_locations': 'curr_locations'}), '(curr_frame, known_face_locations=curr_locations)\n', (1539, 1588), True, 'import face_recognition as fr\n'), ((2170, 2199), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'N_CLUSTERS'}), '(n_clusters=N_CLUSTERS)\n', (2176, 2199), False, 'from sklearn.cluster import KMeans\n'), ((2326, 2362), 'numpy.unique', 'np.unique', (['preds'], {'return_counts': '(True)'}), '(preds, return_counts=True)\n', (2335, 2362), True, 'import numpy as np\n'), ((3721, 3743), 'glob.glob', 'glob.glob', (["(path + '/*')"], {}), "(path + '/*')\n", (3730, 3743), False, 'import glob\n'), ((4243, 4266), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4264, 4266), False, 'import datetime\n'), ((4293, 4316), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4314, 4316), False, 'import datetime\n'), ((5069, 5096), 'dlib.cuda.get_num_devices', 'dlib.cuda.get_num_devices', ([], {}), '()\n', (5094, 5096), False, 'import dlib\n'), ((5139, 5161), 'dlib.cuda.get_device', 'dlib.cuda.get_device', ([], {}), '()\n', (5159, 5161), False, 'import dlib\n'), ((5267, 5289), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5287, 5289), False, 'import traceback\n')]
|
from functools import wraps
from rest_framework import status
from rest_framework.response import Response
from apps.core.backends import sudo_password_needed, sudo_renew
def api_sudo_required(view_func):
    """Require a fresh sudo session before running ``view_func``.

    Users without a usable password (e.g. social-login only) are exempt.
    For everyone else, a still-valid sudo session is renewed and the view
    runs; an expired one yields HTTP 412 so the client can re-prompt for
    the password.
    """
    @wraps(view_func)
    def _wrapped_view(request, *args, **kwargs):
        has_password = request.user.has_usable_password()
        # sudo_password_needed is only consulted for users who actually
        # have a password to re-enter.
        if has_password and sudo_password_needed(request.session):
            return Response({}, status=status.HTTP_412_PRECONDITION_FAILED)
        if has_password:
            sudo_renew(request)
        return view_func(request, *args, **kwargs)
    return _wrapped_view
|
[
"apps.core.backends.sudo_renew",
"rest_framework.response.Response",
"functools.wraps",
"apps.core.backends.sudo_password_needed"
] |
[((214, 230), 'functools.wraps', 'wraps', (['view_func'], {}), '(view_func)\n', (219, 230), False, 'from functools import wraps\n'), ((542, 598), 'rest_framework.response.Response', 'Response', (['{}'], {'status': 'status.HTTP_412_PRECONDITION_FAILED'}), '({}, status=status.HTTP_412_PRECONDITION_FAILED)\n', (550, 598), False, 'from rest_framework.response import Response\n'), ((401, 438), 'apps.core.backends.sudo_password_needed', 'sudo_password_needed', (['request.session'], {}), '(request.session)\n', (421, 438), False, 'from apps.core.backends import sudo_password_needed, sudo_renew\n'), ((452, 471), 'apps.core.backends.sudo_renew', 'sudo_renew', (['request'], {}), '(request)\n', (462, 471), False, 'from apps.core.backends import sudo_password_needed, sudo_renew\n')]
|
# Generated by Django 3.0.4 on 2021-04-07 10:16
from django.db import migrations
class Migration(migrations.Migration):
    """Set explicit database table names for the blog models and clear
    PostComment's model options."""

    dependencies = [
        ('blogs', '0003_auto_20210407_0913'),
    ]

    operations = [
        # Drop all Meta options previously declared on PostComment.
        migrations.AlterModelOptions(
            name='postcomment',
            options={},
        ),
        # Pin each model to an explicit snake_case table name.
        migrations.AlterModelTable(
            name='post',
            table='post',
        ),
        migrations.AlterModelTable(
            name='postcomment',
            table='post_comment',
        ),
        migrations.AlterModelTable(
            name='postimage',
            table='post_image',
        ),
    ]
|
[
"django.db.migrations.AlterModelTable",
"django.db.migrations.AlterModelOptions"
] |
[((225, 285), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""postcomment"""', 'options': '{}'}), "(name='postcomment', options={})\n", (253, 285), False, 'from django.db import migrations\n'), ((330, 383), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', ([], {'name': '"""post"""', 'table': '"""post"""'}), "(name='post', table='post')\n", (356, 383), False, 'from django.db import migrations\n'), ((428, 496), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', ([], {'name': '"""postcomment"""', 'table': '"""post_comment"""'}), "(name='postcomment', table='post_comment')\n", (454, 496), False, 'from django.db import migrations\n'), ((541, 605), 'django.db.migrations.AlterModelTable', 'migrations.AlterModelTable', ([], {'name': '"""postimage"""', 'table': '"""post_image"""'}), "(name='postimage', table='post_image')\n", (567, 605), False, 'from django.db import migrations\n')]
|
import numpy
from tabulate import tabulate
import time
from threading import Lock
class MovRStats:
    """Thread-safe latency-statistics collector.

    Tracks per-action cumulative counts for the lifetime of the instance
    and per-action latency samples (seconds) for the current reporting
    window.  All mutation happens under ``self.mutex``.
    """

    def __init__(self):
        # action name -> total measurements since instantiation
        self.cumulative_counts = {}
        self.instantiation_time = time.time()
        # Guards window_stats / cumulative_counts against concurrent writers.
        self.mutex = Lock()
        self.new_window()

    def new_window(self):
        """Reset the windowed stats while keeping cumulative counts."""
        # `with` replaces the manual acquire/try/finally pattern; the lock
        # is released even if an exception escapes.
        with self.mutex:
            self.window_start_time = time.time()
            # action name -> list of latency samples for this window
            self.window_stats = {}

    def add_latency_measurement(self, action, measurement):
        """Record one latency measurement (in seconds) for ``action``."""
        with self.mutex:
            self.window_stats.setdefault(action, []).append(measurement)
            self.cumulative_counts[action] = self.cumulative_counts.get(action, 0) + 1

    def print_stats(self, action_list=None):
        """Print a table of the stats this instance has collected.

        If ``action_list`` is empty/None, only actions seen this window are
        printed; otherwise one row is printed for every listed action.
        (``None`` default replaces the original mutable-default ``[]``.)
        """
        def get_percentile_measurement(action, percentile):
            return numpy.percentile(self.window_stats.setdefault(action, [0]), percentile)

        def get_stats_row(action):
            # elapsed is measured since instantiation, not window start,
            # so ops/second reflects the whole run.
            elapsed = time.time() - self.instantiation_time
            if action in self.window_stats:
                return [action, round(elapsed, 0), self.cumulative_counts[action],
                        len(self.window_stats[action]),
                        len(self.window_stats[action]) / elapsed,
                        round(float(get_percentile_measurement(action, 50)) * 1000, 2),
                        round(float(get_percentile_measurement(action, 90)) * 1000, 2),
                        round(float(get_percentile_measurement(action, 95)) * 1000, 2),
                        round(float(get_percentile_measurement(action, 100)) * 1000, 2)]
            return [action, round(elapsed, 0), 0, 0, 0, 0, 0, 0, 0]

        header = ["transaction name", "time(total)", "ops(total)", "ops",
                  "ops/second", "p50(ms)", "p90(ms)", "p95(ms)", "max(ms)"]
        with self.mutex:
            actions = action_list if action_list else list(self.window_stats)
            rows = [get_stats_row(action) for action in sorted(actions)]
            print(tabulate(rows, header), "\n")
|
[
"threading.Lock",
"tabulate.tabulate",
"time.time"
] |
[((197, 208), 'time.time', 'time.time', ([], {}), '()\n', (206, 208), False, 'import time\n'), ((230, 236), 'threading.Lock', 'Lock', ([], {}), '()\n', (234, 236), False, 'from threading import Lock\n'), ((419, 430), 'time.time', 'time.time', ([], {}), '()\n', (428, 430), False, 'import time\n'), ((1335, 1346), 'time.time', 'time.time', ([], {}), '()\n', (1344, 1346), False, 'import time\n'), ((2526, 2548), 'tabulate.tabulate', 'tabulate', (['rows', 'header'], {}), '(rows, header)\n', (2534, 2548), False, 'from tabulate import tabulate\n')]
|
import logging
import re
import statistics
from pprint import pprint
from utils import functions as F
from .attribute import Attribute
from .inverted_index import InvertedIndex
from .occurrence import Occurrence
logger = logging.getLogger(__name__)
class KnowledgeBase:
    '''In-memory knowledge base of attribute -> term occurrences.

    Attributes:
        k_base: dict mapping each attribute name to a list of Occurrence
            objects (one per distinct term, carrying a frequency count).
        inverted_k_base: inverted index mapping each term to the attributes
            in which it appears.
        attribute_statistics: dict mapping each attribute to an Attribute
            object holding its numeric statistics.
        co_occurrences: dict mapping a term to (following_term, attribute)
            pairs observed immediately after it.
    '''
    def __init__(self, kb_file):
        '''Parse kb_file and build all index structures.'''
        self.k_base = {}
        self.inverted_k_base = {}
        self.co_occurrences = {}
        self.attribute_statistics = {}
        self.init_kb(kb_file)
        self.init_inverted_k_base()
        self.init_atribute_statistics()
    def init_kb(self, kb_file):
        '''Parse Knowledge Base and prepare it to extract the content-based features'''
        logger.info('Parsing knowledge base file...')
        data = F.read_k_base(kb_file)
        for item in data:
            attribute = item.tag
            # Normalize the raw text and drop stop words before indexing.
            value = F.remove_stop_words(F.normalize_str(item.text))
            # Check if a value contains only stop words
            if not value:
                continue
            terms = value.split()
            # Record bigram co-occurrences: each terms[i] is followed by
            # terms[i+1] within this attribute's value.
            # NOTE(review): on the FIRST sighting of terms[i] the pair
            # (terms[i+1], attribute) is not appended (only an empty list is
            # created) — looks like a lost first co-occurrence; confirm intent.
            i = 0
            while i < len(terms)-1:
                if terms[i] in self.co_occurrences:
                    if (terms[i+1], attribute) not in self.co_occurrences[terms[i]]:
                        self.co_occurrences[terms[i]].append(
                            (terms[i+1], attribute))
                else:
                    self.co_occurrences[terms[i]] = []
                i += 1
            # The last term has no successor but still needs an entry.
            if terms[-1] not in self.co_occurrences:
                self.co_occurrences[terms[-1]] = []
            for term in terms:
                occurrence = Occurrence(term)
                if attribute in self.k_base:
                    if term not in [obj.term for obj in self.k_base[attribute]]:
                        self.k_base[attribute].append(occurrence)
                    else:
                        # Term already known for this attribute: bump its count.
                        occ = [v for v in self.k_base[attribute]
                               if v.term == term]
                        occ[0].frequency += 1
                else:
                    self.k_base[attribute] = [occurrence]
    def init_inverted_k_base(self):
        '''Create an inverted index for the Knowledge Base'''
        self.inverted_k_base = InvertedIndex(self.k_base).inverted_k_base
    def get_attributes(self):
        '''Get a list with all attributes in the Knowledge Base'''
        return [v for v in self.k_base.keys()]
    def init_atribute_statistics(self):
        '''Compute per-attribute statistics (mean/stdev of numeric terms).'''
        for attr in self.get_attributes():
            most_commom = self.get_most_common_term_by_attribute(attr)
            avg = 0.0
            stdev = 0.0
            # Only purely numeric terms contribute to mean/stdev.
            numeric_values = [
                int(v.term) for v in self.k_base[attr] if re.match(r'^\d+$', v.term)]
            if len(numeric_values):
                avg = statistics.mean(numeric_values)
                if len(numeric_values) > 1:
                    # stdev needs at least two samples.
                    stdev = statistics.stdev(numeric_values)
            self.attribute_statistics[attr] = Attribute(
                attr, avg, stdev, most_commom)
    def get_most_common_term_by_attribute(self, attr):
        '''Get the highest frequency of any term among the values of A'''
        terms = [v for v in self.k_base[attr]]
        return max(x.frequency for x in terms if x.term)
    def get_term_frequency_by_attribute(self, term, attr):
        '''Get the number of distinct values of attribute A that contain the term t'''
        if term in self.inverted_k_base:
            # inverted_k_base[term] appears to hold (attribute, count) pairs.
            frequency = [v[1]
                         for v in self.inverted_k_base[term] if v[0] == attr]
            if len(frequency):
                return frequency[0]
        return 0
    def get_term_occurrence_number(self, term):
        '''Get the total number of occurrences of the term t in all attributes'''
        if term in self.inverted_k_base:
            return sum(v[1] for v in self.inverted_k_base[term])
        return 0
|
[
"statistics.stdev",
"re.match",
"utils.functions.read_k_base",
"statistics.mean",
"logging.getLogger",
"utils.functions.normalize_str"
] |
[((224, 251), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (241, 251), False, 'import logging\n'), ((1218, 1240), 'utils.functions.read_k_base', 'F.read_k_base', (['kb_file'], {}), '(kb_file)\n', (1231, 1240), True, 'from utils import functions as F\n'), ((1341, 1367), 'utils.functions.normalize_str', 'F.normalize_str', (['item.text'], {}), '(item.text)\n', (1356, 1367), True, 'from utils import functions as F\n'), ((3303, 3334), 'statistics.mean', 'statistics.mean', (['numeric_values'], {}), '(numeric_values)\n', (3318, 3334), False, 'import statistics\n'), ((3399, 3431), 'statistics.stdev', 'statistics.stdev', (['numeric_values'], {}), '(numeric_values)\n', (3415, 3431), False, 'import statistics\n'), ((3217, 3243), 're.match', 're.match', (['"""^\\\\d+$"""', 'v.term'], {}), "('^\\\\d+$', v.term)\n", (3225, 3243), False, 'import re\n')]
|
import sys
if sys.version_info[:2] < (2, 7):
import unittest2 as unittest
else:
import unittest
from depsolver.debian_version \
import \
DebianVersion, is_valid_debian_version
V = DebianVersion.from_string
class TestVersionParsing(unittest.TestCase):
    """Sanity checks for Debian version-string parsing."""

    def test_valid_versions(self):
        for candidate in ("1.2.0", "1.2.3-1", "0:1.2.3-1"):
            self.assertTrue(is_valid_debian_version(candidate))

    def test_roundtrip(self):
        # Parsing then stringifying must reproduce the input exactly.
        for candidate in ("1.2.0", "1.2.0-0", "0:1.2.0"):
            self.assertEqual(str(V(candidate)), candidate)
class TestVersionComparison(unittest.TestCase):
    """Equality and ordering semantics of DebianVersion."""

    def test_eq(self):
        # Missing epoch defaults to 0 and missing revision to 0.
        for left, right in (("1.2.3", "1.2.3"),
                            ("1.2.3", "0:1.2.3"),
                            ("1.2.3", "1.2.3-0")):
            self.assertTrue(V(left) == V(right))
        self.assertFalse(V("1.2.3") == V("1:1.2.3"))

    def test_lt(self):
        pairs = (("1", "2"),
                 ("1.0", "1.1"),
                 ("1.0", "1:1.0"),
                 ("1.0", "1.0-1"),
                 ("1.0-1bpo1", "1.0-1.1"))
        for smaller, bigger in pairs:
            self.assertTrue(V(smaller) < V(bigger))

    def test_gt(self):
        pairs = (("2", "1"),
                 ("1.2.3", "1.2.1"),
                 ("1.0-1", "1.0-0"),
                 ("1.0-1", "1.0-0.1"),
                 ("1.0beta1", "1.0"),
                 ("1.0beta1", "1.0-1"),
                 ("1.0-1bpo1", "1.0-1"),
                 ("1.0-1", "1.0-1~sarge1"))
        for bigger, smaller in pairs:
            self.assertTrue(V(bigger) > V(smaller))
|
[
"depsolver.debian_version.is_valid_debian_version"
] |
[((425, 457), 'depsolver.debian_version.is_valid_debian_version', 'is_valid_debian_version', (['version'], {}), '(version)\n', (448, 457), False, 'from depsolver.debian_version import DebianVersion, is_valid_debian_version\n')]
|
import json
from .models import *
def cookieCart(request):
    """Build a cart representation from the ``cart`` cookie for guest users.

    Args:
        request: HTTP request; only ``request.COOKIES`` is read.

    Returns:
        Dict with ``cartItems`` (total quantity), ``order`` (aggregate
        totals, plus ``shipping`` when any physical product is present)
        and ``items`` (per-product line items).  Entries whose product
        cannot be resolved are skipped (best effort).
    """
    try:
        cart = json.loads(request.COOKIES["cart"])
    except (KeyError, ValueError):
        # No cookie, or cookie is not valid JSON (JSONDecodeError is a
        # ValueError subclass).
        cart = {}
    print("Cart:", cart)

    items = []
    order = {"get_cart_total": 0, "get_cart_items": 0}
    cartItems = order["get_cart_items"]

    for i in cart:
        try:
            cartItems += cart[i]["quantity"]
            # BUG FIX: look up the product referenced by the cookie key
            # instead of always fetching product id=1.
            product = Product.objects.get(id=i)
            total = product.price * cart[i]["quantity"]
            order["get_cart_total"] += total
            order["get_cart_items"] += cart[i]["quantity"]
            item = {
                "product": {
                    "id": product.id,
                    "name": product.name,
                    "price": product.price,
                    "imageURL": product.imageURL,
                },
                "quantity": cart[i]["quantity"],
                "get_total": total,
            }
            items.append(item)
            if product.digital == False:
                # Any non-digital product means the order needs shipping.
                order["shipping"] = True
        except Exception:
            # Best effort: skip malformed entries or products that no
            # longer exist rather than failing the whole cart.
            pass
    return {"cartItems": cartItems, "order": order, "items": items}
|
[
"json.loads"
] |
[((86, 121), 'json.loads', 'json.loads', (["request.COOKIES['cart']"], {}), "(request.COOKIES['cart'])\n", (96, 121), False, 'import json\n')]
|
import abc
import six
from typing import Dict, Set, Any, Union # noqa: F401
# CSV header names that every serialized node record must carry.
NODE_KEY = 'KEY'
NODE_LABEL = 'LABEL'
NODE_REQUIRED_HEADERS = {NODE_LABEL, NODE_KEY}

# CSV header names that every serialized relation record must carry.
RELATION_START_KEY = 'START_KEY'
RELATION_START_LABEL = 'START_LABEL'
RELATION_END_KEY = 'END_KEY'
RELATION_END_LABEL = 'END_LABEL'
RELATION_TYPE = 'TYPE'
RELATION_REVERSE_TYPE = 'REVERSE_TYPE'
RELATION_REQUIRED_HEADERS = {RELATION_START_KEY, RELATION_START_LABEL,
                             RELATION_END_KEY, RELATION_END_LABEL,
                             RELATION_TYPE, RELATION_REVERSE_TYPE}

# Header groups with extra casing rules (validated in _validate):
# LABELS must be title-cased, TYPES must be all upper case.
LABELS = {NODE_LABEL, RELATION_START_LABEL, RELATION_END_LABEL}
TYPES = {RELATION_TYPE, RELATION_REVERSE_TYPE}
@six.add_metaclass(abc.ABCMeta)
class Neo4jCsvSerializable(object):
    """
    A Serializable abstract class asks subclass to implement next node or
    next relation in dict form so that it can be serialized to CSV file.
    Any model class that needs to be pushed to Neo4j should inherit this class.
    """
    def __init__(self):
        # type: () -> None
        pass

    @abc.abstractmethod
    def create_next_node(self):
        # type: () -> Union[Dict[str, Any], None]
        """
        Creates dict where keys represent header in CSV and value represents
        row in CSV file. Should the class could have different types of
        nodes that it needs to serialize, it just needs to provide dict with
        different header -- the one who consumes this class figures it out and
        serialize to different file.

        Node is Neo4j's term of Vertex in Graph. More information on
        https://neo4j.com/docs/developer-manual/current/introduction/
        graphdb-concepts/
        :return: a dict or None if no more record to serialize
        """
        raise NotImplementedError

    @abc.abstractmethod
    def create_next_relation(self):
        # type: () -> Union[Dict[str, Any], None]
        """
        Creates dict where keys represent header in CSV and value represents
        row in CSV file. Should the class could have different types of
        relations that it needs to serialize, it just needs to provide dict
        with different header -- the one who consumes this class figures it
        out and serialize to different file.

        Relationship is Neo4j's term of Edge in Graph. More information on
        https://neo4j.com/docs/developer-manual/current/introduction/
        graphdb-concepts/
        :return: a dict or None if no more record to serialize
        """
        raise NotImplementedError

    def next_node(self):
        # type: () -> Union[Dict[str, Any], None]
        """
        Provides node(vertex) in dict form, validated against the required
        node headers.
        :return: Non-nested dict where key is CSV header and each value
        is a column, or None when exhausted.
        """
        node_dict = self.create_next_node()
        if not node_dict:
            return None
        self._validate(NODE_REQUIRED_HEADERS, node_dict)
        return node_dict

    def next_relation(self):
        # type: () -> Union[Dict[str, Any], None]
        """
        Provides relation(edge) in dict form, validated against the required
        relation headers.
        :return: Non-nested dict where key is CSV header and each value
        is a column, or None when exhausted.
        """
        relation_dict = self.create_next_relation()
        if not relation_dict:
            return None
        self._validate(RELATION_REQUIRED_HEADERS, relation_dict)
        return relation_dict

    def _validate(self, required_set, val_dict):
        # type: (Set[str], Dict[str, Any]) -> None
        """
        Validates dict that represents CSV header and a row.
         - Checks if it has required headers for either Node or Relation
         - Checks value of LABEL if only first character is upper case
         - Checks value of TYPE if it's all upper case characters
        :param required_set: headers that must all be present in val_dict.
        :param val_dict: the record to validate.
        :return:
        """
        required_count = 0
        for header_col, val_col in \
                ((header_col, val_col) for header_col, val_col
                 in six.iteritems(val_dict) if header_col in required_set):
            required_count += 1
            if header_col in LABELS:
                if not val_col.istitle():
                    raise RuntimeError(
                        'LABEL should only have upper case character on its '
                        'first one: {}'.format(val_col))
            elif header_col in TYPES:
                if not val_col == val_col.upper():
                    # BUG FIX: the format string had no '{}' placeholder, so
                    # the offending value was silently dropped from the error.
                    raise RuntimeError(
                        'TYPE needs to be upper case: {}'.format(val_col))
        if required_count != len(required_set):
            raise RuntimeError(
                'Required header missing. Required: {} , Header: {}'.format(
                    required_set, val_dict.keys()))
|
[
"six.iteritems",
"six.add_metaclass"
] |
[((679, 709), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (696, 709), False, 'import six\n'), ((4401, 4424), 'six.iteritems', 'six.iteritems', (['val_dict'], {}), '(val_dict)\n', (4414, 4424), False, 'import six\n')]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""To perform inference on test set given a trained model."""
import copy
import os
import random
import re
import time
import json
from tqdm import tqdm
import math
import numpy as np
import tensorflow as tf
import model as diag_model
import model_helper
from dialogue import SelfplayDialogue
from utils import dialogue_utils
from utils import misc_utils as utils
from utils.dialogue_utils import task_SP_DISTRIBUTED
def handle_summary(diag_mode, summary_writer, global_step, all_summary,
                   summary_weight):
  """Merge per-batch summaries and emit one weighted-average scalar each."""
  combined = {}
  # Gather every batch's value for each summary key.
  for summary in all_summary:
    for key, value in summary.items():
      combined.setdefault(key, []).append(value)
  print('combined', combined)
  # Average per key (weighted by batch size) and write to TensorBoard.
  for key, values in combined.items():
    averaged = np.average(values, weights=summary_weight)
    utils.add_summary(summary_writer, global_step, diag_mode + '_' + key,
                      averaged)
def pred_action_to_obj(pred_action):
  """Convert a predicted action token sequence into a structured dict.

  Args:
    pred_action: sequence of at least four tokens: two name words, a
      flight token like '<fl_12>' and a status token like '<st_book>'.

  Returns:
    Dict with 'name' (the two name words joined), 'flight' (single-element
    list holding the matched flight token or '') and 'status' (the matched
    status token or '').
  """
  action_obj = {
      'name': ' '.join([pred_action[0], pred_action[1]]),
      'flight': [''],
      'status': ''
  }
  # Raw strings so the regex escapes are explicit (behavior unchanged).
  fl_match = re.match(r'<fl_(\d+)>', pred_action[2])
  if fl_match:
    # NOTE(review): fl_match[0] is the whole token (e.g. '<fl_12>'); if
    # only the numeric id is wanted this should be fl_match[1] — confirm.
    action_obj['flight'][0] = fl_match[0]
  status_match = re.match(r'<st_(\w+)>', pred_action[3])
  if status_match:
    action_obj['status'] = status_match[0]
  return action_obj
def utterance_to_dialogue(utt):
  """Convert a flat token stream into per-speaker dialogue lines.

  '<t1>' starts a customer turn, '<t2>' an agent turn, and '<eod>' ends
  the dialogue; all other tokens are appended to the current turn.
  """
  turns = []
  current = ""
  for token in utt:
    if token in ("<t1>", "<t2>"):
      # A new turn marker flushes the turn accumulated so far.
      if current:
        turns.append(current)
      current = "customer:" if token == "<t1>" else "agent:"
    elif token == "<eod>":
      break
    else:
      current += " " + token
  if current:
    turns.append(current)
  return turns
def output_generated_data(generated_data, eval_out):
  """Write each generated dialogue as one JSON line to ``eval_out``.

  Only the predicted action and the rendered dialogue are serialized;
  intent, ground-truth action and kb are intentionally omitted.
  """
  bs_intent, bs_pred_action, bs_truth_action, utt_arr, bs_kb = generated_data
  batch = zip(bs_intent, bs_pred_action, bs_truth_action, utt_arr, bs_kb)
  for _, pred_action, _, utterance, _ in batch:
    record = {
        'pred_action': pred_action_to_obj(pred_action),
        'dialogue': utterance_to_dialogue(utterance),
    }
    eval_out.write(json.dumps(record) + '\n')
def single_worker_selfplay(mutable_model, immutable_model, mutable_sess,
                           immutable_sess, selfplay_data_file, selfplay_kb_file,
                           global_step, hparams, summary_writer):
  """Selfplay with a single worker.

  This is primarily used for self play evaluation: the two models talk to
  each other over the whole eval set, once for each speaker-order, and the
  generated dialogues plus averaged summaries are written out.
  """
  dialogue_mode = dialogue_utils.mode_self_play_dialogue_eval
  # Read self play data
  selfplay_data = dialogue_utils.load_data(selfplay_data_file)
  selfplay_kb = dialogue_utils.load_data(selfplay_kb_file)
  # construct dialogue object
  dialogue = SelfplayDialogue(
      mutable_model,
      immutable_model,
      mutable_sess,
      immutable_sess,
      hparams.max_dialogue_turns,
      hparams.train_threadhold,
      hparams.start_of_turn1,
      hparams.start_of_turn2,
      hparams.end_of_dialogue,
      summary_writer=summary_writer,
      dialogue_mode=dialogue_mode,
      hparams=hparams)
  batch_size = dialogue.self_play_eval_batch_size
  assert batch_size <= len(selfplay_data)
  # Restore both models from the latest self-play checkpoint, falling back
  # to the supervised pre-trained one (see load_self_play_model).
  loaded_mutable, _ = load_self_play_model(
      dialogue.mutable_model, dialogue.mutable_sess, 'mutable',
      hparams.self_play_pretrain_dir, hparams.out_dir)
  loaded_immutable, _ = load_self_play_model(
      dialogue.immutable_model, dialogue.immutable_sess, 'immutable',
      hparams.self_play_pretrain_dir, hparams.out_dir)
  worker_step = 0
  all_summary = []
  summary_weight = []  # used in combination with all_summary
  # max_eval_per_flip = 100000
  # We flip the role of the agent for exactly two times. In the first iteration
  # when flip = 0, mutable model will be agent 1 and immutable model will be
  # agent 2. The other way around when flip = 1.
  start_time = time.time()
  num_flips_for_initial_speaker = 2
  with tf.gfile.GFile(hparams.selfplay_eval_output_file, 'w') as selfplay_out:
    print('flip 1')
    for flip in range(num_flips_for_initial_speaker):
      # epoch = -1
      i = len(selfplay_data) # force shuffling at the beginning
      agent1, agent2, _ = dialogue.flip_agent(
          (loaded_mutable, mutable_sess, dialogue.mutable_handles),
          (loaded_immutable, immutable_sess, dialogue.immutable_handles), flip)
      # only eval one epoch
      # while epoch <= 0:
      # print(i, max_eval_per_flip)
      # if i * batch_size >= len(selfplay_data): # reached the end
      input_data = list(zip(selfplay_data, selfplay_kb))
      # we don't shuffle in evaluation
      # random.shuffle(input_data) # random shuffle input data
      # i = 0
      selfplay_data, selfplay_kb = list(zip(*input_data))
      # epoch += 1
      # Walk the eval set in fixed-size batches (last one may be smaller).
      ceil = int(math.ceil(len(selfplay_data) *1.0 / batch_size))
      for i in tqdm(list(range(0, ceil))):
        start_ind = i * batch_size
        end_ind = min(i * batch_size + batch_size, len(selfplay_data))
        batch_data = selfplay_data[start_ind:end_ind]
        batch_kb = selfplay_kb[start_ind:end_ind]
        # we indicate to let agent1 to talk first. Keep in mind that we will
        # swap between agent1 and agent2.
        speaker = flip % 2
        generated_data, _, summary = dialogue.talk(hparams.max_dialogue_len,
                                                   batch_data, batch_kb, agent1,
                                                   agent2, worker_step,
                                                   end_ind - start_ind, speaker)
        output_generated_data(generated_data, selfplay_out)
        all_summary.append(summary)
        # number of elements processed
        summary_weight.append(end_ind - start_ind)
        worker_step += 1
  # Average all batch summaries (weighted by batch size) into one report.
  handle_summary(dialogue_mode, summary_writer, global_step, all_summary,
                 summary_weight)
  end_time = time.time()
  print('finished')
  utils.add_summary(summary_writer, global_step, dialogue_mode + '_time',
                    end_time - start_time)  # step wise summary
def load_self_play_model(model, sess, identity, supervised_learning_path,
                         self_play_path):
  """This function loads the self-play model.

  It will first check the self play directory. If it's empty it will then
  load the pre-trained model from supervised learning.

  Args:
    model: wrapper exposing ``graph`` to restore into.
    sess: session bound to ``model.graph``.
    identity: tag ('mutable'/'immutable') used only for log messages.
    supervised_learning_path: fallback checkpoint directory.
    self_play_path: preferred checkpoint directory.

  Returns:
    The ``(model, sess)`` pair with variables restored.
  """
  ckpt = tf.train.latest_checkpoint(self_play_path)
  # first try self_play out dir
  if ckpt:
    print('{0} restore from self_play path at {1}'.format(
        identity, self_play_path))
    with model.graph.as_default():
      model_helper.full_restore(sess, ckpt)
  # if model doesn't exist then load supervised learning model
  else:
    print('{0} restore from supervised learning at {1}'.format(
        identity, supervised_learning_path))
    ckpt = tf.train.latest_checkpoint(supervised_learning_path)
    assert ckpt
    with model.graph.as_default():
      # first do initialization to make sure that all variables are
      # initialized, since the supervised checkpoint may not cover every
      # variable (e.g. optimizer slots); restore then overwrites what it can.
      sess.run(tf.global_variables_initializer())
      sess.run(tf.tables_initializer())
      model_helper.full_restore(sess, ckpt)
  return model, sess
def self_play_eval_fn(hparams,
                      identity,
                      num_workers=1,
                      jobid=0,
                      scope=None,
                      target_session=''):
  """This is the single worker self play.

  Mostly used for self play evaluation: waits for new checkpoints in
  ``hparams.out_dir`` and runs ``single_worker_selfplay`` on each (or just
  once when ``hparams.eval_forever`` is off). ``identity`` is used here to
  distinguish between workers.
  """
  model_creator = diag_model.Model
  mutable_model = model_helper.create_selfplay_model(
      model_creator,
      True,  # mutable is True
      num_workers,
      jobid,
      hparams=hparams,
      scope=scope)
  immutable_model = model_helper.create_selfplay_model(
      model_creator,
      False,  # mutable is False
      num_workers,
      jobid,
      hparams=hparams,
      scope=scope)
  # Each model owns its own graph, so each gets its own session.
  mutable_sess = tf.Session(
      graph=mutable_model.graph,
      config=tf.ConfigProto(
          allow_soft_placement=True, device_count={'GPU': hparams.num_gpus}))
  immutable_sess = tf.Session(
      graph=immutable_model.graph,
      config=tf.ConfigProto(
          allow_soft_placement=True, device_count={'GPU': hparams.num_gpus}))
  # number of steps per external eval
  steps_per_external_eval = 10
  # force conducting a self play at the beginning
  last_external_eval_step = -1 * steps_per_external_eval
  print('hparams.self_play_pretrain_dir=', hparams.self_play_pretrain_dir)
  print('steps_per_external_eval=', steps_per_external_eval)
  writer_path = os.path.join(hparams.out_dir,
                             identity + hparams.task_type + '_log')
  summary_writer = tf.summary.FileWriter(writer_path, mutable_sess.graph)
  print('summary_writer estabilished at', writer_path)
  # waiting for checkpoints and loop forever
  latest_ckpt = None
  while True:
    # Blocks until a checkpoint newer than latest_ckpt appears.
    latest_ckpt = tf.contrib.training.wait_for_new_checkpoint(
        hparams.out_dir, latest_ckpt)
    print('got checkpoint', latest_ckpt)
    # get the global_step variable first
    with mutable_model.graph.as_default():
      # first initialize to avoid encountering missing component for adam optimizer
      _, global_step = model_helper.create_or_load_model(
          mutable_model.model, hparams.out_dir, mutable_sess, hparams.task_type)
    # valid evaluation step
    if (not hparams.eval_forever) or (global_step - last_external_eval_step >=
                                      steps_per_external_eval):
      # if eval_forever is disabled, we will do one selfplay evaluation;
      # otherwise, we will wait until certain number of timesteps are elapsed.
      last_external_eval_step = global_step
      print('do single worker evaluation')
      single_worker_selfplay(mutable_model, immutable_model, mutable_sess,
                             immutable_sess, hparams.self_play_eval_data,
                             hparams.self_play_eval_kb, global_step, hparams,
                             summary_writer)
    else:
      print('Wait until steps_per_external_eval is reached.', global_step,
            last_external_eval_step, steps_per_external_eval)
    if not hparams.eval_forever:
      break  # if eval_forever is disabled, we only evaluate once
  mutable_sess.close()
  immutable_sess.close()
def multi_worker_selfplay(hparams,
                          identity,
                          scope=None,
                          target_session='',
                          is_chief=True,
                          ps_tasks=0,
                          num_workers=1,
                          jobid=0,
                          startup_delay_steps=0):
  """This is the multi worker selfplay, mostly used for self play
  distributed training.

  Repeatedly has the trainable ("mutable") model talk with a frozen
  ("immutable") model and hands the generated dialogues to the dialogue
  object for training; the chief worker additionally checkpoints and
  writes summaries. ``identity`` tags this worker's log directory.
  """
  immutable_model_reload_freq = hparams.immutable_model_reload_freq
  # 1. models and summary writer
  model_creator = diag_model.Model
  extra_args = model_helper.ExtraArgs(
      single_cell_fn=None,
      # Place variables on parameter servers in the distributed setting.
      model_device_fn=tf.train.replica_device_setter(ps_tasks),
      attention_mechanism_fn=None)
  mutable_model = model_helper.create_selfplay_model(
      model_creator,
      is_mutable=True,
      num_workers=num_workers,
      jobid=jobid,
      hparams=hparams,
      scope=scope,
      extra_args=extra_args)
  # The immutable copy gets its own hparams so we can force it onto CPU.
  immutable_hparams = copy.deepcopy(hparams)
  immutable_hparams.num_gpus = 0
  immutable_model = model_helper.create_selfplay_model(
      model_creator,
      is_mutable=False,
      num_workers=num_workers,
      jobid=jobid,
      hparams=immutable_hparams,
      scope=scope)
  if hparams.self_play_immutable_gpu:
    print('using GPU for immutable')
    immutable_sess = tf.Session(
        graph=immutable_model.graph,
        config=tf.ConfigProto(allow_soft_placement=True))
  else:
    print('not using GPU for immutable')
    immutable_sess = tf.Session(
        graph=immutable_model.graph,
        config=tf.ConfigProto(
            allow_soft_placement=True, device_count={'GPU': 0}))
  immutable_model, immutable_sess = load_self_play_model(
      immutable_model, immutable_sess, 'immutable',
      hparams.self_play_pretrain_dir, hparams.out_dir)
  global_step = immutable_model.model.global_step.eval(session=immutable_sess)
  # The chief seeds out_dir with a checkpoint so the Supervisor below (and
  # the other workers) have something to restore from.
  if is_chief:
    ckpt = tf.train.latest_checkpoint(hparams.out_dir)
    if not ckpt:
      print('global_step, saving pretrain model to hparams.out_dir',
            global_step, hparams.out_dir)
      immutable_model.model.saver.save(  # this is the prevent adam error
          immutable_sess,
          os.path.join(hparams.out_dir, 'dialogue.ckpt'),
          global_step=global_step)
      print('save finished')
  if is_chief:
    summary_writer_path = os.path.join(hparams.out_dir,
                                       identity + task_SP_DISTRIBUTED + '_log')
    summary_writer = tf.summary.FileWriter(summary_writer_path,
                                           mutable_model.graph)
    print('summary writer established at', summary_writer_path)
  else:
    # Non-chief workers do not write summaries.
    summary_writer = None
  # 2. supervisor and sessions
  sv = tf.train.Supervisor(
      graph=mutable_model.graph,
      is_chief=is_chief,
      saver=mutable_model.model.saver,
      save_model_secs=0,  # disable automatic save checkpoints
      summary_op=None,
      logdir=hparams.out_dir,
      checkpoint_basename='dialogue.ckpt')
  mutable_config = utils.get_config_proto(
      log_device_placement=hparams.log_device_placement,
      allow_soft_placement=True)
  mutable_config.device_count['GPU'] = hparams.num_gpus
  mutable_sess = sv.prepare_or_wait_for_session(
      target_session,
      config=mutable_config)
  # 3. additional preparations
  global_step = mutable_model.model.global_step.eval(session=mutable_sess)
  # Stagger worker start-up: higher job ids wait longer before training.
  while global_step < (jobid * (jobid + 1) * startup_delay_steps / 2):
    time.sleep(1)
    global_step = mutable_model.model.global_step.eval(session=mutable_sess)
  # save first model
  if is_chief:
    print('saving the first checkpoint to', hparams.out_dir)
    mutable_model.model.saver.save(
        mutable_sess,
        os.path.join(hparams.out_dir, 'dialogue.ckpt'),
        global_step=global_step)
    last_save_step = global_step
  # Read data
  selfplay_data = dialogue_utils.load_data(hparams.self_play_train_data)
  selfplay_kb = dialogue_utils.load_data(hparams.self_play_train_kb)
  dialogue = SelfplayDialogue(
      mutable_model,
      immutable_model,
      mutable_sess,
      immutable_sess,
      hparams.max_dialogue_turns,
      hparams.train_threadhold,
      hparams.start_of_turn1,
      hparams.start_of_turn2,
      hparams.end_of_dialogue,
      summary_writer=summary_writer,
      dialogue_mode=task_SP_DISTRIBUTED,
      hparams=hparams)
  # 4. main loop
  last_immmutable_model_reload = global_step
  last_save_step = global_step
  batch_size = dialogue.batch_size
  assert batch_size <= len(selfplay_data)
  # this is the start point of the self-play data. force shuffling at the beginning
  i = len(selfplay_data)
  # train_stats counts how often each agent slot held the mutable model.
  train_stats = [0, 0]
  while global_step < hparams.num_self_play_train_steps + hparams.num_train_steps:
    # a. reload immutable model, mutable will be automatically managed by supervisor
    if immutable_model_reload_freq > 0 and global_step - last_immmutable_model_reload > immutable_model_reload_freq:
      immutable_model, immutable_sess = load_self_play_model(
          immutable_model, immutable_sess, 'immutable',
          hparams.self_play_pretrain_dir, hparams.out_dir)
      last_immmutable_model_reload = global_step
    # b. possibly flip between speakers (or roll out models),
    # based on either a random policy or by step counts
    agent1, agent2, mutable_agent_index = dialogue.flip_agent(
        (mutable_model, mutable_sess, dialogue.mutable_handles),
        (immutable_model, immutable_sess, dialogue.immutable_handles))
    train_stats[mutable_agent_index] += 1
    # read selfplay data
    start_time = time.time()
    if i * batch_size + batch_size > len(selfplay_data): # reached the end
      input_data = list(zip(selfplay_data, selfplay_kb))
      random.shuffle(input_data) # random shuffle input data
      i = 0
      # Keep data and kb aligned through the shuffle.
      selfplay_data, selfplay_kb = list(zip(*input_data))
    start_ind, end_ind = i * batch_size, i * batch_size + batch_size
    batch_data, batch_kb = selfplay_data[start_ind:end_ind], selfplay_kb[
        start_ind:end_ind]
    train_example, _, _ = dialogue.talk(hparams.max_dialogue_len, batch_data,
                                        batch_kb, agent1, agent2, global_step,
                                        batch_size)
    possible_global_step = dialogue.maybe_train(
        train_example, mutable_agent_index, global_step, force=True)
    if possible_global_step:
      global_step = possible_global_step
    # Periodic checkpointing is done manually by the chief (Supervisor
    # autosave was disabled above).
    if is_chief and global_step - last_save_step > hparams.self_play_dist_save_freq:
      mutable_model.model.saver.save(
          mutable_sess,
          os.path.join(hparams.out_dir, 'dialogue.ckpt'),
          global_step=global_step)
      last_save_step = global_step
    end_time = time.time()
    if is_chief:
      utils.add_summary(summary_writer, global_step,
                        task_SP_DISTRIBUTED + '_' + 'time',
                        end_time - start_time)
      utils.add_summary(summary_writer, global_step,
                        task_SP_DISTRIBUTED + '_' + 'train_ratio',
                        train_stats[0] * 1.0 / (train_stats[1] + 0.1))
    i += 1
  if is_chief:
    summary_writer.close()
  mutable_sess.close()
  immutable_sess.close()
|
[
"random.shuffle",
"json.dumps",
"tensorflow.ConfigProto",
"tensorflow.train.latest_checkpoint",
"tensorflow.tables_initializer",
"os.path.join",
"utils.misc_utils.add_summary",
"tensorflow.summary.FileWriter",
"tensorflow.contrib.training.wait_for_new_checkpoint",
"copy.deepcopy",
"numpy.average",
"model_helper.create_or_load_model",
"tensorflow.global_variables_initializer",
"dialogue.SelfplayDialogue",
"re.match",
"time.sleep",
"tensorflow.gfile.GFile",
"tensorflow.train.replica_device_setter",
"utils.dialogue_utils.load_data",
"utils.misc_utils.get_config_proto",
"model_helper.full_restore",
"time.time",
"tensorflow.train.Supervisor",
"model_helper.create_selfplay_model"
] |
[((1737, 1776), 're.match', 're.match', (['"""<fl_(\\\\d+)>"""', 'pred_action[2]'], {}), "('<fl_(\\\\d+)>', pred_action[2])\n", (1745, 1776), False, 'import re\n'), ((1858, 1897), 're.match', 're.match', (['"""<st_(\\\\w+)>"""', 'pred_action[3]'], {}), "('<st_(\\\\w+)>', pred_action[3])\n", (1866, 1897), False, 'import re\n'), ((3415, 3459), 'utils.dialogue_utils.load_data', 'dialogue_utils.load_data', (['selfplay_data_file'], {}), '(selfplay_data_file)\n', (3439, 3459), False, 'from utils import dialogue_utils\n'), ((3476, 3518), 'utils.dialogue_utils.load_data', 'dialogue_utils.load_data', (['selfplay_kb_file'], {}), '(selfplay_kb_file)\n', (3500, 3518), False, 'from utils import dialogue_utils\n'), ((3563, 3862), 'dialogue.SelfplayDialogue', 'SelfplayDialogue', (['mutable_model', 'immutable_model', 'mutable_sess', 'immutable_sess', 'hparams.max_dialogue_turns', 'hparams.train_threadhold', 'hparams.start_of_turn1', 'hparams.start_of_turn2', 'hparams.end_of_dialogue'], {'summary_writer': 'summary_writer', 'dialogue_mode': 'dialogue_mode', 'hparams': 'hparams'}), '(mutable_model, immutable_model, mutable_sess,\n immutable_sess, hparams.max_dialogue_turns, hparams.train_threadhold,\n hparams.start_of_turn1, hparams.start_of_turn2, hparams.end_of_dialogue,\n summary_writer=summary_writer, dialogue_mode=dialogue_mode, hparams=hparams\n )\n', (3579, 3862), False, 'from dialogue import SelfplayDialogue\n'), ((4699, 4710), 'time.time', 'time.time', ([], {}), '()\n', (4708, 4710), False, 'import time\n'), ((6698, 6709), 'time.time', 'time.time', ([], {}), '()\n', (6707, 6709), False, 'import time\n'), ((6732, 6831), 'utils.misc_utils.add_summary', 'utils.add_summary', (['summary_writer', 'global_step', "(dialogue_mode + '_time')", '(end_time - start_time)'], {}), "(summary_writer, global_step, dialogue_mode + '_time', \n end_time - start_time)\n", (6749, 6831), True, 'from utils import misc_utils as utils\n'), ((7180, 7222), 'tensorflow.train.latest_checkpoint', 
'tf.train.latest_checkpoint', (['self_play_path'], {}), '(self_play_path)\n', (7206, 7222), True, 'import tensorflow as tf\n'), ((8377, 8486), 'model_helper.create_selfplay_model', 'model_helper.create_selfplay_model', (['model_creator', '(True)', 'num_workers', 'jobid'], {'hparams': 'hparams', 'scope': 'scope'}), '(model_creator, True, num_workers, jobid,\n hparams=hparams, scope=scope)\n', (8411, 8486), False, 'import model_helper\n'), ((8559, 8669), 'model_helper.create_selfplay_model', 'model_helper.create_selfplay_model', (['model_creator', '(False)', 'num_workers', 'jobid'], {'hparams': 'hparams', 'scope': 'scope'}), '(model_creator, False, num_workers, jobid,\n hparams=hparams, scope=scope)\n', (8593, 8669), False, 'import model_helper\n'), ((9396, 9464), 'os.path.join', 'os.path.join', (['hparams.out_dir', "(identity + hparams.task_type + '_log')"], {}), "(hparams.out_dir, identity + hparams.task_type + '_log')\n", (9408, 9464), False, 'import os\n'), ((9513, 9567), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['writer_path', 'mutable_sess.graph'], {}), '(writer_path, mutable_sess.graph)\n', (9534, 9567), True, 'import tensorflow as tf\n'), ((11925, 12090), 'model_helper.create_selfplay_model', 'model_helper.create_selfplay_model', (['model_creator'], {'is_mutable': '(True)', 'num_workers': 'num_workers', 'jobid': 'jobid', 'hparams': 'hparams', 'scope': 'scope', 'extra_args': 'extra_args'}), '(model_creator, is_mutable=True,\n num_workers=num_workers, jobid=jobid, hparams=hparams, scope=scope,\n extra_args=extra_args)\n', (11959, 12090), False, 'import model_helper\n'), ((12148, 12170), 'copy.deepcopy', 'copy.deepcopy', (['hparams'], {}), '(hparams)\n', (12161, 12170), False, 'import copy\n'), ((12224, 12378), 'model_helper.create_selfplay_model', 'model_helper.create_selfplay_model', (['model_creator'], {'is_mutable': '(False)', 'num_workers': 'num_workers', 'jobid': 'jobid', 'hparams': 'immutable_hparams', 'scope': 'scope'}), 
'(model_creator, is_mutable=False,\n num_workers=num_workers, jobid=jobid, hparams=immutable_hparams, scope=\n scope)\n', (12258, 12378), False, 'import model_helper\n'), ((13909, 14114), 'tensorflow.train.Supervisor', 'tf.train.Supervisor', ([], {'graph': 'mutable_model.graph', 'is_chief': 'is_chief', 'saver': 'mutable_model.model.saver', 'save_model_secs': '(0)', 'summary_op': 'None', 'logdir': 'hparams.out_dir', 'checkpoint_basename': '"""dialogue.ckpt"""'}), "(graph=mutable_model.graph, is_chief=is_chief, saver=\n mutable_model.model.saver, save_model_secs=0, summary_op=None, logdir=\n hparams.out_dir, checkpoint_basename='dialogue.ckpt')\n", (13928, 14114), True, 'import tensorflow as tf\n'), ((14206, 14310), 'utils.misc_utils.get_config_proto', 'utils.get_config_proto', ([], {'log_device_placement': 'hparams.log_device_placement', 'allow_soft_placement': '(True)'}), '(log_device_placement=hparams.log_device_placement,\n allow_soft_placement=True)\n', (14228, 14310), True, 'from utils import misc_utils as utils\n'), ((15060, 15114), 'utils.dialogue_utils.load_data', 'dialogue_utils.load_data', (['hparams.self_play_train_data'], {}), '(hparams.self_play_train_data)\n', (15084, 15114), False, 'from utils import dialogue_utils\n'), ((15131, 15183), 'utils.dialogue_utils.load_data', 'dialogue_utils.load_data', (['hparams.self_play_train_kb'], {}), '(hparams.self_play_train_kb)\n', (15155, 15183), False, 'from utils import dialogue_utils\n'), ((15198, 15502), 'dialogue.SelfplayDialogue', 'SelfplayDialogue', (['mutable_model', 'immutable_model', 'mutable_sess', 'immutable_sess', 'hparams.max_dialogue_turns', 'hparams.train_threadhold', 'hparams.start_of_turn1', 'hparams.start_of_turn2', 'hparams.end_of_dialogue'], {'summary_writer': 'summary_writer', 'dialogue_mode': 'task_SP_DISTRIBUTED', 'hparams': 'hparams'}), '(mutable_model, immutable_model, mutable_sess,\n immutable_sess, hparams.max_dialogue_turns, hparams.train_threadhold,\n hparams.start_of_turn1, 
hparams.start_of_turn2, hparams.end_of_dialogue,\n summary_writer=summary_writer, dialogue_mode=task_SP_DISTRIBUTED,\n hparams=hparams)\n', (15214, 15502), False, 'from dialogue import SelfplayDialogue\n'), ((1399, 1448), 'numpy.average', 'np.average', (['combined[key]'], {'weights': 'summary_weight'}), '(combined[key], weights=summary_weight)\n', (1409, 1448), True, 'import numpy as np\n'), ((1486, 1553), 'utils.misc_utils.add_summary', 'utils.add_summary', (['summary_writer', 'global_step', 'name', 'combined[key]'], {}), '(summary_writer, global_step, name, combined[key])\n', (1503, 1553), True, 'from utils import misc_utils as utils\n'), ((4754, 4808), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['hparams.selfplay_eval_output_file', '"""w"""'], {}), "(hparams.selfplay_eval_output_file, 'w')\n", (4768, 4808), True, 'import tensorflow as tf\n'), ((7630, 7682), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['supervised_learning_path'], {}), '(supervised_learning_path)\n', (7656, 7682), True, 'import tensorflow as tf\n'), ((9722, 9795), 'tensorflow.contrib.training.wait_for_new_checkpoint', 'tf.contrib.training.wait_for_new_checkpoint', (['hparams.out_dir', 'latest_ckpt'], {}), '(hparams.out_dir, latest_ckpt)\n', (9765, 9795), True, 'import tensorflow as tf\n'), ((13098, 13141), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['hparams.out_dir'], {}), '(hparams.out_dir)\n', (13124, 13141), True, 'import tensorflow as tf\n'), ((13534, 13604), 'os.path.join', 'os.path.join', (['hparams.out_dir', "(identity + task_SP_DISTRIBUTED + '_log')"], {}), "(hparams.out_dir, identity + task_SP_DISTRIBUTED + '_log')\n", (13546, 13604), False, 'import os\n'), ((13665, 13728), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['summary_writer_path', 'mutable_model.graph'], {}), '(summary_writer_path, mutable_model.graph)\n', (13686, 13728), True, 'import tensorflow as tf\n'), ((14658, 14671), 'time.sleep', 'time.sleep', 
(['(1)'], {}), '(1)\n', (14668, 14671), False, 'import time\n'), ((16775, 16786), 'time.time', 'time.time', ([], {}), '()\n', (16784, 16786), False, 'import time\n'), ((17910, 17921), 'time.time', 'time.time', ([], {}), '()\n', (17919, 17921), False, 'import time\n'), ((7401, 7438), 'model_helper.full_restore', 'model_helper.full_restore', (['sess', 'ckpt'], {}), '(sess, ckpt)\n', (7426, 7438), False, 'import model_helper\n'), ((7910, 7947), 'model_helper.full_restore', 'model_helper.full_restore', (['sess', 'ckpt'], {}), '(sess, ckpt)\n', (7935, 7947), False, 'import model_helper\n'), ((8799, 8885), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'device_count': "{'GPU': hparams.num_gpus}"}), "(allow_soft_placement=True, device_count={'GPU': hparams.\n num_gpus})\n", (8813, 8885), True, 'import tensorflow as tf\n'), ((8972, 9058), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'device_count': "{'GPU': hparams.num_gpus}"}), "(allow_soft_placement=True, device_count={'GPU': hparams.\n num_gpus})\n", (8986, 9058), True, 'import tensorflow as tf\n'), ((10037, 10145), 'model_helper.create_or_load_model', 'model_helper.create_or_load_model', (['mutable_model.model', 'hparams.out_dir', 'mutable_sess', 'hparams.task_type'], {}), '(mutable_model.model, hparams.out_dir,\n mutable_sess, hparams.task_type)\n', (10070, 10145), False, 'import model_helper\n'), ((11829, 11869), 'tensorflow.train.replica_device_setter', 'tf.train.replica_device_setter', (['ps_tasks'], {}), '(ps_tasks)\n', (11859, 11869), True, 'import tensorflow as tf\n'), ((14913, 14959), 'os.path.join', 'os.path.join', (['hparams.out_dir', '"""dialogue.ckpt"""'], {}), "(hparams.out_dir, 'dialogue.ckpt')\n", (14925, 14959), False, 'import os\n'), ((16926, 16952), 'random.shuffle', 'random.shuffle', (['input_data'], {}), '(input_data)\n', (16940, 16952), False, 'import random\n'), ((17946, 18055), 'utils.misc_utils.add_summary', 
'utils.add_summary', (['summary_writer', 'global_step', "(task_SP_DISTRIBUTED + '_' + 'time')", '(end_time - start_time)'], {}), "(summary_writer, global_step, task_SP_DISTRIBUTED + '_' +\n 'time', end_time - start_time)\n", (17963, 18055), True, 'from utils import misc_utils as utils\n'), ((18106, 18246), 'utils.misc_utils.add_summary', 'utils.add_summary', (['summary_writer', 'global_step', "(task_SP_DISTRIBUTED + '_' + 'train_ratio')", '(train_stats[0] * 1.0 / (train_stats[1] + 0.1))'], {}), "(summary_writer, global_step, task_SP_DISTRIBUTED + '_' +\n 'train_ratio', train_stats[0] * 1.0 / (train_stats[1] + 0.1))\n", (18123, 18246), True, 'from utils import misc_utils as utils\n'), ((2956, 2981), 'json.dumps', 'json.dumps', (['generated_obj'], {}), '(generated_obj)\n', (2966, 2981), False, 'import json\n'), ((7829, 7862), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7860, 7862), True, 'import tensorflow as tf\n'), ((7879, 7902), 'tensorflow.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (7900, 7902), True, 'import tensorflow as tf\n'), ((12568, 12609), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (12582, 12609), True, 'import tensorflow as tf\n'), ((12745, 12811), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'device_count': "{'GPU': 0}"}), "(allow_soft_placement=True, device_count={'GPU': 0})\n", (12759, 12811), True, 'import tensorflow as tf\n'), ((13380, 13426), 'os.path.join', 'os.path.join', (['hparams.out_dir', '"""dialogue.ckpt"""'], {}), "(hparams.out_dir, 'dialogue.ckpt')\n", (13392, 13426), False, 'import os\n'), ((17777, 17823), 'os.path.join', 'os.path.join', (['hparams.out_dir', '"""dialogue.ckpt"""'], {}), "(hparams.out_dir, 'dialogue.ckpt')\n", (17789, 17823), False, 'import os\n')]
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import os.path
import unittest2
from st2common.util.file_system import get_file_list
# Directory containing this test module.
CURRENT_DIR = os.path.dirname(__file__)
# Root of the st2tests fixture tree, resolved relative to this file.
ST2TESTS_DIR = os.path.join(CURRENT_DIR, '../../../st2tests/st2tests')
class FileSystemUtilsTestCase(unittest2.TestCase):
    def test_get_file_list(self):
        """get_file_list honors both the default and a custom exclude list."""
        directory = os.path.join(ST2TESTS_DIR, 'policies')

        # Files that survive every exclude pattern used below.
        python_files = [
            'mock_exception.py',
            'concurrency.py',
            '__init__.py',
            'meta/__init__.py'
        ]
        # YAML metadata files -- present unless '*.yaml' is excluded.
        yaml_files = [
            'meta/mock_exception.yaml',
            'meta/concurrency.yaml'
        ]

        scenarios = [
            # (exclude patterns, expected result)
            (['*.pyc'], python_files + yaml_files),
            (['*.pyc', '*.yaml'], python_files),
        ]
        for exclude_patterns, expected in scenarios:
            result = get_file_list(directory=directory,
                                   exclude_patterns=exclude_patterns)
            self.assertItemsEqual(expected, result)
|
[
"st2common.util.file_system.get_file_list",
"os.path.dirname",
"os.path.join"
] |
[((931, 956), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (946, 956), False, 'import os\n'), ((972, 1027), 'os.path.join', 'os.path.join', (['CURRENT_DIR', '"""../../../st2tests/st2tests"""'], {}), "(CURRENT_DIR, '../../../st2tests/st2tests')\n", (984, 1027), False, 'import os\n'), ((1170, 1208), 'os.path.join', 'os.path.join', (['ST2TESTS_DIR', '"""policies"""'], {}), "(ST2TESTS_DIR, 'policies')\n", (1182, 1208), False, 'import os\n'), ((1455, 1517), 'st2common.util.file_system.get_file_list', 'get_file_list', ([], {'directory': 'directory', 'exclude_patterns': "['*.pyc']"}), "(directory=directory, exclude_patterns=['*.pyc'])\n", (1468, 1517), False, 'from st2common.util.file_system import get_file_list\n'), ((1769, 1841), 'st2common.util.file_system.get_file_list', 'get_file_list', ([], {'directory': 'directory', 'exclude_patterns': "['*.pyc', '*.yaml']"}), "(directory=directory, exclude_patterns=['*.pyc', '*.yaml'])\n", (1782, 1841), False, 'from st2common.util.file_system import get_file_list\n')]
|
import os
from pypy.interpreter.error import operationerrfmt, OperationError
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import (
TypeDef, interp_attrproperty, generic_new_descr)
from pypy.module.exceptions.interp_exceptions import W_IOError
from pypy.module._io.interp_fileio import W_FileIO
from pypy.module._io.interp_textio import W_TextIOWrapper
from rpython.rtyper.module.ll_os_stat import STAT_FIELD_TYPES
class W_BlockingIOError(W_IOError):
    """Interp-level BlockingIOError.

    Raised when I/O on a non-blocking stream would block; ``written``
    backs the app-level ``characters_written`` attribute (see the
    TypeDef below).
    """
    def __init__(self, space):
        W_IOError.__init__(self, space)
        # Number of bytes successfully written before the operation
        # would have blocked.
        self.written = 0

    @unwrap_spec(written=int)
    def descr_init(self, space, w_errno, w_strerror, written=0):
        # App-level __init__(errno, strerror[, characters_written]).
        W_IOError.descr_init(self, space, [w_errno, w_strerror])
        self.written = written
# App-level type definition: exposes W_BlockingIOError as
# BlockingIOError, mapping the app-level ``characters_written``
# attribute onto the interp-level ``written`` field.
W_BlockingIOError.typedef = TypeDef(
    'BlockingIOError', W_IOError.typedef,
    __doc__ = ("Exception raised when I/O would block "
               "on a non-blocking I/O stream"),
    __new__  = generic_new_descr(W_BlockingIOError),
    __init__ = interp2app(W_BlockingIOError.descr_init),
    characters_written = interp_attrproperty('written', W_BlockingIOError),
)
# Default buffer size for the buffered I/O layer (8 KiB), used when the
# caller passes buffering=-1 and no filesystem block size is available.
DEFAULT_BUFFER_SIZE = 8 * 1024
@unwrap_spec(mode=str, buffering=int,
            encoding="str_or_None", errors="str_or_None",
            newline="str_or_None", closefd=bool)
def open(space, w_file, mode="r", buffering=-1, encoding=None, errors=None,
        newline=None, closefd=True):
    """Interp-level implementation of the app-level ``io.open()``.

    Builds the usual layered stack: a raw W_FileIO, optionally a
    buffered reader/writer/random layer on top, and -- for text mode --
    a W_TextIOWrapper as the outermost layer.  Returns the outermost
    object.  Raises app-level TypeError/ValueError for invalid
    ``file``/``mode``/argument combinations, mirroring CPython.
    """
    from pypy.module._io.interp_bufferedio import (W_BufferedRandom,
        W_BufferedWriter, W_BufferedReader)

    # The file argument must be a file name or an integer file descriptor.
    if not (space.isinstance_w(w_file, space.w_basestring) or
        space.isinstance_w(w_file, space.w_int) or
        space.isinstance_w(w_file, space.w_long)):
        raise operationerrfmt(space.w_TypeError,
            "invalid file: %s", space.str_w(space.repr(w_file))
        )

    reading = writing = appending = updating = text = binary = universal = False

    # Reject duplicated mode characters (e.g. "rr").
    uniq_mode = {}
    for flag in mode:
        uniq_mode[flag] = None
    if len(uniq_mode) != len(mode):
        raise operationerrfmt(space.w_ValueError,
            "invalid mode: %s", mode
        )
    for flag in mode:
        if flag == "r":
            reading = True
        elif flag == "w":
            writing = True
        elif flag == "a":
            appending = True
        elif flag == "+":
            updating = True
        elif flag == "t":
            text = True
        elif flag == "b":
            binary = True
        elif flag == "U":
            # Universal-newline mode implies reading.
            universal = True
            reading = True
        else:
            raise operationerrfmt(space.w_ValueError,
                "invalid mode: %s", mode
            )

    # Mode string handed down to the raw W_FileIO layer.
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    if appending:
        rawmode += "a"
    if updating:
        rawmode += "+"

    if universal and (writing or appending):
        raise OperationError(space.w_ValueError,
            space.wrap("can't use U and writing mode at once")
        )
    if text and binary:
        raise OperationError(space.w_ValueError,
            space.wrap("can't have text and binary mode at once")
        )
    if reading + writing + appending > 1:
        raise OperationError(space.w_ValueError,
            space.wrap("must have exactly one of read/write/append mode")
        )
    # The text-mode-only arguments are rejected in binary mode, each with
    # its own message, matching CPython's io.open().
    if binary and encoding is not None:
        # BUGFIX: this check previously raised the message meant for the
        # 'errors' argument.
        raise OperationError(space.w_ValueError,
            space.wrap("binary mode doesn't take an encoding argument")
        )
    if binary and errors is not None:
        # BUGFIX: the 'errors' argument was previously not checked at all
        # in binary mode.
        raise OperationError(space.w_ValueError,
            space.wrap("binary mode doesn't take an errors argument")
        )
    if binary and newline is not None:
        raise OperationError(space.w_ValueError,
            space.wrap("binary mode doesn't take a newline argument")
        )

    # Layer 1: the raw file object.
    w_raw = space.call_function(
        space.gettypefor(W_FileIO), w_file, space.wrap(rawmode), space.wrap(closefd)
    )

    isatty = space.is_true(space.call_method(w_raw, "isatty"))
    line_buffering = buffering == 1 or (buffering < 0 and isatty)
    if line_buffering:
        buffering = -1

    if buffering < 0:
        buffering = DEFAULT_BUFFER_SIZE
        # Prefer the filesystem block size when the platform exposes it.
        if space.config.translation.type_system == 'lltype' and 'st_blksize' in STAT_FIELD_TYPES:
            fileno = space.int_w(space.call_method(w_raw, "fileno"))
            try:
                st = os.fstat(fileno)
            except OSError:
                # Errors should never pass silently, except this one time.
                pass
            else:
                if st.st_blksize > 1:
                    buffering = st.st_blksize

    if buffering < 0:
        raise OperationError(space.w_ValueError,
            space.wrap("invalid buffering size")
        )

    if buffering == 0:
        # Unbuffered access is only possible in binary mode; hand back
        # the raw layer directly.
        if not binary:
            raise OperationError(space.w_ValueError,
                space.wrap("can't have unbuffered text I/O")
            )
        return w_raw

    # Layer 2: the buffered object, chosen from the parsed mode.
    if updating:
        buffer_cls = W_BufferedRandom
    elif writing or appending:
        buffer_cls = W_BufferedWriter
    elif reading:
        buffer_cls = W_BufferedReader
    else:
        raise operationerrfmt(space.w_ValueError, "unknown mode: '%s'", mode)
    w_buffer = space.call_function(
        space.gettypefor(buffer_cls), w_raw, space.wrap(buffering)
    )
    if binary:
        return w_buffer

    # Layer 3: the text wrapper, which owns encoding/errors/newline.
    w_wrapper = space.call_function(space.gettypefor(W_TextIOWrapper),
        w_buffer,
        space.wrap(encoding),
        space.wrap(errors),
        space.wrap(newline),
        space.wrap(line_buffering)
    )
    space.setattr(w_wrapper, space.wrap("mode"), space.wrap(mode))
    return w_wrapper
|
[
"pypy.interpreter.gateway.interp2app",
"pypy.module.exceptions.interp_exceptions.W_IOError.descr_init",
"pypy.interpreter.gateway.unwrap_spec",
"pypy.interpreter.typedef.interp_attrproperty",
"pypy.interpreter.typedef.generic_new_descr",
"pypy.module.exceptions.interp_exceptions.W_IOError.__init__",
"pypy.interpreter.error.operationerrfmt",
"os.fstat"
] |
[((1201, 1325), 'pypy.interpreter.gateway.unwrap_spec', 'unwrap_spec', ([], {'mode': 'str', 'buffering': 'int', 'encoding': '"""str_or_None"""', 'errors': '"""str_or_None"""', 'newline': '"""str_or_None"""', 'closefd': 'bool'}), "(mode=str, buffering=int, encoding='str_or_None', errors=\n 'str_or_None', newline='str_or_None', closefd=bool)\n", (1212, 1325), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((605, 629), 'pypy.interpreter.gateway.unwrap_spec', 'unwrap_spec', ([], {'written': 'int'}), '(written=int)\n', (616, 629), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((542, 573), 'pypy.module.exceptions.interp_exceptions.W_IOError.__init__', 'W_IOError.__init__', (['self', 'space'], {}), '(self, space)\n', (560, 573), False, 'from pypy.module.exceptions.interp_exceptions import W_IOError\n'), ((703, 759), 'pypy.module.exceptions.interp_exceptions.W_IOError.descr_init', 'W_IOError.descr_init', (['self', 'space', '[w_errno, w_strerror]'], {}), '(self, space, [w_errno, w_strerror])\n', (723, 759), False, 'from pypy.module.exceptions.interp_exceptions import W_IOError\n'), ((990, 1026), 'pypy.interpreter.typedef.generic_new_descr', 'generic_new_descr', (['W_BlockingIOError'], {}), '(W_BlockingIOError)\n', (1007, 1026), False, 'from pypy.interpreter.typedef import TypeDef, interp_attrproperty, generic_new_descr\n'), ((1043, 1083), 'pypy.interpreter.gateway.interp2app', 'interp2app', (['W_BlockingIOError.descr_init'], {}), '(W_BlockingIOError.descr_init)\n', (1053, 1083), False, 'from pypy.interpreter.gateway import interp2app, unwrap_spec\n'), ((1110, 1159), 'pypy.interpreter.typedef.interp_attrproperty', 'interp_attrproperty', (['"""written"""', 'W_BlockingIOError'], {}), "('written', W_BlockingIOError)\n", (1129, 1159), False, 'from pypy.interpreter.typedef import TypeDef, interp_attrproperty, generic_new_descr\n'), ((2062, 2123), 'pypy.interpreter.error.operationerrfmt', 'operationerrfmt', 
(['space.w_ValueError', '"""invalid mode: %s"""', 'mode'], {}), "(space.w_ValueError, 'invalid mode: %s', mode)\n", (2077, 2123), False, 'from pypy.interpreter.error import operationerrfmt, OperationError\n'), ((4262, 4278), 'os.fstat', 'os.fstat', (['fileno'], {}), '(fileno)\n', (4270, 4278), False, 'import os\n'), ((5037, 5100), 'pypy.interpreter.error.operationerrfmt', 'operationerrfmt', (['space.w_ValueError', '"""unknown mode: \'%s\'"""', 'mode'], {}), '(space.w_ValueError, "unknown mode: \'%s\'", mode)\n', (5052, 5100), False, 'from pypy.interpreter.error import operationerrfmt, OperationError\n'), ((2596, 2657), 'pypy.interpreter.error.operationerrfmt', 'operationerrfmt', (['space.w_ValueError', '"""invalid mode: %s"""', 'mode'], {}), "(space.w_ValueError, 'invalid mode: %s', mode)\n", (2611, 2657), False, 'from pypy.interpreter.error import operationerrfmt, OperationError\n')]
|
import logging
import time
import numpy as np
from param_net.param_fcnet import ParamFCNetRegression
from keras.losses import mean_squared_error
from keras import backend as K
from smac.tae.execute_func import ExecuteTAFuncDict
from smac.scenario.scenario import Scenario
from smac.facade.smac_facade import SMAC
from mini_autonet.intensification.intensification import Intensifier
from mini_autonet.tae.simple_tae import SimpleTAFunc
from sklearn.preprocessing import StandardScaler
from ConfigSpace.configuration_space import Configuration
from ConfigSpace.util import fix_types
class DNN(object):
    """Small AutoML wrapper: tunes and trains a fully-connected
    regression network (ParamFCNetRegression) with SMAC.

    Inputs are standardized with a StandardScaler; targets are
    log10-transformed and standardized before training, and predict()
    inverts both transformations.
    """

    def __init__(self, num_layers_range: list = None,
                 use_dropout: bool = False,
                 use_l2_regularization: bool = False):
        """
        Arguments
        ---------
        num_layers_range : list
            [min, default, max] bounds for the number of layers in the
            configuration space; defaults to [1, 4, 10].
        use_dropout : bool
            whether the configuration space includes dropout
        use_l2_regularization : bool
            whether the configuration space includes L2 regularization
        """
        self.logger = logging.getLogger("AutoNet")

        # BUGFIX: the default used to be a mutable list shared across
        # all instances; build a fresh list per instance instead.
        if num_layers_range is None:
            num_layers_range = [1, 4, 10]
        self.num_layers_range = num_layers_range

        self.use_dropout = use_dropout
        self.use_l2_regularization = use_l2_regularization

        self.scalerX = StandardScaler()
        self.scalerY = StandardScaler()

    def fit(self, X, y,
            max_epochs: int,
            runcount_limit: int = 100,
            wc_limit: int = 60,
            config: Configuration = None,
            seed: int = 12345):
        """Tune hyperparameters (unless *config* is given) and train the
        final network on all data.

        Arguments
        ---------
        X, y : lists of per-split feature matrices / target vectors
            (four splits are expected by the internal cross-validation)
        max_epochs : number of epochs for the final model
        runcount_limit : SMAC run budget multiplier
            (scenario runcount-limit = max_epochs * runcount_limit)
        wc_limit : SMAC wallclock limit in seconds
        config : optional fixed configuration (dict or Configuration);
            when given, no optimization is performed
        seed : RNG seed for SMAC
        """
        # All splits stacked together -- used for validation during
        # tuning and as the training set of the final model.
        X_all = np.vstack(X)
        y_all = np.hstack(y)

        def obj_func(config, instance=None, seed=None, pc=None):
            # SMAC target function: train four models (one per
            # train/validation pairing) for a single epoch each and
            # return the mean validation loss.  ``pc`` carries the
            # partially trained models between calls so training
            # continues instead of restarting.
            models = []
            losses = []
            for model_idx, (train_idx, _valid_idx) in enumerate(
                    [[0, 3], [3, 0], [1, 2], [2, 1]]):
                X_train = self.scalerX.fit_transform(X[train_idx])
                y_train = np.log10(y[train_idx])
                y_train = self.scalerY.fit_transform(y_train.reshape(-1, 1))[:, 0]

                # Validation uses the full stacked data, scaled with the
                # scalers just fitted on the training split.
                X_valid = self.scalerX.transform(X_all)
                y_valid = np.log10(y_all)
                y_valid = self.scalerY.transform(y_valid.reshape(-1, 1))[:, 0]

                if pc is None:
                    if model_idx == 0:
                        # Drop stale graphs before building new models.
                        K.clear_session()
                    model = ParamFCNetRegression(config=config, n_feat=X_train.shape[1],
                                                 expected_num_epochs=max_epochs,
                                                 n_outputs=1,
                                                 verbose=1)
                else:
                    model = pc[model_idx]

                history = model.train(X_train=X_train, y_train=y_train,
                                      X_valid=X_valid, y_valid=y_valid,
                                      n_epochs=1)
                models.append(model)
                losses.append(history["val_loss"][-1])

            return np.mean(losses), {"model": models}

        taf = SimpleTAFunc(obj_func)
        cs = ParamFCNetRegression.get_config_space(
            num_layers_range=self.num_layers_range,
            use_l2_regularization=self.use_l2_regularization,
            use_dropout=self.use_dropout)
        print(cs)

        ac_scenario = Scenario({"run_obj": "quality",  # we optimize quality
                                  "runcount-limit": max_epochs * runcount_limit,
                                  "wallclock-limit": wc_limit,
                                  "cost_for_crash": 10,
                                  "cs": cs,
                                  "deterministic": "true",
                                  "abort_on_first_run_crash": False,
                                  "output-dir": ""
                                  })

        intensifier = Intensifier(tae_runner=taf, stats=None,
                                  traj_logger=None,
                                  rng=np.random.RandomState(42),
                                  run_limit=100,
                                  max_epochs=max_epochs)

        if isinstance(config, dict):
            # BUGFIX: fix_types() was previously called with the builtin
            # ``dict`` *type* instead of the user-supplied config dict.
            config = fix_types(configuration=config, configuration_space=cs)
            config = Configuration(configuration_space=cs, values=config)
        elif runcount_limit == 1:
            # No budget for tuning -- fall back to the default config.
            config = cs.get_default_configuration()
        else:
            smac = SMAC(scenario=ac_scenario,
                        tae_runner=taf,
                        rng=np.random.RandomState(seed),
                        intensifier=intensifier)
            smac.solver.runhistory.overwrite_existing_runs = True
            config = smac.optimize()

        print("Final Incumbent")
        print(config)

        # Refit the scalers on the full data and train the final model.
        X_all = self.scalerX.fit_transform(X_all)
        y_all = np.log10(y_all)
        y_all = self.scalerY.fit_transform(y_all.reshape(-1, 1))[:, 0]

        K.clear_session()
        start_time = time.time()
        model = ParamFCNetRegression(config=config, n_feat=X_all.shape[1],
                                     expected_num_epochs=max_epochs,
                                     n_outputs=1,
                                     verbose=1)
        model.train(X_train=X_all, y_train=y_all,
                    X_valid=X_all, y_valid=y_all,
                    n_epochs=max_epochs)
        print("Training Time: %f" % (time.time() - start_time))

        self.model = model

    def predict(self, X_test):
        """Predict targets for *X_test*.

        Applies the fitted input scaler, runs the trained network, then
        inverts the target scaling and the log10 transform.  Predictions
        are clipped from below at 0.0005.
        """
        X_test = self.scalerX.transform(X_test)
        y_pred = self.model.predict(X_test)
        y_pred = self.scalerY.inverse_transform(y_pred)
        y_pred = 10**y_pred
        # Floor at a small positive value (targets are positive).
        y_pred = np.maximum(0.0005, y_pred)
        return y_pred
|
[
"sklearn.preprocessing.StandardScaler",
"keras.backend.clear_session",
"numpy.maximum",
"param_net.param_fcnet.ParamFCNetRegression",
"mini_autonet.tae.simple_tae.SimpleTAFunc",
"ConfigSpace.util.fix_types",
"numpy.random.RandomState",
"time.time",
"param_net.param_fcnet.ParamFCNetRegression.get_config_space",
"numpy.hstack",
"smac.scenario.scenario.Scenario",
"numpy.mean",
"ConfigSpace.configuration_space.Configuration",
"numpy.vstack",
"numpy.log10",
"logging.getLogger"
] |
[((784, 812), 'logging.getLogger', 'logging.getLogger', (['"""AutoNet"""'], {}), "('AutoNet')\n", (801, 812), False, 'import logging\n'), ((1006, 1022), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1020, 1022), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1046, 1062), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1060, 1062), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3386, 3408), 'mini_autonet.tae.simple_tae.SimpleTAFunc', 'SimpleTAFunc', (['obj_func'], {}), '(obj_func)\n', (3398, 3408), False, 'from mini_autonet.tae.simple_tae import SimpleTAFunc\n'), ((3422, 3588), 'param_net.param_fcnet.ParamFCNetRegression.get_config_space', 'ParamFCNetRegression.get_config_space', ([], {'num_layers_range': 'self.num_layers_range', 'use_l2_regularization': 'self.use_l2_regularization', 'use_dropout': 'self.use_dropout'}), '(num_layers_range=self.\n num_layers_range, use_l2_regularization=self.use_l2_regularization,\n use_dropout=self.use_dropout)\n', (3459, 3588), False, 'from param_net.param_fcnet import ParamFCNetRegression\n'), ((3742, 3972), 'smac.scenario.scenario.Scenario', 'Scenario', (["{'run_obj': 'quality', 'runcount-limit': max_epochs * runcount_limit,\n 'wallclock-limit': wc_limit, 'cost_for_crash': 10, 'cs': cs,\n 'deterministic': 'true', 'abort_on_first_run_crash': False,\n 'output-dir': ''}"], {}), "({'run_obj': 'quality', 'runcount-limit': max_epochs *\n runcount_limit, 'wallclock-limit': wc_limit, 'cost_for_crash': 10, 'cs':\n cs, 'deterministic': 'true', 'abort_on_first_run_crash': False,\n 'output-dir': ''})\n", (3750, 3972), False, 'from smac.scenario.scenario import Scenario\n'), ((5197, 5212), 'numpy.log10', 'np.log10', (['y_all'], {}), '(y_all)\n', (5205, 5212), True, 'import numpy as np\n'), ((5308, 5325), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5323, 5325), True, 'from keras import backend as K\n'), ((5356, 5367), 
'time.time', 'time.time', ([], {}), '()\n', (5365, 5367), False, 'import time\n'), ((5393, 5511), 'param_net.param_fcnet.ParamFCNetRegression', 'ParamFCNetRegression', ([], {'config': 'config', 'n_feat': 'X_all.shape[1]', 'expected_num_epochs': 'max_epochs', 'n_outputs': '(1)', 'verbose': '(1)'}), '(config=config, n_feat=X_all.shape[1],\n expected_num_epochs=max_epochs, n_outputs=1, verbose=1)\n', (5413, 5511), False, 'from param_net.param_fcnet import ParamFCNetRegression\n'), ((6196, 6222), 'numpy.maximum', 'np.maximum', (['(0.0005)', 'y_pred'], {}), '(0.0005, y_pred)\n', (6206, 6222), True, 'import numpy as np\n'), ((4534, 4587), 'ConfigSpace.util.fix_types', 'fix_types', ([], {'configuration': 'dict', 'configuration_space': 'cs'}), '(configuration=dict, configuration_space=cs)\n', (4543, 4587), False, 'from ConfigSpace.util import fix_types\n'), ((4609, 4661), 'ConfigSpace.configuration_space.Configuration', 'Configuration', ([], {'configuration_space': 'cs', 'values': 'config'}), '(configuration_space=cs, values=config)\n', (4622, 4661), False, 'from ConfigSpace.configuration_space import Configuration\n'), ((1479, 1502), 'numpy.vstack', 'np.vstack', (['[X_all, X_q]'], {}), '([X_all, X_q])\n', (1488, 1502), True, 'import numpy as np\n'), ((1527, 1550), 'numpy.hstack', 'np.hstack', (['[y_all, y_q]'], {}), '([y_all, y_q])\n', (1536, 1550), True, 'import numpy as np\n'), ((2066, 2083), 'numpy.log10', 'np.log10', (['y_train'], {}), '(y_train)\n', (2074, 2083), True, 'import numpy as np\n'), ((2315, 2332), 'numpy.log10', 'np.log10', (['y_valid'], {}), '(y_valid)\n', (2323, 2332), True, 'import numpy as np\n'), ((3336, 3351), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3343, 3351), True, 'import numpy as np\n'), ((4368, 4393), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (4389, 4393), True, 'import numpy as np\n'), ((2589, 2709), 'param_net.param_fcnet.ParamFCNetRegression', 'ParamFCNetRegression', ([], {'config': 
'config', 'n_feat': 'X_train.shape[1]', 'expected_num_epochs': 'max_epochs', 'n_outputs': '(1)', 'verbose': '(1)'}), '(config=config, n_feat=X_train.shape[1],\n expected_num_epochs=max_epochs, n_outputs=1, verbose=1)\n', (2609, 2709), False, 'from param_net.param_fcnet import ParamFCNetRegression\n'), ((5872, 5883), 'time.time', 'time.time', ([], {}), '()\n', (5881, 5883), False, 'import time\n'), ((2543, 2560), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2558, 2560), True, 'from keras import backend as K\n'), ((4867, 4894), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4888, 4894), True, 'import numpy as np\n')]
|
# Generated by Django 2.2.24 on 2021-06-23 13:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("events", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(name="event", options={},),
]
|
[
"django.db.migrations.AlterModelOptions"
] |
[((216, 270), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""event"""', 'options': '{}'}), "(name='event', options={})\n", (244, 270), False, 'from django.db import migrations\n')]
|
import unittest
import os
import numpy as np
from skimage.io import imsave
import torch
import neural_renderer as nr
current_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(current_dir, 'data')
class TestCore(unittest.TestCase):
def test_tetrahedron(self):
vertices_ref = np.array(
[
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]],
'float32')
faces_ref = np.array(
[
[1, 3, 2],
[3, 1, 0],
[2, 0, 1],
[0, 2, 3]],
'int32')
obj_file = os.path.join(data_dir, 'tetrahedron.obj')
vertices, faces = nr.load_obj(obj_file, False)
assert (torch.allclose(torch.from_numpy(vertices_ref).cuda(), vertices))
assert (torch.allclose(torch.from_numpy(faces_ref).cuda(), faces))
vertices, faces = nr.load_obj(obj_file, True)
assert (torch.allclose(torch.from_numpy(vertices_ref).cuda() * 2 - 1.0, vertices))
assert (torch.allclose(torch.from_numpy(faces_ref).cuda(), faces))
def test_teapot(self):
obj_file = os.path.join(data_dir, 'teapot.obj')
vertices, faces = nr.load_obj(obj_file)
assert (faces.shape[0] == 2464)
assert (vertices.shape[0] == 1292)
def test_texture(self):
renderer = nr.Renderer(camera_mode='look_at')
vertices, faces, textures = nr.load_obj(
os.path.join(data_dir, '1cde62b063e14777c9152a706245d48/model.obj'), load_texture=True)
renderer.eye = nr.get_points_from_angles(2, 15, 30)
images, _, _ = renderer.render(vertices[None, :, :], faces[None, :, :], textures[None, :, :, :, :, :])
images = images.permute(0, 2, 3, 1).detach().cpu().numpy()
imsave(os.path.join(data_dir, 'car.png'), images[0])
vertices, faces, textures = nr.load_obj(
os.path.join(data_dir, '4e49873292196f02574b5684eaec43e9/model.obj'), load_texture=True, texture_size=16)
renderer.eye = nr.get_points_from_angles(2, 15, -90)
images, _, _ = renderer.render(vertices[None, :, :], faces[None, :, :], textures[None, :, :, :, :, :])
images = images.permute(0, 2, 3, 1).detach().cpu().numpy()
imsave(os.path.join(data_dir, 'display.png'), images[0])
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.path.realpath",
"neural_renderer.load_obj",
"numpy.array",
"neural_renderer.get_points_from_angles",
"neural_renderer.Renderer",
"os.path.join",
"torch.from_numpy"
] |
[((189, 222), 'os.path.join', 'os.path.join', (['current_dir', '"""data"""'], {}), "(current_dir, 'data')\n", (201, 222), False, 'import os\n'), ((150, 176), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n'), ((2219, 2234), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2232, 2234), False, 'import unittest\n'), ((309, 403), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]]', '"""float32"""'], {}), "([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0\n ]], 'float32')\n", (317, 403), True, 'import numpy as np\n'), ((449, 512), 'numpy.array', 'np.array', (['[[1, 3, 2], [3, 1, 0], [2, 0, 1], [0, 2, 3]]', '"""int32"""'], {}), "([[1, 3, 2], [3, 1, 0], [2, 0, 1], [0, 2, 3]], 'int32')\n", (457, 512), True, 'import numpy as np\n'), ((579, 620), 'os.path.join', 'os.path.join', (['data_dir', '"""tetrahedron.obj"""'], {}), "(data_dir, 'tetrahedron.obj')\n", (591, 620), False, 'import os\n'), ((643, 671), 'neural_renderer.load_obj', 'nr.load_obj', (['obj_file', '(False)'], {}), '(obj_file, False)\n', (654, 671), True, 'import neural_renderer as nr\n'), ((842, 869), 'neural_renderer.load_obj', 'nr.load_obj', (['obj_file', '(True)'], {}), '(obj_file, True)\n', (853, 869), True, 'import neural_renderer as nr\n'), ((1071, 1107), 'os.path.join', 'os.path.join', (['data_dir', '"""teapot.obj"""'], {}), "(data_dir, 'teapot.obj')\n", (1083, 1107), False, 'import os\n'), ((1130, 1151), 'neural_renderer.load_obj', 'nr.load_obj', (['obj_file'], {}), '(obj_file)\n', (1141, 1151), True, 'import neural_renderer as nr\n'), ((1271, 1305), 'neural_renderer.Renderer', 'nr.Renderer', ([], {'camera_mode': '"""look_at"""'}), "(camera_mode='look_at')\n", (1282, 1305), True, 'import neural_renderer as nr\n'), ((1474, 1510), 'neural_renderer.get_points_from_angles', 'nr.get_points_from_angles', (['(2)', '(15)', '(30)'], {}), '(2, 15, 30)\n', (1499, 1510), True, 'import 
neural_renderer as nr\n'), ((1919, 1956), 'neural_renderer.get_points_from_angles', 'nr.get_points_from_angles', (['(2)', '(15)', '(-90)'], {}), '(2, 15, -90)\n', (1944, 1956), True, 'import neural_renderer as nr\n'), ((1362, 1429), 'os.path.join', 'os.path.join', (['data_dir', '"""1cde62b063e14777c9152a706245d48/model.obj"""'], {}), "(data_dir, '1cde62b063e14777c9152a706245d48/model.obj')\n", (1374, 1429), False, 'import os\n'), ((1692, 1725), 'os.path.join', 'os.path.join', (['data_dir', '"""car.png"""'], {}), "(data_dir, 'car.png')\n", (1704, 1725), False, 'import os\n'), ((1794, 1862), 'os.path.join', 'os.path.join', (['data_dir', '"""4e49873292196f02574b5684eaec43e9/model.obj"""'], {}), "(data_dir, '4e49873292196f02574b5684eaec43e9/model.obj')\n", (1806, 1862), False, 'import os\n'), ((2138, 2175), 'os.path.join', 'os.path.join', (['data_dir', '"""display.png"""'], {}), "(data_dir, 'display.png')\n", (2150, 2175), False, 'import os\n'), ((699, 729), 'torch.from_numpy', 'torch.from_numpy', (['vertices_ref'], {}), '(vertices_ref)\n', (715, 729), False, 'import torch\n'), ((776, 803), 'torch.from_numpy', 'torch.from_numpy', (['faces_ref'], {}), '(faces_ref)\n', (792, 803), False, 'import torch\n'), ((984, 1011), 'torch.from_numpy', 'torch.from_numpy', (['faces_ref'], {}), '(faces_ref)\n', (1000, 1011), False, 'import torch\n'), ((897, 927), 'torch.from_numpy', 'torch.from_numpy', (['vertices_ref'], {}), '(vertices_ref)\n', (913, 927), False, 'import torch\n')]
|
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from django.contrib.auth.models import User
from bucketlist.serializers import BucketlistSerializer, BucketlistItemSerializer, UserRegisterSerializer
from bucketlist.models import Bucketlist, BucketlistItem
class UserRegisterAPIView(generics.CreateAPIView):
"""For /api/v1/auth/register url path"""
permission_classes = (permissions.AllowAny,)
queryset = User.objects.all()
serializer_class = UserRegisterSerializer
class BucketlistList(generics.ListCreateAPIView):
"""For /api/v1/bucketlist/ url path"""
serializer_class = BucketlistSerializer
def get_queryset(self):
"""Return bucketlists belonging to user logged in."""
q = self.request.query_params.get('q', None)
user_id = self.request.user.id
if q:
bucketlists = Bucketlist.objects.all().filter(created_by=user_id)
results = []
for bucketlist in bucketlists:
if q.lower() in bucketlist.name.lower():
results.append(bucketlist)
return results
else:
return Bucketlist.objects.filter(created_by=user_id)
class BucketlistDetail(generics.RetrieveUpdateDestroyAPIView):
"""For /api/v1/bucketlist/<id> url path"""
serializer_class = BucketlistSerializer
def get_queryset(self):
"""Return bucketlists belonging to user logged in."""
user_id = self.request.user.id
return Bucketlist.objects.filter(created_by=user_id)
class BucketlistItemList(generics.CreateAPIView):
"""For /api/v1/bucketlist/<id>/items/ url path"""
queryset = BucketlistItem.objects.all()
serializer_class = BucketlistItemSerializer
def create(self, request, **kwargs):
pk = self.kwargs.get('pk')
title = request.data.get('title')
data = {
'bucketlist': pk,
'title': title
}
serializer = self.serializer_class(data=data)
if serializer.is_valid():
item = BucketlistItem(**serializer.validated_data)
item.save()
return Response(
{"message": "Item '{}' created successfully".format(title)},
status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_404_NOT_FOUND)
class BucketlistItemDetail(generics.UpdateAPIView, generics.DestroyAPIView):
"""For /api/v1/bucketlist/<bucketlist-id>/items/<id> url path"""
serializer_class = BucketlistItemSerializer
def get_queryset(self):
"""Return items belonging to bucketlist specified on URL."""
# Get bucketlists by logged in user
user_id = self.request.user.id
bucketlist_id = self.kwargs['bucketlist_id']
bucketlist = Bucketlist.objects.filter(
created_by=user_id, id=bucketlist_id)
return BucketlistItem.objects.filter(bucketlist=bucketlist)
def update(serializer, *args, **kwargs):
user_id = serializer.request.user.id
bucketlist_id = kwargs['bucketlist_id']
bucketlist = Bucketlist.objects.filter(
created_by=user_id, id=bucketlist_id)
item = BucketlistItem.objects.filter(bucketlist=bucketlist, id=kwargs.get('pk')).first()
if item:
item.bucketlist = Bucketlist(pk=kwargs.get('bucketlist_id'))
item.title = serializer.request.data.get('title', item.title)
item.done = serializer.request.data.get('done', item.done)
item.save()
return Response(
{"message": "Item '{}' updated successfully".format(item.id)},
status=status.HTTP_200_OK)
return Response({"error": "You cannot update this item"},
status=status.HTTP_404_NOT_FOUND)
|
[
"bucketlist.models.Bucketlist.objects.all",
"bucketlist.models.Bucketlist.objects.filter",
"rest_framework.response.Response",
"bucketlist.models.BucketlistItem.objects.filter",
"bucketlist.models.BucketlistItem",
"bucketlist.models.BucketlistItem.objects.all",
"django.contrib.auth.models.User.objects.all"
] |
[((474, 492), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (490, 492), False, 'from django.contrib.auth.models import User\n'), ((1706, 1734), 'bucketlist.models.BucketlistItem.objects.all', 'BucketlistItem.objects.all', ([], {}), '()\n', (1732, 1734), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n'), ((1539, 1584), 'bucketlist.models.Bucketlist.objects.filter', 'Bucketlist.objects.filter', ([], {'created_by': 'user_id'}), '(created_by=user_id)\n', (1564, 1584), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n'), ((2330, 2391), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_404_NOT_FOUND'}), '(serializer.errors, status=status.HTTP_404_NOT_FOUND)\n', (2338, 2391), False, 'from rest_framework.response import Response\n'), ((2845, 2908), 'bucketlist.models.Bucketlist.objects.filter', 'Bucketlist.objects.filter', ([], {'created_by': 'user_id', 'id': 'bucketlist_id'}), '(created_by=user_id, id=bucketlist_id)\n', (2870, 2908), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n'), ((2938, 2990), 'bucketlist.models.BucketlistItem.objects.filter', 'BucketlistItem.objects.filter', ([], {'bucketlist': 'bucketlist'}), '(bucketlist=bucketlist)\n', (2967, 2990), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n'), ((3151, 3214), 'bucketlist.models.Bucketlist.objects.filter', 'Bucketlist.objects.filter', ([], {'created_by': 'user_id', 'id': 'bucketlist_id'}), '(created_by=user_id, id=bucketlist_id)\n', (3176, 3214), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n'), ((3754, 3843), 'rest_framework.response.Response', 'Response', (["{'error': 'You cannot update this item'}"], {'status': 'status.HTTP_404_NOT_FOUND'}), "({'error': 'You cannot update this item'}, status=status.\n HTTP_404_NOT_FOUND)\n", (3762, 3843), False, 'from rest_framework.response import Response\n'), ((1190, 1235), 
'bucketlist.models.Bucketlist.objects.filter', 'Bucketlist.objects.filter', ([], {'created_by': 'user_id'}), '(created_by=user_id)\n', (1215, 1235), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n'), ((2093, 2136), 'bucketlist.models.BucketlistItem', 'BucketlistItem', ([], {}), '(**serializer.validated_data)\n', (2107, 2136), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n'), ((903, 927), 'bucketlist.models.Bucketlist.objects.all', 'Bucketlist.objects.all', ([], {}), '()\n', (925, 927), False, 'from bucketlist.models import Bucketlist, BucketlistItem\n')]
|
# dictionaries are 'like' hash-maps:
database = {} # empty dict
database = {"boss": "Foo Bar"} # dict with data
# Add a key value pair to a dictionary:
database["foo"] = "test"
person = {}
name, age, height = "Alice", 23, 1.8
person["age"] = age
person["height"] = height
person["description"] = "{} is a {} year old, {}m tall girl."\
.format(name,age,height)
# Dictionary in dictionary:
database[name] = person
# List in dictionary:
database["fib_numbers"] = [1,1,2,3,5]
# Print using JavaScript Object Notation:
import json
print(json.dumps(database, indent=2))
|
[
"json.dumps"
] |
[((595, 625), 'json.dumps', 'json.dumps', (['database'], {'indent': '(2)'}), '(database, indent=2)\n', (605, 625), False, 'import json\n')]
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for waymo_ap_metric."""
from lingvo import compat as tf
from lingvo.core import py_utils
from lingvo.core import test_utils
from lingvo.tasks.car.waymo import waymo_ap_metric
from lingvo.tasks.car.waymo import waymo_metadata
import numpy as np
from waymo_open_dataset import label_pb2
FLAGS = tf.flags.FLAGS
class APTest(test_utils.TestCase):
def testWaymoAPConfig(self):
metadata = waymo_metadata.WaymoMetadata()
# Use 2D metric.
config = waymo_ap_metric.BuildWaymoMetricConfig(metadata, '2d', [])
vehicle_idx = label_pb2.Label.Type.Value('TYPE_VEHICLE')
ped_idx = label_pb2.Label.Type.Value('TYPE_PEDESTRIAN')
cyc_idx = label_pb2.Label.Type.Value('TYPE_CYCLIST')
thresholds_meta = metadata.IoUThresholds()
self.assertNear(config.iou_thresholds[vehicle_idx],
thresholds_meta['Vehicle'], 1e-6)
self.assertNear(config.iou_thresholds[ped_idx],
thresholds_meta['Pedestrian'], 1e-6)
self.assertNear(config.iou_thresholds[cyc_idx], thresholds_meta['Cyclist'],
1e-6)
def testPerfectBox(self):
metadata = waymo_metadata.WaymoMetadata()
params = waymo_ap_metric.WaymoAPMetrics.Params(metadata)
m = params.Instantiate()
# Make one update with a perfect box.
update_dict = py_utils.NestedMap(
groundtruth_labels=np.array([1]),
groundtruth_bboxes=np.ones(shape=(1, 7)),
groundtruth_difficulties=np.zeros(shape=(1)),
groundtruth_num_points=None,
detection_scores=np.ones(shape=(5, 1)),
detection_boxes=np.ones(shape=(5, 1, 7)),
detection_heights_in_pixels=np.ones(shape=(5, 1)))
m.Update('1234', update_dict)
waymo_ap = m.value
self.assertAllClose(waymo_ap, 1. / 3.)
# Write a summary.
summary = m.Summary('foo')
# Check that both AP and APH are in the tags.
tags = [v.tag for v in summary.value]
self.assertIn('foo/Pedestrian/AP_LEVEL_1', tags)
self.assertIn('foo/Pedestrian/APH_LEVEL_1', tags)
self.assertIn('foo/Pedestrian/AP_LEVEL_2', tags)
self.assertIn('foo/Pedestrian/APH_LEVEL_2', tags)
def testWaymoBreakdowns(self):
metadata = waymo_metadata.WaymoMetadata()
params = waymo_ap_metric.WaymoAPMetrics.Params(metadata)
params.waymo_breakdown_metrics = ['RANGE', 'VELOCITY']
m = params.Instantiate()
# Make one update with a perfect box.
update_dict = py_utils.NestedMap(
groundtruth_labels=np.array([1]),
groundtruth_bboxes=np.ones(shape=(1, 7)),
groundtruth_difficulties=np.zeros(shape=(1)),
groundtruth_num_points=None,
groundtruth_speed=np.zeros(shape=(1, 2)),
detection_scores=np.ones(shape=(5, 1)),
detection_boxes=np.ones(shape=(5, 1, 7)),
detection_heights_in_pixels=np.ones(shape=(5, 1)))
m.Update('1234', update_dict)
# Write a summary.
summary = m.Summary('foo')
# Check that the summary value for default ap and
# a waymo breakdown version by range is the same.
for v in summary.value:
if v.tag == 'foo/Vehicle/AP_LEVEL_1':
default_val = v.simple_value
elif v.tag == 'foo/Vehicle/APH_LEVEL_1':
aph_default_val = v.simple_value
elif v.tag == 'foo_extra/AP_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_1':
ap_bd_val_l1 = v.simple_value
elif v.tag == 'foo_extra/AP_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_2':
ap_bd_val_l2 = v.simple_value
elif v.tag == 'foo_extra/APH_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_1':
aph_bd_val_l1 = v.simple_value
elif v.tag == 'foo_extra/APH_RANGE_TYPE_VEHICLE_[0, 30)_LEVEL_2':
aph_bd_val_l2 = v.simple_value
elif v.tag == 'foo_extra/AP_VELOCITY_TYPE_VEHICLE_STATIONARY_LEVEL_1':
vbd_val_l1 = v.simple_value
elif v.tag == 'foo_extra/AP_VELOCITY_TYPE_VEHICLE_STATIONARY_LEVEL_2':
vbd_val_l2 = v.simple_value
self.assertEqual(ap_bd_val_l1, default_val)
self.assertEqual(ap_bd_val_l2, default_val)
self.assertEqual(aph_bd_val_l1, aph_default_val)
self.assertEqual(aph_bd_val_l2, aph_default_val)
self.assertEqual(vbd_val_l1, default_val)
self.assertEqual(vbd_val_l2, default_val)
# Check that eval classes not evaluated are not present.
tags = [v.tag for v in summary.value]
self.assertNotIn('foo_extra/APH_RANGE_TYPE_SIGN_[0, 30)_LEVEL_1', tags)
self.assertNotIn('foo_extra/APH_RANGE_TYPE_SIGN_[0, 30)_LEVEL_2', tags)
if __name__ == '__main__':
tf.test.main()
|
[
"lingvo.compat.test.main",
"waymo_open_dataset.label_pb2.Label.Type.Value",
"lingvo.tasks.car.waymo.waymo_ap_metric.WaymoAPMetrics.Params",
"numpy.zeros",
"numpy.ones",
"lingvo.tasks.car.waymo.waymo_ap_metric.BuildWaymoMetricConfig",
"numpy.array",
"lingvo.tasks.car.waymo.waymo_metadata.WaymoMetadata"
] |
[((5177, 5191), 'lingvo.compat.test.main', 'tf.test.main', ([], {}), '()\n', (5189, 5191), True, 'from lingvo import compat as tf\n'), ((1111, 1141), 'lingvo.tasks.car.waymo.waymo_metadata.WaymoMetadata', 'waymo_metadata.WaymoMetadata', ([], {}), '()\n', (1139, 1141), False, 'from lingvo.tasks.car.waymo import waymo_metadata\n'), ((1176, 1234), 'lingvo.tasks.car.waymo.waymo_ap_metric.BuildWaymoMetricConfig', 'waymo_ap_metric.BuildWaymoMetricConfig', (['metadata', '"""2d"""', '[]'], {}), "(metadata, '2d', [])\n", (1214, 1234), False, 'from lingvo.tasks.car.waymo import waymo_ap_metric\n'), ((1253, 1295), 'waymo_open_dataset.label_pb2.Label.Type.Value', 'label_pb2.Label.Type.Value', (['"""TYPE_VEHICLE"""'], {}), "('TYPE_VEHICLE')\n", (1279, 1295), False, 'from waymo_open_dataset import label_pb2\n'), ((1310, 1355), 'waymo_open_dataset.label_pb2.Label.Type.Value', 'label_pb2.Label.Type.Value', (['"""TYPE_PEDESTRIAN"""'], {}), "('TYPE_PEDESTRIAN')\n", (1336, 1355), False, 'from waymo_open_dataset import label_pb2\n'), ((1370, 1412), 'waymo_open_dataset.label_pb2.Label.Type.Value', 'label_pb2.Label.Type.Value', (['"""TYPE_CYCLIST"""'], {}), "('TYPE_CYCLIST')\n", (1396, 1412), False, 'from waymo_open_dataset import label_pb2\n'), ((1830, 1860), 'lingvo.tasks.car.waymo.waymo_metadata.WaymoMetadata', 'waymo_metadata.WaymoMetadata', ([], {}), '()\n', (1858, 1860), False, 'from lingvo.tasks.car.waymo import waymo_metadata\n'), ((1874, 1921), 'lingvo.tasks.car.waymo.waymo_ap_metric.WaymoAPMetrics.Params', 'waymo_ap_metric.WaymoAPMetrics.Params', (['metadata'], {}), '(metadata)\n', (1911, 1921), False, 'from lingvo.tasks.car.waymo import waymo_ap_metric\n'), ((2883, 2913), 'lingvo.tasks.car.waymo.waymo_metadata.WaymoMetadata', 'waymo_metadata.WaymoMetadata', ([], {}), '()\n', (2911, 2913), False, 'from lingvo.tasks.car.waymo import waymo_metadata\n'), ((2927, 2974), 'lingvo.tasks.car.waymo.waymo_ap_metric.WaymoAPMetrics.Params', 'waymo_ap_metric.WaymoAPMetrics.Params', 
(['metadata'], {}), '(metadata)\n', (2964, 2974), False, 'from lingvo.tasks.car.waymo import waymo_ap_metric\n'), ((2058, 2071), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2066, 2071), True, 'import numpy as np\n'), ((2100, 2121), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 7)'}), '(shape=(1, 7))\n', (2107, 2121), True, 'import numpy as np\n'), ((2156, 2173), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1)'}), '(shape=1)\n', (2164, 2173), True, 'import numpy as np\n'), ((2239, 2260), 'numpy.ones', 'np.ones', ([], {'shape': '(5, 1)'}), '(shape=(5, 1))\n', (2246, 2260), True, 'import numpy as np\n'), ((2286, 2310), 'numpy.ones', 'np.ones', ([], {'shape': '(5, 1, 7)'}), '(shape=(5, 1, 7))\n', (2293, 2310), True, 'import numpy as np\n'), ((2348, 2369), 'numpy.ones', 'np.ones', ([], {'shape': '(5, 1)'}), '(shape=(5, 1))\n', (2355, 2369), True, 'import numpy as np\n'), ((3171, 3184), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3179, 3184), True, 'import numpy as np\n'), ((3213, 3234), 'numpy.ones', 'np.ones', ([], {'shape': '(1, 7)'}), '(shape=(1, 7))\n', (3220, 3234), True, 'import numpy as np\n'), ((3269, 3286), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1)'}), '(shape=1)\n', (3277, 3286), True, 'import numpy as np\n'), ((3353, 3375), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 2)'}), '(shape=(1, 2))\n', (3361, 3375), True, 'import numpy as np\n'), ((3402, 3423), 'numpy.ones', 'np.ones', ([], {'shape': '(5, 1)'}), '(shape=(5, 1))\n', (3409, 3423), True, 'import numpy as np\n'), ((3449, 3473), 'numpy.ones', 'np.ones', ([], {'shape': '(5, 1, 7)'}), '(shape=(5, 1, 7))\n', (3456, 3473), True, 'import numpy as np\n'), ((3511, 3532), 'numpy.ones', 'np.ones', ([], {'shape': '(5, 1)'}), '(shape=(5, 1))\n', (3518, 3532), True, 'import numpy as np\n')]
|
from google.appengine.ext import db
from google.appengine.api.datastore_types import Text
__author__ = "<NAME>, <NAME>, and <NAME>"
__copyright__ = "Copyright 2013-2015 UKP TU Darmstadt"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "ASL"
class ArgumentationUnit(db.Model):
"""
@author: <NAME>
"""
type = db.StringProperty()
confidence = db.StringProperty()
indices = db.StringProperty()
class DocumentAnnotation(db.Model):
"""
Stores the arg_units that a user marks in a document
@author: <NAME>
"""
user_id = db.StringProperty()
document = db.StringProperty()
arg_units = db.ListProperty(Text)
relations = db.ListProperty(Text)
concepts = db.ListProperty(Text)
notes = db.TextProperty()
approved = db.BooleanProperty()
class CorpusMetadata(db.Model):
"""
Stores corpus metadata
@author: <NAME>
"""
segmenter = db.StringProperty()
preprocessing_date = db.StringProperty()
class Document(db.Model):
"""
@author: <NAME>
"""
title = db.StringProperty()
text = db.TextProperty()
url = db.StringProperty()
filename = db.StringProperty()
topic = db.StringProperty()
num_sentences = db.IntegerProperty()
num_tokens = db.IntegerProperty()
class UserData(db.Model):
"""
Stores which document the user is currently working on.
@author: <NAME>
"""
user_id = db.StringProperty()
current_doc = db.ReferenceProperty(collection_name="prop_current_document")
selected_topics = db.StringListProperty()
|
[
"google.appengine.ext.db.StringProperty",
"google.appengine.ext.db.ReferenceProperty",
"google.appengine.ext.db.StringListProperty",
"google.appengine.ext.db.BooleanProperty",
"google.appengine.ext.db.ListProperty",
"google.appengine.ext.db.TextProperty",
"google.appengine.ext.db.IntegerProperty"
] |
[((336, 355), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (353, 355), False, 'from google.appengine.ext import db\n'), ((373, 392), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (390, 392), False, 'from google.appengine.ext import db\n'), ((407, 426), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (424, 426), False, 'from google.appengine.ext import db\n'), ((573, 592), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (590, 592), False, 'from google.appengine.ext import db\n'), ((608, 627), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (625, 627), False, 'from google.appengine.ext import db\n'), ((644, 665), 'google.appengine.ext.db.ListProperty', 'db.ListProperty', (['Text'], {}), '(Text)\n', (659, 665), False, 'from google.appengine.ext import db\n'), ((682, 703), 'google.appengine.ext.db.ListProperty', 'db.ListProperty', (['Text'], {}), '(Text)\n', (697, 703), False, 'from google.appengine.ext import db\n'), ((719, 740), 'google.appengine.ext.db.ListProperty', 'db.ListProperty', (['Text'], {}), '(Text)\n', (734, 740), False, 'from google.appengine.ext import db\n'), ((753, 770), 'google.appengine.ext.db.TextProperty', 'db.TextProperty', ([], {}), '()\n', (768, 770), False, 'from google.appengine.ext import db\n'), ((786, 806), 'google.appengine.ext.db.BooleanProperty', 'db.BooleanProperty', ([], {}), '()\n', (804, 806), False, 'from google.appengine.ext import db\n'), ((921, 940), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (938, 940), False, 'from google.appengine.ext import db\n'), ((966, 985), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (983, 985), False, 'from google.appengine.ext import db\n'), ((1062, 1081), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (1079, 1081), False, 'from 
google.appengine.ext import db\n'), ((1093, 1110), 'google.appengine.ext.db.TextProperty', 'db.TextProperty', ([], {}), '()\n', (1108, 1110), False, 'from google.appengine.ext import db\n'), ((1121, 1140), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (1138, 1140), False, 'from google.appengine.ext import db\n'), ((1156, 1175), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (1173, 1175), False, 'from google.appengine.ext import db\n'), ((1188, 1207), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (1205, 1207), False, 'from google.appengine.ext import db\n'), ((1228, 1248), 'google.appengine.ext.db.IntegerProperty', 'db.IntegerProperty', ([], {}), '()\n', (1246, 1248), False, 'from google.appengine.ext import db\n'), ((1266, 1286), 'google.appengine.ext.db.IntegerProperty', 'db.IntegerProperty', ([], {}), '()\n', (1284, 1286), False, 'from google.appengine.ext import db\n'), ((1425, 1444), 'google.appengine.ext.db.StringProperty', 'db.StringProperty', ([], {}), '()\n', (1442, 1444), False, 'from google.appengine.ext import db\n'), ((1463, 1524), 'google.appengine.ext.db.ReferenceProperty', 'db.ReferenceProperty', ([], {'collection_name': '"""prop_current_document"""'}), "(collection_name='prop_current_document')\n", (1483, 1524), False, 'from google.appengine.ext import db\n'), ((1547, 1570), 'google.appengine.ext.db.StringListProperty', 'db.StringListProperty', ([], {}), '()\n', (1568, 1570), False, 'from google.appengine.ext import db\n')]
|
import os
import shutil
class MakeDirs:
"""
This class will be used to create the directory which are needed to run the program
"""
def __init__(self):
self.current_path = os.getcwd()
self.func_list = [self.create_models, self.create_file_from_db, self.create_raw_files_validated,
self.create_log_files, self.create_batch_files, self.create_output_file,
self.create_kmeans]
self.current_index = -1
self.last_index = len(self.func_list)
def create_models(self):
"""
This method will create the empty model directory.
"""
model_path = self.current_path + '/models'
if os.path.exists(model_path):
shutil.rmtree(model_path)
os.makedirs(model_path)
def create_file_from_db(self):
"""
This method will create the Prediction and Training folders to store the csv file for the input
"""
prediction_path = self.current_path + '/Prediction_FileFromDB'
training_path = self.current_path + '/Training_FileFromDB'
if os.path.exists(prediction_path):
shutil.rmtree(prediction_path)
os.makedirs(prediction_path)
if os.path.exists(training_path):
shutil.rmtree(training_path)
os.makedirs(training_path)
def create_raw_files_validated(self):
"""
This method will create the Prediction and Training raw folders to store the good file for the input
"""
prediction_path = self.current_path + '/Prediction_Raw_Files_Validated'
training_path = self.current_path + '/Training_Raw_Files_Validated'
if os.path.exists(prediction_path):
shutil.rmtree(prediction_path)
os.makedirs(prediction_path)
if os.path.exists(training_path):
shutil.rmtree(training_path)
os.makedirs(training_path)
def create_log_files(self):
"""
This method will create the Prediction and Training log files to store logs of the program
"""
prediction_path = self.current_path + '/Training_Logs'
training_path = self.current_path + '/Prediction_Logs'
if os.path.exists(prediction_path):
shutil.rmtree(prediction_path)
os.makedirs(prediction_path)
if os.path.exists(training_path):
shutil.rmtree(training_path)
os.makedirs(training_path)
def create_batch_files(self):
"""
This method will create the Prediction and Training batch files for the model training and prediction
"""
prediction_path = self.current_path + '/Training_Batch_Files'
training_path = self.current_path + '/Prediction_Batch_files'
if os.path.exists(prediction_path):
shutil.rmtree(prediction_path)
os.makedirs(prediction_path)
if os.path.exists(training_path):
shutil.rmtree(training_path)
os.makedirs(training_path)
def create_output_file(self):
"""
This method will create the Prediction output file
"""
prediction_path = self.current_path + '/Prediction_Output_File'
if os.path.exists(prediction_path):
shutil.rmtree(prediction_path)
os.makedirs(prediction_path)
def create_kmeans(self):
"""
This method will create the Kmeans folder to store the elbow plot
"""
k_means_path = self.current_path + '/K_Means_ElbowPlot'
if os.path.exists(k_means_path):
shutil.rmtree(k_means_path)
os.makedirs(k_means_path)
def __iter__(self):
return self
def __next__(self):
# Iterate over the functions and return the function definition to call
self.current_index += 1
if self.current_index < self.last_index:
return self.func_list[self.current_index]
else:
raise StopIteration
make_dir_obj = MakeDirs()
# Iterate over the function of the class and make the class objects
for func in make_dir_obj:
func()
|
[
"os.getcwd",
"shutil.rmtree",
"os.path.exists",
"os.makedirs"
] |
[((199, 210), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (208, 210), False, 'import os\n'), ((714, 740), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (728, 740), False, 'import os\n'), ((789, 812), 'os.makedirs', 'os.makedirs', (['model_path'], {}), '(model_path)\n', (800, 812), False, 'import os\n'), ((1127, 1158), 'os.path.exists', 'os.path.exists', (['prediction_path'], {}), '(prediction_path)\n', (1141, 1158), False, 'import os\n'), ((1212, 1240), 'os.makedirs', 'os.makedirs', (['prediction_path'], {}), '(prediction_path)\n', (1223, 1240), False, 'import os\n'), ((1253, 1282), 'os.path.exists', 'os.path.exists', (['training_path'], {}), '(training_path)\n', (1267, 1282), False, 'import os\n'), ((1334, 1360), 'os.makedirs', 'os.makedirs', (['training_path'], {}), '(training_path)\n', (1345, 1360), False, 'import os\n'), ((1705, 1736), 'os.path.exists', 'os.path.exists', (['prediction_path'], {}), '(prediction_path)\n', (1719, 1736), False, 'import os\n'), ((1790, 1818), 'os.makedirs', 'os.makedirs', (['prediction_path'], {}), '(prediction_path)\n', (1801, 1818), False, 'import os\n'), ((1831, 1860), 'os.path.exists', 'os.path.exists', (['training_path'], {}), '(training_path)\n', (1845, 1860), False, 'import os\n'), ((1912, 1938), 'os.makedirs', 'os.makedirs', (['training_path'], {}), '(training_path)\n', (1923, 1938), False, 'import os\n'), ((2233, 2264), 'os.path.exists', 'os.path.exists', (['prediction_path'], {}), '(prediction_path)\n', (2247, 2264), False, 'import os\n'), ((2318, 2346), 'os.makedirs', 'os.makedirs', (['prediction_path'], {}), '(prediction_path)\n', (2329, 2346), False, 'import os\n'), ((2359, 2388), 'os.path.exists', 'os.path.exists', (['training_path'], {}), '(training_path)\n', (2373, 2388), False, 'import os\n'), ((2440, 2466), 'os.makedirs', 'os.makedirs', (['training_path'], {}), '(training_path)\n', (2451, 2466), False, 'import os\n'), ((2788, 2819), 'os.path.exists', 'os.path.exists', 
(['prediction_path'], {}), '(prediction_path)\n', (2802, 2819), False, 'import os\n'), ((2873, 2901), 'os.makedirs', 'os.makedirs', (['prediction_path'], {}), '(prediction_path)\n', (2884, 2901), False, 'import os\n'), ((2914, 2943), 'os.path.exists', 'os.path.exists', (['training_path'], {}), '(training_path)\n', (2928, 2943), False, 'import os\n'), ((2995, 3021), 'os.makedirs', 'os.makedirs', (['training_path'], {}), '(training_path)\n', (3006, 3021), False, 'import os\n'), ((3224, 3255), 'os.path.exists', 'os.path.exists', (['prediction_path'], {}), '(prediction_path)\n', (3238, 3255), False, 'import os\n'), ((3309, 3337), 'os.makedirs', 'os.makedirs', (['prediction_path'], {}), '(prediction_path)\n', (3320, 3337), False, 'import os\n'), ((3542, 3570), 'os.path.exists', 'os.path.exists', (['k_means_path'], {}), '(k_means_path)\n', (3556, 3570), False, 'import os\n'), ((3621, 3646), 'os.makedirs', 'os.makedirs', (['k_means_path'], {}), '(k_means_path)\n', (3632, 3646), False, 'import os\n'), ((754, 779), 'shutil.rmtree', 'shutil.rmtree', (['model_path'], {}), '(model_path)\n', (767, 779), False, 'import shutil\n'), ((1172, 1202), 'shutil.rmtree', 'shutil.rmtree', (['prediction_path'], {}), '(prediction_path)\n', (1185, 1202), False, 'import shutil\n'), ((1296, 1324), 'shutil.rmtree', 'shutil.rmtree', (['training_path'], {}), '(training_path)\n', (1309, 1324), False, 'import shutil\n'), ((1750, 1780), 'shutil.rmtree', 'shutil.rmtree', (['prediction_path'], {}), '(prediction_path)\n', (1763, 1780), False, 'import shutil\n'), ((1874, 1902), 'shutil.rmtree', 'shutil.rmtree', (['training_path'], {}), '(training_path)\n', (1887, 1902), False, 'import shutil\n'), ((2278, 2308), 'shutil.rmtree', 'shutil.rmtree', (['prediction_path'], {}), '(prediction_path)\n', (2291, 2308), False, 'import shutil\n'), ((2402, 2430), 'shutil.rmtree', 'shutil.rmtree', (['training_path'], {}), '(training_path)\n', (2415, 2430), False, 'import shutil\n'), ((2833, 2863), 'shutil.rmtree', 
'shutil.rmtree', (['prediction_path'], {}), '(prediction_path)\n', (2846, 2863), False, 'import shutil\n'), ((2957, 2985), 'shutil.rmtree', 'shutil.rmtree', (['training_path'], {}), '(training_path)\n', (2970, 2985), False, 'import shutil\n'), ((3269, 3299), 'shutil.rmtree', 'shutil.rmtree', (['prediction_path'], {}), '(prediction_path)\n', (3282, 3299), False, 'import shutil\n'), ((3584, 3611), 'shutil.rmtree', 'shutil.rmtree', (['k_means_path'], {}), '(k_means_path)\n', (3597, 3611), False, 'import shutil\n')]
|
import diceroll
import mentionrouter
import yaml
import random
import re
from slackeventsapi import SlackEventAdapter
from slackclient import SlackClient
def get_config( conf_file ):
with open( conf_file ) as x: conf_str = x.read()
conf = yaml.load( conf_str )
return conf
CONF = get_config( "config.yaml" )
slack_events_adapter = SlackEventAdapter(
CONF["slack_signing_secret"],
endpoint="/slack/events"
)
slack_client = SlackClient( CONF["slack_bot_token"] )
mention_router = mentionrouter.Router()
mention_router.register( "roll",
diceroll.DiceRollHandler( CONF["max_dice"], CONF["max_dice_size"], slack_client )
)
@slack_events_adapter.on("app_mention")
def handle_message(event_data):
message = event_data["event"]
mention_router.handle_mention(
user = message['user'],
text = message['text'],
channel = message['channel'],
)
@slack_events_adapter.on("error")
def error_handler(err):
print("ERROR: " + str(err))
# Start the server
slack_events_adapter.start(
host=CONF["host"],
port=CONF["port"],
#debug=True,
)
|
[
"yaml.load",
"diceroll.DiceRollHandler",
"slackclient.SlackClient",
"mentionrouter.Router",
"slackeventsapi.SlackEventAdapter"
] |
[((347, 420), 'slackeventsapi.SlackEventAdapter', 'SlackEventAdapter', (["CONF['slack_signing_secret']"], {'endpoint': '"""/slack/events"""'}), "(CONF['slack_signing_secret'], endpoint='/slack/events')\n", (364, 420), False, 'from slackeventsapi import SlackEventAdapter\n'), ((446, 482), 'slackclient.SlackClient', 'SlackClient', (["CONF['slack_bot_token']"], {}), "(CONF['slack_bot_token'])\n", (457, 482), False, 'from slackclient import SlackClient\n'), ((503, 525), 'mentionrouter.Router', 'mentionrouter.Router', ([], {}), '()\n', (523, 525), False, 'import mentionrouter\n'), ((249, 268), 'yaml.load', 'yaml.load', (['conf_str'], {}), '(conf_str)\n', (258, 268), False, 'import yaml\n'), ((563, 642), 'diceroll.DiceRollHandler', 'diceroll.DiceRollHandler', (["CONF['max_dice']", "CONF['max_dice_size']", 'slack_client'], {}), "(CONF['max_dice'], CONF['max_dice_size'], slack_client)\n", (587, 642), False, 'import diceroll\n')]
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/_settingsDialog.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_settingsDialog(object):
def setupUi(self, settingsDialog):
settingsDialog.setObjectName("settingsDialog")
settingsDialog.resize(432, 139)
font = QtGui.QFont()
font.setFamily("Segoe UI")
settingsDialog.setFont(font)
self.verticalLayout = QtWidgets.QVBoxLayout(settingsDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.verticalWidget = QtWidgets.QWidget(settingsDialog)
self.verticalWidget.setObjectName("verticalWidget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalWidget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.setSkeletonConfidenceWidget = QtWidgets.QWidget(self.verticalWidget)
self.setSkeletonConfidenceWidget.setObjectName("setSkeletonConfidenceWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.setSkeletonConfidenceWidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.setSkeletonConfidenceLabel = QtWidgets.QLabel(self.setSkeletonConfidenceWidget)
self.setSkeletonConfidenceLabel.setObjectName("setSkeletonConfidenceLabel")
self.horizontalLayout.addWidget(self.setSkeletonConfidenceLabel)
self.setSkeletonConfidenceLineEdit = QtWidgets.QLineEdit(self.setSkeletonConfidenceWidget)
self.setSkeletonConfidenceLineEdit.setObjectName("setSkeletonConfidenceLineEdit")
self.horizontalLayout.addWidget(self.setSkeletonConfidenceLineEdit)
self.verticalLayout_2.addWidget(self.setSkeletonConfidenceWidget)
self.verticalLayout.addWidget(self.verticalWidget)
self.buttonBox = QtWidgets.QDialogButtonBox(settingsDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(settingsDialog)
self.buttonBox.accepted.connect(settingsDialog.accept)
self.buttonBox.rejected.connect(settingsDialog.reject)
QtCore.QMetaObject.connectSlotsByName(settingsDialog)
def retranslateUi(self, settingsDialog):
_translate = QtCore.QCoreApplication.translate
settingsDialog.setWindowTitle(_translate("settingsDialog", "Dialog"))
self.setSkeletonConfidenceLabel.setText(_translate("settingsDialog", "Skeleton Confidence Threshold (0-1)"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
settingsDialog = QtWidgets.QDialog()
ui = Ui_settingsDialog()
ui.setupUi(settingsDialog)
settingsDialog.show()
sys.exit(app.exec_())
|
[
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QDialog",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QDialogButtonBox"
] |
[((2802, 2834), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (2824, 2834), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2856, 2875), 'PyQt5.QtWidgets.QDialog', 'QtWidgets.QDialog', ([], {}), '()\n', (2873, 2875), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((434, 447), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (445, 447), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((550, 587), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['settingsDialog'], {}), '(settingsDialog)\n', (571, 587), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((678, 711), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['settingsDialog'], {}), '(settingsDialog)\n', (695, 711), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((804, 846), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self.verticalWidget'], {}), '(self.verticalWidget)\n', (825, 846), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((954, 992), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.verticalWidget'], {}), '(self.verticalWidget)\n', (971, 992), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1111, 1166), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', (['self.setSkeletonConfidenceWidget'], {}), '(self.setSkeletonConfidenceWidget)\n', (1132, 1166), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1273, 1323), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.setSkeletonConfidenceWidget'], {}), '(self.setSkeletonConfidenceWidget)\n', (1289, 1323), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1526, 1579), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.setSkeletonConfidenceWidget'], {}), '(self.setSkeletonConfidenceWidget)\n', (1545, 1579), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1904, 1946), 'PyQt5.QtWidgets.QDialogButtonBox', 'QtWidgets.QDialogButtonBox', (['settingsDialog'], {}), '(settingsDialog)\n', 
(1930, 1946), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2396, 2449), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['settingsDialog'], {}), '(settingsDialog)\n', (2433, 2449), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
from pyflink.common.serialization import SimpleStringEncoder
from pyflink.common.typeinfo import Types
from pyflink.datastream import StreamExecutionEnvironment, TimeCharacteristic
from pyflink.datastream.connectors import StreamingFileSink
def tutorial():
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
#env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
ds = env.from_collection(
collection=[(1, 'aaa'), (2, 'bbb')],
type_info=Types.ROW([Types.INT(), Types.STRING()]))
ds.add_sink(StreamingFileSink
.for_row_format('F:/github/openjw/penter/bigdata_study/pyflink1.x/batch/demo01/output', SimpleStringEncoder())
.build())
env.execute("tutorial_job")
if __name__ == '__main__':
tutorial()
|
[
"pyflink.common.typeinfo.Types.INT",
"pyflink.datastream.StreamExecutionEnvironment.get_execution_environment",
"pyflink.common.serialization.SimpleStringEncoder",
"pyflink.common.typeinfo.Types.STRING"
] |
[((269, 323), 'pyflink.datastream.StreamExecutionEnvironment.get_execution_environment', 'StreamExecutionEnvironment.get_execution_environment', ([], {}), '()\n', (321, 323), False, 'from pyflink.datastream import StreamExecutionEnvironment, TimeCharacteristic\n'), ((526, 537), 'pyflink.common.typeinfo.Types.INT', 'Types.INT', ([], {}), '()\n', (535, 537), False, 'from pyflink.common.typeinfo import Types\n'), ((539, 553), 'pyflink.common.typeinfo.Types.STRING', 'Types.STRING', ([], {}), '()\n', (551, 553), False, 'from pyflink.common.typeinfo import Types\n'), ((695, 716), 'pyflink.common.serialization.SimpleStringEncoder', 'SimpleStringEncoder', ([], {}), '()\n', (714, 716), False, 'from pyflink.common.serialization import SimpleStringEncoder\n')]
|
"""Databases access: read and write, occasionally sorting query results."""
import os
import re
from . import ROOT_DIR
from tinydb import TinyDB, Query
TEAM_DATABASE = os.path.join(ROOT_DIR, "data/teams.json")
GAME_DATABASE = os.path.join("data/game.json")
POV_DATABASE = os.path.join("data/pov.json")
# Read-only info about teams, players and roles
team_info = TinyDB(TEAM_DATABASE)
# Where to save raw game data from the OCR system
game_db = TinyDB(GAME_DATABASE)
# Where to save POV intervals data (formatted data)
pov_db = TinyDB(POV_DATABASE)
def purge_db(db_name=game_db):
db_name.drop_tables()
return db_name
def initialize_db(video_name):
if video_name:
db_path = os.path.join("data", f"{video_name}.json")
video_db = TinyDB(db_path)
else:
video_db = game_db
return video_db
def save_player_pov(map_round, player_name, frame_nb, video_db=game_db):
game_map = video_db.table(map_round)
game_map.insert({"player": player_name, "frame": frame_nb})
# intervals is an array of player intervals returned by format_data.get_pov_data
def save_player_intervals(map_round, intervals, database=pov_db):
game_map = database.table(map_round)
game_map.insert_multiple(intervals)
def get_team(shorthand):
"""Given a team shorthand name, gets the corresponding team object."""
teams = team_info.table("teams")
Team = Query()
return teams.get(Team.shorthand == shorthand)
def get_teams(teams_list):
"""
Given a list of team shorthand names, gets a dictionary of team objects,
with the keys being the shorthand names.
"""
teams = team_info.table("teams")
Team = Query()
return {t: teams.get(Team.shorthand == t) for t in teams_list}
def get_player(player):
players = team_info.table("players")
Players = Query()
return players.get(Players.name == player)
def get_players(teams_list):
"""Given a list of team shorthand names,
returns the players for all teams in one big list."""
teams = team_info.table("teams")
Team = Query()
result = []
for t in teams_list:
roles = teams.get(Team.shorthand == t)["players"]
result += [{"name": p, "role": "damage", "team": t} for p in roles["damage"]]
result += [{"name": p, "role": "tank", "team": t} for p in roles["tank"]]
result += [{"name": p, "role": "support", "team": t} for p in roles["support"]]
return result
def get_frames(player, table=game_db):
"""Returns a sorted list of frame numbers
in which the player POV was visible on-screen."""
Frames = Query()
player_frames = table.search(
Frames.player.matches(player["name"], flags=re.IGNORECASE)
)
return sorted([f["frame"] for f in player_frames])
|
[
"tinydb.Query",
"tinydb.TinyDB",
"os.path.join"
] |
[((170, 211), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""data/teams.json"""'], {}), "(ROOT_DIR, 'data/teams.json')\n", (182, 211), False, 'import os\n'), ((228, 258), 'os.path.join', 'os.path.join', (['"""data/game.json"""'], {}), "('data/game.json')\n", (240, 258), False, 'import os\n'), ((274, 303), 'os.path.join', 'os.path.join', (['"""data/pov.json"""'], {}), "('data/pov.json')\n", (286, 303), False, 'import os\n'), ((365, 386), 'tinydb.TinyDB', 'TinyDB', (['TEAM_DATABASE'], {}), '(TEAM_DATABASE)\n', (371, 386), False, 'from tinydb import TinyDB, Query\n'), ((448, 469), 'tinydb.TinyDB', 'TinyDB', (['GAME_DATABASE'], {}), '(GAME_DATABASE)\n', (454, 469), False, 'from tinydb import TinyDB, Query\n'), ((532, 552), 'tinydb.TinyDB', 'TinyDB', (['POV_DATABASE'], {}), '(POV_DATABASE)\n', (538, 552), False, 'from tinydb import TinyDB, Query\n'), ((1397, 1404), 'tinydb.Query', 'Query', ([], {}), '()\n', (1402, 1404), False, 'from tinydb import TinyDB, Query\n'), ((1670, 1677), 'tinydb.Query', 'Query', ([], {}), '()\n', (1675, 1677), False, 'from tinydb import TinyDB, Query\n'), ((1826, 1833), 'tinydb.Query', 'Query', ([], {}), '()\n', (1831, 1833), False, 'from tinydb import TinyDB, Query\n'), ((2064, 2071), 'tinydb.Query', 'Query', ([], {}), '()\n', (2069, 2071), False, 'from tinydb import TinyDB, Query\n'), ((2601, 2608), 'tinydb.Query', 'Query', ([], {}), '()\n', (2606, 2608), False, 'from tinydb import TinyDB, Query\n'), ((702, 744), 'os.path.join', 'os.path.join', (['"""data"""', 'f"""{video_name}.json"""'], {}), "('data', f'{video_name}.json')\n", (714, 744), False, 'import os\n'), ((764, 779), 'tinydb.TinyDB', 'TinyDB', (['db_path'], {}), '(db_path)\n', (770, 779), False, 'from tinydb import TinyDB, Query\n')]
|
#!/usr/bin/python
import re
import sys
import getopt
from subprocess import Popen, PIPE
from pprint import pprint as ppr
import os
_python3 = sys.version_info.major == 3
def Usage(s):
print('Usage: {} -t <cstest_path> [-f <file_name.cs>] [-d <directory>]'.format(s))
sys.exit(-1)
def get_report_file(toolpath, filepath, getDetails, cmt_out):
cmd = [toolpath, '-f', filepath]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
# stdout
failed_tests = []
if _python3:
stdout = bytes.decode(stdout)
stderr = bytes.decode(stderr)
# print('---> stdout\n', stdout)
# print('---> stderr\n', stderr)
matches = re.finditer(r'\[\s+RUN\s+\]\s+(.*)\n\[\s+FAILED\s+\]', stdout)
for match in matches:
failed_tests.append(match.group(1))
# stderr
counter = 0
details = []
for line in stderr.split('\n'):
if '[ PASSED ] 0 test(s).' in line:
break
elif 'LINE' in line:
continue
elif 'ERROR' in line and ' --- ' in line:
parts = line.split(' --- ')
try:
details.append((parts[1], failed_tests[counter], parts[2]))
except IndexError:
details.append(('', 'Unknown test', line.split(' --- ')[1]))
counter += 1
else:
continue
print('\n[-] There are/is {} failed test(s)'.format(len(details)))
if len(details) > 0 and getDetails:
print('[-] Detailed report for {}:\n'.format(filepath))
for c, f, d in details:
print('\t[+] {}: {}\n\t\t{}\n'.format(f, c, d))
print('\n')
return 0
elif len(details) > 0:
for c, f, d in details:
if len(f) > 0 and cmt_out is True:
tmp_cmd = ['sed', '-E', '-i.bak', 's/({})(.*)/\/\/ \\1\\2/g'.format(c), filepath]
sed_proc = Popen(tmp_cmd, stdout=PIPE, stderr=PIPE)
sed_proc.communicate()
tmp_cmd2 = ['rm', '-f', filepath + '.bak']
rm_proc = Popen(tmp_cmd2, stdout=PIPE, stderr=PIPE)
rm_proc.communicate()
return 0
return 1
def get_report_folder(toolpath, folderpath, details, cmt_out):
result = 1
for root, dirs, files in os.walk(folderpath):
path = root.split(os.sep)
for f in files:
if f.split('.')[-1] == 'cs':
print('[-] Target:', f,)
result *= get_report_file(toolpath, os.sep.join(x for x in path) + os.sep + f, details, cmt_out)
sys.exit(result ^ 1)
if __name__ == '__main__':
Done = False
details = False
toolpath = ''
cmt_out = False
try:
opts, args = getopt.getopt(sys.argv[1:], "ct:f:d:D")
for opt, arg in opts:
if opt == '-f':
result = get_report_file(toolpath, arg, details, cmt_out)
if result == 0:
sys.exit(1)
Done = True
elif opt == '-d':
get_report_folder(toolpath, arg, details, cmt_out)
Done = True
elif opt == '-t':
toolpath = arg
elif opt == '-D':
details = True
elif opt == '-c':
cmt_out = True
except getopt.GetoptError:
Usage(sys.argv[0])
if Done is False:
Usage(sys.argv[0])
|
[
"subprocess.Popen",
"getopt.getopt",
"re.finditer",
"os.walk",
"os.sep.join",
"sys.exit"
] |
[((273, 285), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (281, 285), False, 'import sys\n'), ((394, 430), 'subprocess.Popen', 'Popen', (['cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, stdout=PIPE, stderr=PIPE)\n', (399, 430), False, 'from subprocess import Popen, PIPE\n'), ((657, 728), 're.finditer', 're.finditer', (['"""\\\\[\\\\s+RUN\\\\s+\\\\]\\\\s+(.*)\\\\n\\\\[\\\\s+FAILED\\\\s+\\\\]"""', 'stdout'], {}), "('\\\\[\\\\s+RUN\\\\s+\\\\]\\\\s+(.*)\\\\n\\\\[\\\\s+FAILED\\\\s+\\\\]', stdout)\n", (668, 728), False, 'import re\n'), ((1979, 1998), 'os.walk', 'os.walk', (['folderpath'], {}), '(folderpath)\n', (1986, 1998), False, 'import os\n'), ((2211, 2231), 'sys.exit', 'sys.exit', (['(result ^ 1)'], {}), '(result ^ 1)\n', (2219, 2231), False, 'import sys\n'), ((2344, 2383), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""ct:f:d:D"""'], {}), "(sys.argv[1:], 'ct:f:d:D')\n", (2357, 2383), False, 'import getopt\n'), ((1658, 1698), 'subprocess.Popen', 'Popen', (['tmp_cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(tmp_cmd, stdout=PIPE, stderr=PIPE)\n', (1663, 1698), False, 'from subprocess import Popen, PIPE\n'), ((1787, 1828), 'subprocess.Popen', 'Popen', (['tmp_cmd2'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(tmp_cmd2, stdout=PIPE, stderr=PIPE)\n', (1792, 1828), False, 'from subprocess import Popen, PIPE\n'), ((2514, 2525), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2522, 2525), False, 'import sys\n'), ((2147, 2175), 'os.sep.join', 'os.sep.join', (['(x for x in path)'], {}), '(x for x in path)\n', (2158, 2175), False, 'import os\n')]
|
# -*- coding: UTF-8 -*-
"""
处理数据集 和 标签数据集的代码:(主要是对原始数据集裁剪)
处理方式:分别处理
注意修改 输入 输出目录 和 生成的文件名
output_dir = "./label_temp"
input_dir = "./label"
"""
import cv2
import os
import sys
import time
def get_img(input_dir):
img_paths = []
for (path,dirname,filenames) in os.walk(input_dir):
for filename in filenames:
img_paths.append(path+'/'+filename)
print("img_paths:",img_paths)
return img_paths
def cut_img(img_paths,output_dir):
scale = len(img_paths)
for i,img_path in enumerate(img_paths):
a = "#"* int(i/1000)
b = "."*(int(scale/1000)-int(i/1000))
c = (i/scale)*100
time.sleep(0.2)
print('正在处理图像: %s' % img_path.split('/')[-1])
img = cv2.imread(img_path)
weight = img.shape[1]
if weight>1600: # 正常发票
cropImg = img[50:200, 700:1500] # 裁剪【y1,y2:x1,x2】
#cropImg = cv2.resize(cropImg, None, fx=0.5, fy=0.5,
#interpolation=cv2.INTER_CUBIC) #缩小图像
cv2.imwrite(output_dir + '/' + img_path.split('/')[-1], cropImg)
else: # 卷帘发票
cropImg_01 = img[30:150, 50:600]
cv2.imwrite(output_dir + '/'+img_path.split('/')[-1], cropImg_01)
print('{:^3.3f}%[{}>>{}]'.format(c,a,b))
if __name__ == '__main__':
output_dir = "src/python-opencv/003-cut/dist_img" # 保存截取的图像目录
input_dir = "src/python-opencv" # 读取图片目录表
img_paths = get_img(input_dir)
print('图片获取完成 。。。!')
cut_img(img_paths,output_dir)
|
[
"cv2.imread",
"os.walk",
"time.sleep"
] |
[((286, 304), 'os.walk', 'os.walk', (['input_dir'], {}), '(input_dir)\n', (293, 304), False, 'import os\n'), ((661, 676), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (671, 676), False, 'import time\n'), ((745, 765), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (755, 765), False, 'import cv2\n')]
|
import numpy as np
import torch
from scipy.special import comb
class Metric:
def __init__(self, **kwargs):
self.requires = ['kmeans_cosine', 'kmeans_nearest_cosine', 'features_cosine', 'target_labels']
self.name = 'c_f1'
def __call__(self, target_labels, computed_cluster_labels_cosine, features_cosine, centroids_cosine):
if isinstance(features_cosine, torch.Tensor):
features_cosine = features_cosine.detach().cpu().numpy()
d = np.zeros(len(features_cosine))
for i in range(len(features_cosine)):
d[i] = np.linalg.norm(features_cosine[i, :] - centroids_cosine[computed_cluster_labels_cosine[i], :])
labels_pred = np.zeros(len(features_cosine))
for i in np.unique(computed_cluster_labels_cosine):
index = np.where(computed_cluster_labels_cosine == i)[0]
ind = np.argmin(d[index])
cid = index[ind]
labels_pred[index] = cid
N = len(target_labels)
# cluster n_labels
avail_labels = np.unique(target_labels)
n_labels = len(avail_labels)
# count the number of objects in each cluster
count_cluster = np.zeros(n_labels)
for i in range(n_labels):
count_cluster[i] = len(np.where(target_labels == avail_labels[i])[0])
# build a mapping from item_id to item index
keys = np.unique(labels_pred)
num_item = len(keys)
values = range(num_item)
item_map = dict()
for i in range(len(keys)):
item_map.update([(keys[i], values[i])])
# count the number of objects of each item
count_item = np.zeros(num_item)
for i in range(N):
index = item_map[labels_pred[i]]
count_item[index] = count_item[index] + 1
# compute True Positive (TP) plus False Positive (FP)
# tp_fp = 0
tp_fp = comb(count_cluster, 2).sum()
# for k in range(n_labels):
# if count_cluster[k] > 1:
# tp_fp = tp_fp + comb(count_cluster[k], 2)
# compute True Positive (TP)
tp = 0
for k in range(n_labels):
member = np.where(target_labels == avail_labels[k])[0]
member_ids = labels_pred[member]
count = np.zeros(num_item)
for j in range(len(member)):
index = item_map[member_ids[j]]
count[index] = count[index] + 1
# for i in range(num_item):
# if count[i] > 1:
# tp = tp + comb(count[i], 2)
tp += comb(count, 2).sum()
# False Positive (FP)
fp = tp_fp - tp
# Compute False Negative (FN)
count = comb(count_item, 2).sum()
# count = 0
# for j in range(num_item):
# if count_item[j] > 1:
# count = count + comb(count_item[j], 2)
fn = count - tp
# compute F measure
P = tp / (tp + fp)
R = tp / (tp + fn)
beta = 1
F = (beta * beta + 1) * P * R / (beta * beta * P + R)
return F
|
[
"scipy.special.comb",
"numpy.zeros",
"numpy.argmin",
"numpy.where",
"numpy.linalg.norm",
"numpy.unique"
] |
[((747, 788), 'numpy.unique', 'np.unique', (['computed_cluster_labels_cosine'], {}), '(computed_cluster_labels_cosine)\n', (756, 788), True, 'import numpy as np\n'), ((1046, 1070), 'numpy.unique', 'np.unique', (['target_labels'], {}), '(target_labels)\n', (1055, 1070), True, 'import numpy as np\n'), ((1187, 1205), 'numpy.zeros', 'np.zeros', (['n_labels'], {}), '(n_labels)\n', (1195, 1205), True, 'import numpy as np\n'), ((1391, 1413), 'numpy.unique', 'np.unique', (['labels_pred'], {}), '(labels_pred)\n', (1400, 1413), True, 'import numpy as np\n'), ((1662, 1680), 'numpy.zeros', 'np.zeros', (['num_item'], {}), '(num_item)\n', (1670, 1680), True, 'import numpy as np\n'), ((581, 680), 'numpy.linalg.norm', 'np.linalg.norm', (['(features_cosine[i, :] - centroids_cosine[computed_cluster_labels_cosine[i], :]\n )'], {}), '(features_cosine[i, :] - centroids_cosine[\n computed_cluster_labels_cosine[i], :])\n', (595, 680), True, 'import numpy as np\n'), ((877, 896), 'numpy.argmin', 'np.argmin', (['d[index]'], {}), '(d[index])\n', (886, 896), True, 'import numpy as np\n'), ((2289, 2307), 'numpy.zeros', 'np.zeros', (['num_item'], {}), '(num_item)\n', (2297, 2307), True, 'import numpy as np\n'), ((810, 855), 'numpy.where', 'np.where', (['(computed_cluster_labels_cosine == i)'], {}), '(computed_cluster_labels_cosine == i)\n', (818, 855), True, 'import numpy as np\n'), ((1906, 1928), 'scipy.special.comb', 'comb', (['count_cluster', '(2)'], {}), '(count_cluster, 2)\n', (1910, 1928), False, 'from scipy.special import comb\n'), ((2178, 2220), 'numpy.where', 'np.where', (['(target_labels == avail_labels[k])'], {}), '(target_labels == avail_labels[k])\n', (2186, 2220), True, 'import numpy as np\n'), ((2718, 2737), 'scipy.special.comb', 'comb', (['count_item', '(2)'], {}), '(count_item, 2)\n', (2722, 2737), False, 'from scipy.special import comb\n'), ((1275, 1317), 'numpy.where', 'np.where', (['(target_labels == avail_labels[i])'], {}), '(target_labels == avail_labels[i])\n', (1283, 
1317), True, 'import numpy as np\n'), ((2588, 2602), 'scipy.special.comb', 'comb', (['count', '(2)'], {}), '(count, 2)\n', (2592, 2602), False, 'from scipy.special import comb\n')]
|
import json
from glob import glob
import sys
from elmoformanylangs import Embedder
input_path = sys.argv[1] if len(sys.argv) > 1 else 'data/training-dataset-2019-01-23'
with open('{}/collection-info.json'.format(input_path), 'r') as f:
collectioninfo = json.load(f)
converters = {
'en': Embedder('ELMoForManyLangs/en'),
'fr': Embedder('ELMoForManyLangs/fr'),
'sp': Embedder('ELMoForManyLangs/es'),
'it': Embedder('ELMoForManyLangs/it')
}
def embed(inFile, converter):
curTree = []
trees = []
# Read sentence trees
for line in open(inFile):
if len(line) < 2:
trees.append(curTree)
curTree = []
elif line[0] != '#':
tok = line.strip().split('\t')
curTree.append(tok)
# Embed all sentences
sents = [[x[1] for x in curTree] for curTree in trees]
embeddings = converter.sents2elmo(sents)
# Add embeddings to trees
outFile = open(inFile + '.elmo', 'w')
for curTree, emb in zip(trees, embeddings):
for itemIdx in range(len(curTree)):
embStr = 'emb=' + ','.join([str(x) for x in emb[itemIdx]])
if curTree[itemIdx][-1] == '_':
curTree[itemIdx][-1] = embStr
else:
curTree[itemIdx][-1] += '|' + embStr
outFile.write('\t'.join(curTree[itemIdx]) + '\n')
outFile.write('\n')
outFile.close()
for probleminfo in collectioninfo:
problem_name = probleminfo['problem-name']
lang = probleminfo['language']
print('Embedding {}'.format(problem_name))
for inFile in glob('{}/{}/*/*.tok'.format(input_path, problem_name)):
embed(inFile, converters[lang])
|
[
"json.load",
"elmoformanylangs.Embedder"
] |
[((259, 271), 'json.load', 'json.load', (['f'], {}), '(f)\n', (268, 271), False, 'import json\n'), ((299, 330), 'elmoformanylangs.Embedder', 'Embedder', (['"""ELMoForManyLangs/en"""'], {}), "('ELMoForManyLangs/en')\n", (307, 330), False, 'from elmoformanylangs import Embedder\n'), ((342, 373), 'elmoformanylangs.Embedder', 'Embedder', (['"""ELMoForManyLangs/fr"""'], {}), "('ELMoForManyLangs/fr')\n", (350, 373), False, 'from elmoformanylangs import Embedder\n'), ((385, 416), 'elmoformanylangs.Embedder', 'Embedder', (['"""ELMoForManyLangs/es"""'], {}), "('ELMoForManyLangs/es')\n", (393, 416), False, 'from elmoformanylangs import Embedder\n'), ((428, 459), 'elmoformanylangs.Embedder', 'Embedder', (['"""ELMoForManyLangs/it"""'], {}), "('ELMoForManyLangs/it')\n", (436, 459), False, 'from elmoformanylangs import Embedder\n')]
|
#right now, requires source /project/projectdirs/desi/software/desi_environment.sh master
from astropy.table import Table
import numpy as np
import os
import argparse
import fitsio
from desitarget.targetmask import zwarn_mask
parser = argparse.ArgumentParser()
parser.add_argument("--night", help="use this if you want to specify the night, rather than just use the last one",default=None)
args = parser.parse_args()
month = args.night[:6]
#get the right tileids
exps = Table.read('/global/cfs/cdirs/desi/spectro/redux/daily/exposure_tables/'+month+'/exposure_table_'+args.night+'.csv')
print('number of exposures found:')
print(len(exps))
#cut to dark tiles
sel = exps['FAPRGRM']=='dark'
print('number that are dark time:')
print(len(exps[sel]))
exps = exps[sel]
#get the list of tileids observed on the last night
tidl = np.unique(exps['TILEID'])
#get total exposure time for tiles
exptl = np.zeros(len(tidl))
for ii in range(0, len(tidl)):
w = exps['TILEID'] == tidl[ii]
expt = np.sum(exps[w]['EFFTIME_ETC'])
exptl[ii] = expt
#sel &= exps['EFFTIME_ETC'] > 850 #select only tiles that should be near completion
sel = exptl > 850
tidl = tidl[sel]
print('number dark tiles that have EFFTIME_ETC > 850 during the night:')
print(len(tidl))
print('looking at LRG redshift results from the night '+str(args.night))
print('the tileids are:')
print(tidl)
#one list for each petal for total targets
gz = np.zeros(10)
tz = np.zeros(10)
zdir = '/global/cfs/cdirs/desi/spectro/redux/daily/tiles/cumulative/'
for tid in tidl:
for pt in range(0,10):
zmtlf = fitsio.read(zdir+str(tid)+'/'+args.night+'/zmtl-'+str(pt)+'-'+str(tid)+'-thru'+args.night+'.fits')
nodata = zmtlf["ZWARN"] & zwarn_mask["NODATA"] != 0
num_nod = np.sum(nodata)
print('looking at petal '+str(pt)+' on tile '+str(tid))
print('number with no data '+str(num_nod))
badqa = zmtlf["ZWARN"] & zwarn_mask.mask("BAD_SPECQA|BAD_PETALQA") != 0
num_badqa = np.sum(badqa)
print('number with bad qa '+str(num_badqa))
nomtl = nodata | badqa
wfqa = ~nomtl
wlrg = (zmtlf['DESI_TARGET'] & 1) > 0
zlrg = zmtlf[wfqa&wlrg]
if len(zlrg) > 0:
wzwarn = zmtlf['ZWARN'] == 0
gzlrg = zmtlf[wzwarn&wlrg]
print('The fraction of good LRGs is '+str(len(gzlrg)/len(zlrg))+' for '+str(len(zlrg))+' considered spectra')
gz[pt] += len(gzlrg)
tz[pt] += len(zlrg)
else:
print('no good lrg data')
print('the total number of LRG considered per petal for the night is:')
print(tz)
tzs = gz/tz
print('the total fraction of good LRG z per petal for the night is:')
print(tzs)
|
[
"astropy.table.Table.read",
"numpy.sum",
"argparse.ArgumentParser",
"desitarget.targetmask.zwarn_mask.mask",
"numpy.zeros",
"numpy.unique"
] |
[((236, 261), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (259, 261), False, 'import argparse\n'), ((472, 600), 'astropy.table.Table.read', 'Table.read', (["('/global/cfs/cdirs/desi/spectro/redux/daily/exposure_tables/' + month +\n '/exposure_table_' + args.night + '.csv')"], {}), "('/global/cfs/cdirs/desi/spectro/redux/daily/exposure_tables/' +\n month + '/exposure_table_' + args.night + '.csv')\n", (482, 600), False, 'from astropy.table import Table\n'), ((829, 854), 'numpy.unique', 'np.unique', (["exps['TILEID']"], {}), "(exps['TILEID'])\n", (838, 854), True, 'import numpy as np\n'), ((1424, 1436), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1432, 1436), True, 'import numpy as np\n'), ((1442, 1454), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1450, 1454), True, 'import numpy as np\n'), ((997, 1027), 'numpy.sum', 'np.sum', (["exps[w]['EFFTIME_ETC']"], {}), "(exps[w]['EFFTIME_ETC'])\n", (1003, 1027), True, 'import numpy as np\n'), ((1764, 1778), 'numpy.sum', 'np.sum', (['nodata'], {}), '(nodata)\n', (1770, 1778), True, 'import numpy as np\n'), ((1994, 2007), 'numpy.sum', 'np.sum', (['badqa'], {}), '(badqa)\n', (2000, 2007), True, 'import numpy as np\n'), ((1927, 1968), 'desitarget.targetmask.zwarn_mask.mask', 'zwarn_mask.mask', (['"""BAD_SPECQA|BAD_PETALQA"""'], {}), "('BAD_SPECQA|BAD_PETALQA')\n", (1942, 1968), False, 'from desitarget.targetmask import zwarn_mask\n')]
|
from ArithmeticDictionary import AD
from collections import defaultdict
import numpy as np
class BoW(AD):
def __init__(self, text):
super().__init__()
self.ad = AD()
if text is not None:
for w in text.split():
self.ad += AD({w: 1})
self.update(self.ad)
#Tests if two elements are equal
def check_equal(x, y, msg=None):
if x != y:
if msg is None:
print("Error:")
else:
print("Error in", msg, ":")
print(" Your answer was:", x)
print(" Correct answer: ", y)
else:
print("Success!")
assert x == y, "%r and %r are different" % (x, y)
empty_bow = BoW("")
check_equal(len(empty_bow), 0)
simple_bow = BoW("I like apples")
check_equal(isinstance(simple_bow, AD), True)
check_equal(set(simple_bow.keys()), {'I', 'like', 'apples'})
bow1 = BoW("I like to eat cakes to go")
bow2 = BoW("I like to drink coffee")
bow3 = bow1 + bow2
check_equal(bow3['I'], 2)
check_equal(bow3['to'], 3)
|
[
"ArithmeticDictionary.AD"
] |
[((183, 187), 'ArithmeticDictionary.AD', 'AD', ([], {}), '()\n', (185, 187), False, 'from ArithmeticDictionary import AD\n'), ((279, 289), 'ArithmeticDictionary.AD', 'AD', (['{w: 1}'], {}), '({w: 1})\n', (281, 289), False, 'from ArithmeticDictionary import AD\n')]
|
#!/usr/bin/env python3
import numpy as np
import random
if __name__ == '__main__':
    # Scenario generator: writes a Config.ini plus per-scenario synthetic
    # bandwidth traces for a tiled, multi-viewpoint streaming experiment.
    nbViewpoint = 3
    nbTileList = [1, 3*2, 6*4]
    #nbTileList = [1]
    #nbQuality = 4
    nbQuality = 3
    #nbChunk = 4*60
    nbChunk = 256
    #nbChunk = 60
    nbBandwidth = 1
    nbUser = 4
    nbProcessedChunk = 32
    #nbLagChunkList = [2,10]
    #nbLagChunkList = [2,3,4]
    nbLagChunkList = [2]
    optimalGap=0.03
    nbThread=4
    #averageLowBitrate = 5
    #averageHighBitrate = 35
    averageBitrateList = [5, 8, 16]
    # Measured per-tiling average / variance bitrates (one inner list per
    # tiling configuration, one entry per quality level) — presumably Mbps;
    # TODO confirm units against the consumer of these files.
    avgBitrateList = [[5.00638565625, 8.00672046875, 16.01394303125], [5.0235795, 8.02069896875, 16.019751999999997], [5.0842264375, 8.08175678125, 16.080042812500004]]
    varBitrateList = [[0.05197598260550684, 0.13243587169603027, 0.5569402424963116], [0.013006378470749997, 0.043633303918936515, 0.3272058487585], [0.012634444530058589, 0.04158807113638965, 0.3401763092415898]]
    #averageLowQuality = 220
    #averageHighQuality = 5
    averageQualityList = [2.8642533333333335, 2.5503899999999997, 2.1635133333333334]
    varQualityList = [0.6041990998222223, 0.3490154629, 0.13785629982222222]
    #averageBandwidthList = [4, 7, 16, 24, 32, 48, 60]
    #averageBandwidthList = [4, 7, 10, 15, 20]
    #averageBandwidthList = [5, 10, 15, 20, 25, 30, 35, 40]
    #averageBandwidthList = [3, 4, 7, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60]
    #averageBandwidthList = [3, 4, 5, 7, 10, 15, 20, 25 ,30]
    averageBandwidthList = [3, 5, 7, 10, 15]
    #averageBandwidthList = [20, 25, 30, 35, 40, 45, 50, 55, 60]
    #averageBandwidthList = [4, 7, 10]
    #for i in range(15, 61):
    #    averageBandwidthList.append(i)
    # Build the output file paths: one head-motion trace per (tiling, user)
    # and one adaptation-set description per tiling.
    outputUser = dict()
    outputAdaptationSet = dict()
    for nbTile in nbTileList:
        outputUser[nbTile] = dict()
        for userId in range(nbUser):
            #if userId != 0:
            #    outputUser[nbTile][userId] = 'scenarios/user_{nbViewpoint}_{nbTile}_{nbChunk}_{userId}.txt'.format(nbViewpoint=nbViewpoint, nbTile=nbTile, nbChunk=nbChunk, userId=userId)
            outputUser[nbTile][userId] = 'scenarios/user_{nbViewpoint}_{nbTile}_{nbChunk}_{userId}.txt'.format(nbViewpoint=nbViewpoint, nbTile=nbTile, nbChunk=nbChunk, userId=userId)
            #else:
            #    outputUser[nbTile][userId] = 'scenarios/user_{nbViewpoint}_{nbTile}_{nbChunk}.txt'.format(nbViewpoint=nbViewpoint, nbTile=nbTile, nbChunk=nbChunk)
        outputAdaptationSet[nbTile] = 'scenarios/adaptationSet_{nbViewpoint}_{nbTile}_{nbChunk}_{nbQuality}.txt'.format(nbViewpoint=nbViewpoint, nbTile=nbTile, nbChunk=nbChunk, nbQuality=nbQuality)
    # One fake-bandwidth trace file per (average bandwidth, lag, trace id).
    outputBandwidth = dict()
    for averageBandwidth in averageBandwidthList:
        outputBandwidth[averageBandwidth] = dict()
        for nbLagChunk in nbLagChunkList:
            outputBandwidth[averageBandwidth][nbLagChunk] = dict()
            for bandwidthId in range(nbBandwidth):
                outputBandwidth[averageBandwidth][nbLagChunk][bandwidthId] = 'scenarios/bandwdithFake_{nbChunk}_{nbLagChunk}_{averageBandwidth}Mbps_id{bandwidthId}.txt'.format(nbChunk=nbChunk, nbLagChunk=nbLagChunk, averageBandwidth=averageBandwidth, bandwidthId=bandwidthId)
    # Fixed seeds so the generated traces are reproducible across runs.
    random.seed(42)
    np.random.seed(42)
    #Configuration file
    with open('Config.ini', 'w') as o:
        o.write('[Global]\nNbViewpoint={nbViewpoint}\nNbScenario={nbScenario}\noptimalGap={optimalGap}\nnbThread={nbThread}\n'.format(nbViewpoint=nbViewpoint, nbScenario=len(averageBandwidthList)*len(nbTileList)*len(nbLagChunkList), optimalGap=optimalGap, nbThread=nbThread))
        counter = 0
        # First pass: enumerate scenario names in the [Global] section.
        #for scenarioId in reversed(range(len(averageBandwidthList))):
        for scenarioId in range(len(averageBandwidthList)):
            for nbTileId in range(len(nbTileList)):
                for nbLagChunkId in range(len(nbLagChunkList)):
                    nbTile = nbTileList[nbTileId]
                    nbLagChunk = nbLagChunkList[nbLagChunkId]
                    o.write('Scenario{id}={name}\n'.format(id=counter, name='Test_{}_{}_{}_{}Mbps'.format(nbChunk, nbLagChunk, nbTile, averageBandwidthList[scenarioId])))
                    counter += 1
        o.write('\n')
        # Second pass: one [Test_...] section per scenario, referencing the
        # user / adaptation-set / bandwidth files created for it.
        #for scenarioId in reversed(range(len(averageBandwidthList))):
        for scenarioId in range(len(averageBandwidthList)):
            for nbTile in nbTileList:
                for nbLagChunk in nbLagChunkList:
                    bandwidthConf = ""
                    for bandwidthId in range(nbBandwidth):
                        bandwidthConf += '{}../{}'.format(';' if bandwidthConf != "" else '', outputBandwidth[averageBandwidthList[scenarioId]][nbLagChunk][bandwidthId])
                    userConf = ""
                    for userId in range(nbUser):
                        userConf += '{}../{}'.format(';' if userConf != "" else '', outputUser[nbTile][userId])
                    o.write('[{name}]\nNbTile={nbTile}\nNbQuality={nbQuality}\nNbChunk={nbChunk}\nNbProcessedChunk={nbProcessedChunk}\nNbLagDownloadChunk={nbLagChunk}\nAdaptationSetConf=../{asName}\nBandwidthConf={bName}\nUserConf={uName}\n'.format(name='Test_{}_{}_{}_{}Mbps'.format(nbChunk, nbLagChunk, nbTile, averageBandwidthList[scenarioId]), nbTile=nbTile, nbChunk=nbChunk, nbProcessedChunk=nbProcessedChunk, nbQuality=nbQuality, nbLagChunk=nbLagChunk, asName=outputAdaptationSet[nbTile], uName=userConf, bName=bandwidthConf))
                    o.write('horizontalOptimal={}\n'.format('true'))# if averageBandwidthList[scenarioId] > 0.9*nbViewpoint*averageLowBitrate else 'false'))
                    o.write('optimal={}\n'.format('true'))
                    o.write('verticalOptimal={}\n'.format('true'))
                    o.write('avgBandwidth={}\n'.format(averageBandwidthList[scenarioId]))
                    o.write('\n')
    #Bandwidth file
    # Each trace: one Gaussian sample per chunk (5% relative std-dev),
    # including nbLagChunk "warm-up" chunks with negative indices.
    #bandwidthList = [max(0.1*averageBandwidthList[0], np.random.normal(averageBandwidthList[0], 0.15*averageBandwidthList[0])) for i in range(-nbLagChunk, nbChunk)]
    for averageBandwidth in averageBandwidthList:
        for nbLagChunk in nbLagChunkList:
            for bandwidthId in range(nbBandwidth):
                with open(outputBandwidth[averageBandwidth][nbLagChunk][bandwidthId], 'w') as ob:
                    ob.write('#chunId,bandwidth\n')
                    for chunId in range(-nbLagChunk, nbChunk):
                        ob.write('{},{}\n'.format(chunId, np.random.normal(averageBandwidth, 0.05*averageBandwidth)))
                        #ob.write('{},{}\n'.format(chunId, bandwidthList[chunId]*averageBandwidth/averageBandwidthList[0]))
##User:
#for userId in range(nbUser):
# viewpointList = list()
# switchingTime = list()
# lastViewpoint = None
# for chunId in range(nbChunk):
# if lastViewpoint is None:
# currentViewpoint = random.randint(0,nbViewpoint-1)
# else:
# vList = [i for i in range(0, nbViewpoint)]
# pList = list()
# for v in vList:
# if v == lastViewpoint:
# pList.append(35)
# else:
# pList.append(1)
# pList = np.array(pList)/sum(pList)
# currentViewpoint = np.random.choice(vList, p=pList)
# if lastViewpoint != currentViewpoint:
# if lastViewpoint is not None:
# switchingTime.append(np.random.uniform())
# print('Switch at',chunId, 'from',lastViewpoint,'to',currentViewpoint)
# else:
# switchingTime.append(-1)
# lastViewpoint = currentViewpoint
# viewpointList.append(currentViewpoint)
# switchingTime.append(-1)
# visibility = list()
# for chunId in range(nbChunk):
# visiList = list()
# nbTile = 24
# totalVisi = 0
# for tileId in range(nbTile):
# if tileId != nbTile-1:
# visiList.append(random.randint(0,1000-totalVisi)/1000)
# totalVisi += int(visiList[tileId]*1000)
# else:
# visiList.append(1-totalVisi/1000)
# random.shuffle(visiList)
# visibility.append(visiList)
# for nbTile in nbTileList:
# if userId != 0:
# with open(outputUser[nbTile][userId], 'w') as ou:
# ou.write('#chunkId,viewpointId,tileId,visibilityRatio,switchingDecisionTime\n')
# for chunId in range(nbChunk):
# currentViewpoint = viewpointList[chunId]
# for viewpointId in range(nbViewpoint):
# for tileId in range(nbTile):
# if viewpointId != currentViewpoint:
# ou.write('{},{},{},{},{}\n'.format(chunId, viewpointId, tileId, 0, switchingTime[chunId]))
# else:
# if nbTile == 24:
# visi = visibility[chunId][tileId]
# elif nbTile == 6:
# if tileId == 5:
# visi = round(10000*sum(visibility[chunId][tileId*6:(tileId+1)*6]))/10000
# else:
# visi = round(1000*sum(visibility[chunId][tileId*6:(tileId+1)*6]))/1000
# elif nbTile == 1:
# visi = round(1000*sum(visibility[chunId])/1000)
# else:
# raise 'NOT SUPPORTED TILE NUMBER'
# ou.write('{},{},{},{}, {}\n'.format(chunId, viewpointId, tileId, visi, switchingTime[chunId]))
#AdaptationSet
#for nbTile in nbTileList:
# with open(outputAdaptationSet[nbTile], 'w') as oas:
# oas.write('#chunkId,viewpointId,tileId,qualityId,distortion,bitrate\n')
# for chunId in range(nbChunk):
# for viewpointId in range(nbViewpoint):
# for tileId in range(nbTile):
# for qualityId in range(nbQuality):
# #avBitrate = (qualityId*(averageHighBitrate-averageLowBitrate)/(nbQuality-1) + averageLowBitrate)/nbTile
# #avBitrate = averageBitrateList[qualityId]/nbTile
# #avDistortion = (qualityId*(averageHighQuality-averageLowQuality)/(nbQuality-1) + averageLowQuality)**2
# #bitrate = np.random.normal(avBitrate, 0.05*avBitrate/nbTile)
# bitrate = np.random.normal(avgBitrateList[nbTileList.index(nbTile)][qualityId], varBitrateList[nbTileList.index(nbTile)][qualityId])/nbTile
# #distortion = np.random.normal(avDistortion, avDistortion*0.005)
# distortion = np.random.normal(averageQualityList[qualityId], varQualityList[qualityId])
# #bitrate = max(bitrate, 0.05*avBitrate/nbTile)
# distortion = min(255*255, max(0, distortion))
# oas.write('{},{},{},{},{},{}\n'.format(chunId, viewpointId, tileId, qualityId, distortion, bitrate))
|
[
"numpy.random.seed",
"random.seed",
"numpy.random.normal"
] |
[((3166, 3181), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (3177, 3181), False, 'import random\n'), ((3186, 3204), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3200, 3204), True, 'import numpy as np\n'), ((6374, 6433), 'numpy.random.normal', 'np.random.normal', (['averageBandwidth', '(0.05 * averageBandwidth)'], {}), '(averageBandwidth, 0.05 * averageBandwidth)\n', (6390, 6433), True, 'import numpy as np\n')]
|
# %matplotlib inline
# +
import os, sys
import numpy as np
import random
import copy
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, TensorDataset
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data.sampler import SubsetRandomSampler
from config import *
from models import *
from utils import *
from datasets.celeba import CelebA
from ops import exp_mov_avg
#from torchsummary import summary
from torchinfo import summary
from tqdm import tqdm
# Placeholder defaults; main() shadows IMG_DIM / NUM_CLASSES with local
# per-dataset values.
IMG_DIM = -1
NUM_CLASSES = -1
# Per-batch gradient clipping bound and sensitivity used by the DP hooks below.
CLIP_BOUND = 1.
SENSITIVITY = 2.
DATA_ROOT = './../data'
# +
def master_hook_adder(module, grad_input, grad_output):
    """Trampoline backward hook.

    Registered once on the module; every backward call is delegated to
    whatever callable is currently stored in the module-level
    ``dynamic_hook_function``, so the effective hook can be swapped at
    runtime without re-registering.
    """
    global dynamic_hook_function
    return dynamic_hook_function(module, grad_input, grad_output)
def dummy_hook(module, grad_input, grad_output):
    """No-op placeholder hook: leaves the gradients untouched.

    Returning None tells autograd to keep the original grad_input.
    """
    return None
def modify_gradnorm_conv_hook(module, grad_input, grad_output):
    """Backward hook that rescales the gradient w.r.t. the input image.

    Flattens each sample's gradient, computes its L2 norm, and rescales it by
    ``CLIP_BOUND / (batchsize * norm)``; the remaining grad_input slots are
    passed through unchanged.

    :param module: the hooked module (unused).
    :param grad_input: tuple of gradients w.r.t. the module inputs; slot 0 is
        the gradient w.r.t. the image.
    :param grad_output: gradients w.r.t. the module outputs (unused).
    :return: tuple with the rescaled slot 0 and the other slots untouched.
    """
    ### get grad wrt. input (image)
    grad_wrt_image = grad_input[0]
    grad_input_shape = grad_wrt_image.size()
    batchsize = grad_input_shape[0]
    clip_bound_ = CLIP_BOUND / batchsize  # account for the 'sum' operation in GP
    grad_wrt_image = grad_wrt_image.view(batchsize, -1)
    grad_input_norm = torch.norm(grad_wrt_image, p=2, dim=1)

    ### rescale
    # NOTE(review): unlike dp_conv_hook there is no min(coef, 1) clamp here,
    # so gradients smaller than the bound are scaled *up* to it — confirm
    # this normalisation (rather than pure clipping) is intended.
    clip_coef = clip_bound_ / (grad_input_norm + 1e-10)
    clip_coef = clip_coef.unsqueeze(-1)
    grad_wrt_image = clip_coef * grad_wrt_image

    # FIX: rebuild the grad_input tuple generically. The original hard-coded
    # a 3-tuple (grad_input[1], grad_input[2]), which raises IndexError for
    # modules whose backward provides a different number of gradient slots;
    # behavior is unchanged for the 3-slot case (same approach as dp_conv_hook).
    grad_input_new = [grad_wrt_image.view(grad_input_shape)]
    grad_input_new.extend(grad_input[1:])
    return tuple(grad_input_new)
def dp_conv_hook(module, grad_input, grad_output):
    '''
    Gradient sanitization hook for DP training: per-batch clipping followed
    by Gaussian noise addition on the gradient w.r.t. the input image.

    :param module: the hooked module (unused).
    :param grad_input: tuple of gradients w.r.t. the module inputs; slot 0 is
        the gradient w.r.t. the image.
    :param grad_output: gradients w.r.t. the module outputs (unused).
    :return: tuple with the sanitized slot 0; other slots pass through.
    '''
    global noise_multiplier
    ### get grad wrt. input (image)
    grad_wrt_image = grad_input[0]
    grad_input_shape = grad_wrt_image.size()
    batchsize = grad_input_shape[0]
    # Divide the bound by the batch size to account for gradient summation.
    clip_bound_ = CLIP_BOUND / batchsize
    grad_wrt_image = grad_wrt_image.view(batchsize, -1)
    grad_input_norm = torch.norm(grad_wrt_image, p=2, dim=1)
    ### clip
    # True clipping: only scale *down* gradients whose norm exceeds the bound.
    clip_coef = clip_bound_ / (grad_input_norm + 1e-10)
    clip_coef = torch.min(clip_coef, torch.ones_like(clip_coef))
    clip_coef = clip_coef.unsqueeze(-1)
    grad_wrt_image = clip_coef * grad_wrt_image
    ### add noise
    # Gaussian noise scaled by the (clip bound x noise multiplier x sensitivity).
    noise = clip_bound_ * noise_multiplier * SENSITIVITY * torch.randn_like(grad_wrt_image)
    grad_wrt_image = grad_wrt_image + noise
    # Rebuild grad_input, keeping all remaining slots untouched.
    grad_input_new = [grad_wrt_image.view(grad_input_shape)]
    for i in range(len(grad_input)-1):
        grad_input_new.append(grad_input[i+1])
    return tuple(grad_input_new)
# Convenience aliases for CUDA tensor constructors (assumes a GPU build of torch).
FloatTensor = torch.cuda.FloatTensor
LongTensor = torch.cuda.LongTensor
def weights_init(m):
    """DCGAN-style weight initialisation, applied via ``net.apply``.

    Conv2d layers get weights ~ N(0, 0.02); BatchNorm layers get weights
    ~ N(1, 0.02) and zero bias. Any other layer type is left untouched.
    """
    layer_name = type(m).__name__
    if 'Conv2d' in layer_name:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
# -
def main(args):
    """Train a DCGAN on CelebA, optionally sanitizing discriminator-to-generator
    gradients for differential privacy.

    Builds the generator/discriminator pair (plus an EMA copy of the generator),
    the CelebA data loader restricted to a fixed 100k-sample subset, and runs the
    standard BCE adversarial loop. When ``args.noise_multiplier > 0`` a backward
    hook on ``netD.conv1`` clips and noises the gradient flowing back to G.
    Sample grids and model checkpoints are written to ``args.save_dir``.

    :param args: parsed command-line namespace (see config.parse_arguments).
    """
    ### config
    global noise_multiplier
    dataset = args.dataset
    # NOTE(review): several of the following config values (num_discriminators,
    # model_dim, L_gp, L_epsilon, critic_iters, latent_type, load_dir) are read
    # but never used below — presumably leftovers from a WGAN-GP variant.
    num_discriminators = args.num_discriminators
    noise_multiplier = args.noise_multiplier
    z_dim = args.z_dim
    if dataset == 'celeba':
        z_dim = 100
    model_dim = args.model_dim
    batchsize = args.batchsize
    L_gp = args.L_gp
    L_epsilon = args.L_epsilon
    critic_iters = args.critic_iters
    latent_type = args.latent_type
    load_dir = args.load_dir
    save_dir = args.save_dir
    if_dp = (args.noise_multiplier > 0.)
    gen_arch = args.gen_arch
    num_gpus = args.num_gpus

    ### CUDA
    use_cuda = torch.cuda.is_available()
    devices = [torch.device("cuda:%d" % i if use_cuda else "cpu") for i in range(num_gpus)]
    device0 = devices[0]
    if use_cuda:
        torch.set_default_tensor_type('torch.cuda.FloatTensor')

    ### Random seed
    if args.random_seed == 1:
        args.random_seed = np.random.randint(10000, size=1)[0]
    print('random_seed: {}'.format(args.random_seed))
    # Record the seed in the save dir as an empty marker file.
    os.system('rm ' + os.path.join(save_dir, 'seed*'))
    os.system('touch ' + os.path.join(save_dir, 'seed=%s' % str(args.random_seed)))
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)

    ### Set up models
    print('gen_arch:' + gen_arch)
    if dataset == 'celeba':
        ngpu = 1
        netG = Generator_celeba(ngpu).to(device0)
        #netG.load_state_dict(torch.load('../results/celeba/main/d_1_2e-4_g_1_2e-4_SN_full/netG_15000.pth'))

        # Handle multi-gpu if desired
        if (device0.type == 'cuda') and (ngpu > 1):
            netG = nn.DataParallel(netG, list(range(ngpu)))

        # Apply the weights_init function to randomly initialize all weights
        # to mean=0, stdev=0.02.
        netG.apply(weights_init)

    # Exponential-moving-average copy of the generator, used for sampling/saving.
    netGS = copy.deepcopy(netG).to(device0)

    if dataset == 'celeba':
        ngpu = 1
        netD = Discriminator_celeba(ngpu).to(device0)
        #netD.load_state_dict(torch.load('../results/celeba/main/d_1_2e-4_g_1_2e-4_SN_full/netD_15000.pth'))

        # Handle multi-gpu if desired
        if (device0.type == 'cuda') and (ngpu > 1):
            netD = nn.DataParallel(netD, list(range(ngpu)))
        # Apply the weights_init function to randomly initialize all weights
        # to mean=0, stdev=0.2.
        #netD.apply(weights_init)

    ### Set up optimizers
    optimizerD = optim.Adam(netD.parameters(), lr=2e-4, betas=(0.5, 0.99))
    optimizerG = optim.Adam(netG.parameters(), lr=2e-4, betas=(0.5, 0.99))

    ### Data loaders
    if dataset == 'celeba':
        transform_train = transforms.Compose([
            transforms.Resize(64),
            transforms.CenterCrop(64),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

    if dataset == 'celeba':
        IMG_DIM = 64*64*3
        NUM_CLASSES = 2
        trainset = CelebA(root=os.path.join('/work/u5366584/exp/datasets/celeba'), split='train',
                transform=transform_train, download=False)#, custom_subset=True)
        #trainset = CelebA(root=os.path.join('../data'), split='train',
        #        transform=transform_train, download=False, custom_subset=True)
    else:
        raise NotImplementedError

    ###fix sub-training set (fix to 10000 training samples)
    if args.update_train_dataset:
        if dataset == 'mnist':
            indices_full = np.arange(60000)
        elif dataset == 'cifar_10':
            indices_full = np.arange(50000)
        elif dataset == 'celeba':
            indices_full = np.arange(len(trainset))
        np.random.shuffle(indices_full)
        '''
        #####ref
        indices = np.loadtxt('index_20k.txt', dtype=np.int_)
        remove_idx = [np.argwhere(indices_full==x) for x in indices]
        indices_ref = np.delete(indices_full, remove_idx)
        indices_slice = indices_ref[:20000]
        np.savetxt('index_20k_ref.txt', indices_slice, fmt='%i') ##ref index is disjoint to original index
        '''

        ### growing dataset: extend the existing 20k index set with 20k fresh,
        ### non-overlapping samples and persist the result.
        indices = np.loadtxt('index_20k.txt', dtype=np.int_)
        remove_idx = [np.argwhere(indices_full==x) for x in indices]
        indices_rest = np.delete(indices_full, remove_idx)
        indices_rest = indices_rest[:20000]
        indices_slice = np.concatenate((indices, indices_rest), axis=0)
        np.savetxt('index_40k.txt', indices_slice, fmt='%i')

    indices = np.loadtxt('index_100k.txt', dtype=np.int_)
    trainset = torch.utils.data.Subset(trainset, indices)
    print(len(trainset))

    workers = 4
    dataloader = torch.utils.data.DataLoader(trainset, batch_size=batchsize,
                                             shuffle=True, num_workers=workers)

    if if_dp:
        ### Register hook
        global dynamic_hook_function
        # BUG FIX: the original iterated over an undefined `netD_list`
        # (leftover from a multi-discriminator variant), raising NameError
        # whenever DP was enabled; register the hook on the single netD.
        netD.conv1.register_backward_hook(master_hook_adder)

    criterion = nn.BCELoss()
    real_label = 1.
    fake_label = 0.
    nz = 100
    # Fixed latent batch so sample grids are comparable across iterations.
    fixed_noise = torch.randn(100, nz, 1, 1, device=device0)
    iters = 0
    num_epochs = 256 * 5 + 1

    print("Starting Training Loop...")
    # For each epoch
    for epoch in range(num_epochs):
        # For each batch in the dataloader
        for i, (data, y) in enumerate(dataloader, 0):

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            ## Train with all-real batch
            netD.zero_grad()
            # Format batch
            real_cpu = data.to(device0)
            b_size = real_cpu.size(0)
            label = torch.full((b_size,), real_label, dtype=torch.float, device=device0)
            # Forward pass real batch through D
            output = netD(real_cpu).view(-1)
            # Calculate loss on all-real batch
            errD_real = criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            D_x = output.mean().item()

            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, nz, 1, 1, device=device0)
            # Generate fake image batch with G
            fake = netG(noise)
            label.fill_(fake_label)
            # Classify all fake batch with D
            output = netD(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = criterion(output, label)
            # Calculate the gradients for this batch, accumulated (summed) with previous gradients
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Compute error of D as sum over the fake and the real batches
            errD = errD_real + errD_fake
            # Update D
            optimizerD.step()

            iters += 1

            for iter_g in range(1):
                ############################
                # Update G network
                ###########################
                if if_dp:
                    ### Sanitize the gradients passed to the Generator
                    dynamic_hook_function = dp_conv_hook
                else:
                    ### Only modify the gradient norm, without adding noise
                    dynamic_hook_function = modify_gradnorm_conv_hook

                ############################
                # (2) Update G network: maximize log(D(G(z)))
                ###########################
                noise = torch.randn(b_size, nz, 1, 1, device=device0)
                fake = netG(noise)
                label = torch.full((b_size,), real_label, dtype=torch.float, device=device0)
                netG.zero_grad()
                label.fill_(real_label)  # fake labels are real for generator cost
                # Since we just updated D, perform another forward pass of all-fake batch through D
                output = netD(fake).view(-1)
                # Calculate G's loss based on this output
                errG = criterion(output, label)
                # Calculate gradients for G
                errG.backward()
                D_G_z2 = output.mean().item()
                # Update G
                optimizerG.step()

                ### update the exponential moving average
                exp_mov_avg(netGS, netG, alpha=0.999, global_step=iters)

            ############################
            ### Results visualization
            ############################
            if iters % 10 == 0:
                print('iter:{}, G_cost:{:.2f}, D_cost:{:.2f}'.format(iters, errG.item(),
                                                                     errD.item(),
                                                                     ))
            if iters % args.vis_step == 0:
                if dataset == 'celeba':
                    generate_image_celeba(str(iters+0), netGS, fixed_noise, save_dir, device0)

            if iters % args.save_step == 0:
                ### save model
                torch.save(netGS.state_dict(), os.path.join(save_dir, 'netGS_%s.pth' % str(iters+0)))
                torch.save(netD.state_dict(), os.path.join(save_dir, 'netD_%s.pth' % str(iters+0)))

            torch.cuda.empty_cache()
        #if ((iters+1) % 500 == 0):
        #    classify_training(netGS, dataset, iters+1)
if __name__ == '__main__':
    # Parse CLI arguments, persist the run configuration, then train.
    args = parse_arguments()
    save_config(args)
    main(args)
|
[
"numpy.random.seed",
"torch.randn",
"torch.set_default_tensor_type",
"torch.full",
"numpy.random.randint",
"numpy.arange",
"torch.device",
"torchvision.transforms.Normalize",
"os.path.join",
"torch.utils.data.DataLoader",
"numpy.savetxt",
"random.seed",
"numpy.loadtxt",
"torchvision.transforms.CenterCrop",
"numpy.random.shuffle",
"copy.deepcopy",
"torch.randn_like",
"torch.manual_seed",
"torch.norm",
"torch.cuda.is_available",
"numpy.argwhere",
"numpy.delete",
"numpy.concatenate",
"torchvision.transforms.Resize",
"torch.utils.data.Subset",
"torch.ones_like",
"ops.exp_mov_avg",
"torch.cuda.empty_cache",
"torchvision.transforms.ToTensor"
] |
[((1654, 1692), 'torch.norm', 'torch.norm', (['grad_wrt_image'], {'p': '(2)', 'dim': '(1)'}), '(grad_wrt_image, p=2, dim=1)\n', (1664, 1692), False, 'import torch\n'), ((2455, 2493), 'torch.norm', 'torch.norm', (['grad_wrt_image'], {'p': '(2)', 'dim': '(1)'}), '(grad_wrt_image, p=2, dim=1)\n', (2465, 2493), False, 'import torch\n'), ((4009, 4034), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4032, 4034), False, 'import torch\n'), ((4544, 4573), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4555, 4573), False, 'import random\n'), ((4578, 4610), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4592, 4610), True, 'import numpy as np\n'), ((4615, 4650), 'torch.manual_seed', 'torch.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4632, 4650), False, 'import torch\n'), ((7849, 7892), 'numpy.loadtxt', 'np.loadtxt', (['"""index_100k.txt"""'], {'dtype': 'np.int_'}), "('index_100k.txt', dtype=np.int_)\n", (7859, 7892), True, 'import numpy as np\n'), ((7908, 7950), 'torch.utils.data.Subset', 'torch.utils.data.Subset', (['trainset', 'indices'], {}), '(trainset, indices)\n', (7931, 7950), False, 'import torch\n'), ((8014, 8112), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batchsize', 'shuffle': '(True)', 'num_workers': 'workers'}), '(trainset, batch_size=batchsize, shuffle=True,\n num_workers=workers)\n', (8041, 8112), False, 'import torch\n'), ((8433, 8475), 'torch.randn', 'torch.randn', (['(100)', 'nz', '(1)', '(1)'], {'device': 'device0'}), '(100, nz, 1, 1, device=device0)\n', (8444, 8475), False, 'import torch\n'), ((2601, 2627), 'torch.ones_like', 'torch.ones_like', (['clip_coef'], {}), '(clip_coef)\n', (2616, 2627), False, 'import torch\n'), ((2795, 2827), 'torch.randn_like', 'torch.randn_like', (['grad_wrt_image'], {}), '(grad_wrt_image)\n', (2811, 2827), False, 'import torch\n'), ((4050, 4100), 
'torch.device', 'torch.device', (["('cuda:%d' % i if use_cuda else 'cpu')"], {}), "('cuda:%d' % i if use_cuda else 'cpu')\n", (4062, 4100), False, 'import torch\n'), ((4177, 4232), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.cuda.FloatTensor"""'], {}), "('torch.cuda.FloatTensor')\n", (4206, 4232), False, 'import torch\n'), ((6999, 7030), 'numpy.random.shuffle', 'np.random.shuffle', (['indices_full'], {}), '(indices_full)\n', (7016, 7030), True, 'import numpy as np\n'), ((7486, 7528), 'numpy.loadtxt', 'np.loadtxt', (['"""index_20k.txt"""'], {'dtype': 'np.int_'}), "('index_20k.txt', dtype=np.int_)\n", (7496, 7528), True, 'import numpy as np\n'), ((7621, 7656), 'numpy.delete', 'np.delete', (['indices_full', 'remove_idx'], {}), '(indices_full, remove_idx)\n', (7630, 7656), True, 'import numpy as np\n'), ((7726, 7773), 'numpy.concatenate', 'np.concatenate', (['(indices, indices_rest)'], {'axis': '(0)'}), '((indices, indices_rest), axis=0)\n', (7740, 7773), True, 'import numpy as np\n'), ((7782, 7834), 'numpy.savetxt', 'np.savetxt', (['"""index_40k.txt"""', 'indices_slice'], {'fmt': '"""%i"""'}), "('index_40k.txt', indices_slice, fmt='%i')\n", (7792, 7834), True, 'import numpy as np\n'), ((12673, 12697), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (12695, 12697), False, 'import torch\n'), ((4311, 4343), 'numpy.random.randint', 'np.random.randint', (['(10000)'], {'size': '(1)'}), '(10000, size=1)\n', (4328, 4343), True, 'import numpy as np\n'), ((4423, 4454), 'os.path.join', 'os.path.join', (['save_dir', '"""seed*"""'], {}), "(save_dir, 'seed*')\n", (4435, 4454), False, 'import os, sys\n'), ((5221, 5240), 'copy.deepcopy', 'copy.deepcopy', (['netG'], {}), '(netG)\n', (5234, 5240), False, 'import copy\n'), ((6806, 6822), 'numpy.arange', 'np.arange', (['(60000)'], {}), '(60000)\n', (6815, 6822), True, 'import numpy as np\n'), ((7551, 7581), 'numpy.argwhere', 'np.argwhere', (['(indices_full == x)'], {}), 
'(indices_full == x)\n', (7562, 7581), True, 'import numpy as np\n'), ((9063, 9131), 'torch.full', 'torch.full', (['(b_size,)', 'real_label'], {'dtype': 'torch.float', 'device': 'device0'}), '((b_size,), real_label, dtype=torch.float, device=device0)\n', (9073, 9131), False, 'import torch\n'), ((9559, 9604), 'torch.randn', 'torch.randn', (['b_size', 'nz', '(1)', '(1)'], {'device': 'device0'}), '(b_size, nz, 1, 1, device=device0)\n', (9570, 9604), False, 'import torch\n'), ((11751, 11807), 'ops.exp_mov_avg', 'exp_mov_avg', (['netGS', 'netG'], {'alpha': '(0.999)', 'global_step': 'iters'}), '(netGS, netG, alpha=0.999, global_step=iters)\n', (11762, 11807), False, 'from ops import exp_mov_avg\n'), ((6039, 6060), 'torchvision.transforms.Resize', 'transforms.Resize', (['(64)'], {}), '(64)\n', (6056, 6060), True, 'import torchvision.transforms as transforms\n'), ((6070, 6095), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(64)'], {}), '(64)\n', (6091, 6095), True, 'import torchvision.transforms as transforms\n'), ((6105, 6126), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6124, 6126), True, 'import torchvision.transforms as transforms\n'), ((6136, 6190), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (6156, 6190), True, 'import torchvision.transforms as transforms\n'), ((6313, 6363), 'os.path.join', 'os.path.join', (['"""/work/u5366584/exp/datasets/celeba"""'], {}), "('/work/u5366584/exp/datasets/celeba')\n", (6325, 6363), False, 'import os, sys\n'), ((6887, 6903), 'numpy.arange', 'np.arange', (['(50000)'], {}), '(50000)\n', (6896, 6903), True, 'import numpy as np\n'), ((10941, 10986), 'torch.randn', 'torch.randn', (['b_size', 'nz', '(1)', '(1)'], {'device': 'device0'}), '(b_size, nz, 1, 1, device=device0)\n', (10952, 10986), False, 'import torch\n'), ((11046, 11114), 'torch.full', 'torch.full', (['(b_size,)', 'real_label'], 
{'dtype': 'torch.float', 'device': 'device0'}), '((b_size,), real_label, dtype=torch.float, device=device0)\n', (11056, 11114), False, 'import torch\n')]
|
import sys
from logging import getLogger
from typing import Optional
from thonny import ui_utils
from thonny.plugins.micropython.mp_front import (
BareMetalMicroPythonConfigPage,
BareMetalMicroPythonProxy,
)
from thonny.plugins.micropython.uf2dialog import Uf2FlashingDialog
# Module-level logger for this plugin.
logger = getLogger(__name__)

# USB (VID, PID) pairs that must never be treated as CircuitPython boards;
# empty here, exposed via CircuitPythonProxy.get_vids_pids_to_avoid().
VIDS_PIDS_TO_AVOID = set()
class CircuitPythonProxy(BareMetalMicroPythonProxy):
    """Back-end proxy for boards running CircuitPython (as opposed to MicroPython)."""

    @classmethod
    def get_known_usb_vids_pids(cls):
        """Return USB (VID, PID) pairs of known CircuitPython boards.

        A PID of None matches any product id for that vendor.
        Information gathered from
        https://github.com/mu-editor/mu/blob/master/mu/modes/circuitpython.py
        https://github.com/microsoft/uf2-samdx1
        """
        return {
            (0x03EB, 0x2402),  # Generic Corp., SAMD21 or SAME54 Board
            (0x04D8, 0xEC72),  # XinaBox CC03
            (0x04D8, 0xEC75),  # XinaBox CS11
            (0x04D8, 0xED94),  # PyCubed
            (0x04D8, 0xED5E),  # XinaBox CW03
            (0x04D8, 0xEDB3),  # Capable Robot Components, Programmable USB Hub
            (0x04D8, 0xEDBE),  # maholli, SAM32
            (0x04D8, 0xEF66),  # eduSense, senseBox MCU
            (0x1209, 0x2017),  # <NAME>, Mini SAM M4
            (0x1209, 0x4D44),  # Robotics Masters, Robo HAT MM1 M4
            (0x1209, 0x7102),  # Mini SAM M0
            (0x1209, 0xBAB1),  # Electronic Cats Meow Meow
            (0x1209, 0xBAB2),  # Electronic Cats CatWAN USBStick
            (0x1209, 0xBAB3),  # Electronic Cats Bast Pro Mini M0
            (0x1209, 0xBAB6),  # Electronic Cats Escornabot Makech
            (0x16D0, 0x0CDA),  # dadamachines, automat
            (0x1B4F, 0x0016),  # Sparkfun Thing Plus - SAMD51
            (0x1B4F, 0x8D22),  # SparkFun SAMD21 Mini Breakout
            (0x1B4F, 0x8D23),  # SparkFun SAMD21 Dev Breakout
            (0x1D50, 0x60E8),  # PewPew Game Console
            (0x1D50, 0x6110),  # Eitech, Robotics
            (0x1D50, 0x6112),  # Watterott electronic, Wattuino RC
            (0x2341, 0x8053),  # Arduino LLC, Arduino MKR1300
            (0x2341, 0x8057),  # Arduino Nano 33 IoT board
            (0x239A, None),  # Adafruit
            (0x2886, 0x802D),  # Seeed Wio Terminal
            (0x2886, 0x000D),  # Seeed Studio, Grove Zero
            (0x2B04, 0xC00C),  # Particle Argon
            (0x2B04, 0xC00D),  # Particle Boron
            (0x2B04, 0xC00E),  # Particle Xenon
            (0x3171, 0x0101),  # 8086.net Commander
        }

    @classmethod
    def get_vids_pids_to_avoid(cls):
        # FIX: the first parameter of this @classmethod was named `self`;
        # renamed to `cls` per convention (callers are unaffected — the first
        # argument is bound implicitly).
        return VIDS_PIDS_TO_AVOID

    def _get_backend_launcher_path(self) -> str:
        import thonny.plugins.circuitpython.cirpy_back

        return thonny.plugins.circuitpython.cirpy_back.__file__

    @classmethod
    def _is_for_micropython(cls):
        return False

    @classmethod
    def _is_for_circuitpython(cls):
        return True

    @classmethod
    def _is_potential_port(cls, p):
        if "adafruit_board_toolkit" in sys.modules or sys.platform == "linux":
            # can trust p.interface value
            return "CircuitPython CDC " in (p.interface or "")
        else:
            return super()._is_potential_port(p)
class CircuitPythonConfigPage(BareMetalMicroPythonConfigPage):
    """Interpreter configuration page for CircuitPython back-ends."""

    def _get_usb_driver_url(self):
        return "https://learn.adafruit.com/welcome-to-circuitpython/installing-circuitpython"

    def _has_flashing_dialog(self):
        # This back-end ships its own firmware-flashing dialog.
        return True

    def _open_flashing_dialog(self):
        dialog = CircuitPythonFlashingDialog(self)
        ui_utils.show_dialog(dialog)
class CircuitPythonFlashingDialog(Uf2FlashingDialog):
    """UF2 flashing dialog specialised for CircuitPython firmware.

    Downloads the device catalogue (board id -> firmware URL template) and the
    latest CircuitPython release metadata, then flashes a board that has been
    put into UF2 bootloader mode.
    """

    def __init__(self, master):
        # Populated by _download_release_info; board_id -> device record.
        self._devices_info = {}
        super(CircuitPythonFlashingDialog, self).__init__(master)

    def get_instructions(self) -> Optional[str]:
        return (
            "This dialog allows you to install or update CircuitPython firmware on your device.\n"
            "\n"
            "1. Plug in your device into bootloader mode by double-pressing the reset button.\n"
            "2. Wait until device information appears.\n"
            "3. (If nothing happens in 10 seconds, then try shorter or longer pauses between presses.)\n"
            "4. Click 'Install' and wait until done.\n"
            "5. Close the dialog and start programming!"
        )

    def _get_release_info_url(self):
        return "https://api.github.com/repos/adafruit/circuitpython/releases/latest"

    def _get_devices_info_url(self):
        # use the master version, not bundled version
        return "https://raw.githubusercontent.com/thonny/thonny/master/thonny/plugins/circuitpython/devices.json"

    def _download_release_info(self):
        # First download devices
        import json
        from urllib.request import urlopen

        try:
            with urlopen(self._get_devices_info_url()) as fp:
                self._devices_info = json.loads(fp.read().decode("UTF-8"))
        except Exception as e:
            # BUG FIX: this failure concerns the devices-info download, but the
            # warning used to print _get_release_info_url(); report the URL
            # that actually failed.
            logger.warning(
                "Could not find devices info from %s", self._get_devices_info_url(), exc_info=e
            )
            return

        # ... and then release
        super(CircuitPythonFlashingDialog, self)._download_release_info()

    def get_download_url_and_size(self, board_id):
        """Return (firmware URL, approximate size) for *board_id*, or None if
        release/device info has not been fetched yet. Raises RuntimeError when
        the board or its download URL cannot be resolved."""
        # TODO: should take vid/pid also into account. It looks like different models may have same board_id
        if self._release_info is None or self._devices_info is None:
            return None

        if not "tag_name" in self._release_info:
            raise RuntimeError("Could not find tag_name from %s" % self._get_release_info_url())
        release = self._release_info["tag_name"]

        if not self._devices_info.get(board_id, {}).get("FIRMWARE_DOWNLOAD", None):
            raise RuntimeError(
                "Could not find your board (%s) or its download url from %s (consider making a PR). "
                % (board_id, self._get_devices_info_url())
                + "Please find the firmware from https://circuitpython.org/ and install it manually."
            )

        url = self._devices_info[board_id]["FIRMWARE_DOWNLOAD"].format(
            lang="en_US", release=release
        )
        # reporting approximate size for now. Downloader can take precise value from the header later
        size = 2**20  # 1 MiB
        return (url, size)

    def _is_suitable_asset(self, asset, model_id):
        # not used here
        return False

    def get_title(self):
        return "Install CircuitPython firmware for your device"

    def _get_vid_pids_to_wait_for(self):
        return CircuitPythonProxy.get_known_usb_vids_pids()
|
[
"thonny.ui_utils.show_dialog",
"logging.getLogger"
] |
[((294, 313), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (303, 313), False, 'from logging import getLogger\n'), ((3442, 3467), 'thonny.ui_utils.show_dialog', 'ui_utils.show_dialog', (['dlg'], {}), '(dlg)\n', (3462, 3467), False, 'from thonny import ui_utils\n')]
|
import open3d as o3d
import glob, plyfile, numpy as np, multiprocessing as mp, torch
import copy
import numpy as np
import json
import pdb
import os
#CLASS_LABELS = ['cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
CLASS_LABELS = ['wall', 'floor', 'chair', 'table', 'desk', 'bed', 'bookshelf', 'sofa', 'sink', 'bathtub', 'toilet', 'curtain', 'counter', 'door', 'window', 'shower curtain', 'refrigerator', 'picture', 'cabinet', 'otherfurniture']
VALID_CLASS_IDS = np.array([1,2,3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
NYUID_TO_LABEL = {}
LABEL_TO_NYUID = {}
NYUID_TO_SEGID = {}
for i in range(len(VALID_CLASS_IDS)):
LABEL_TO_NYUID[CLASS_LABELS[i]] = VALID_CLASS_IDS[i]
NYUID_TO_LABEL [VALID_CLASS_IDS[i]] = CLASS_LABELS[i]
NYUID_TO_SEGID[VALID_CLASS_IDS[i]] = i
SELECTED_LABEL_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
LABEL_ID_TO_CLASS_ID = {
# 9: 3 #force 'desk' to be the same class_id as 'table'
}
for i, label_id in enumerate(SELECTED_LABEL_IDS):
if label_id not in LABEL_ID_TO_CLASS_ID:
LABEL_ID_TO_CLASS_ID[label_id] = i
UNKNOWN_ID = -100
MIN_INSTANCE_SIZE = 600
remapper=np.ones(500)*(-100)
for i,x in enumerate([1,2,3,4,5,6,7,8,9,10,11,12,14,16,24,28,33,34,36,39]):
remapper[x]=i
## read coordinates, color, semantic_labels, indices and dists of 2 nearest points
def f(fn):
# output_file = output_path + "/" + fn.rsplit("/", 1)[1][:-18]+'.pth'
# if os.path.exists(output_file)==True:
# print("exist:",output_file)
# return 0
a=plyfile.PlyData().read(fn)
v=np.array([list(x) for x in a.elements[0]])
coords=np.ascontiguousarray(v[:,:3])
colors=np.ascontiguousarray(v[:,3:6])/255.0 - 0.5
position=np.ascontiguousarray(v[:,8:10])
dis=np.ascontiguousarray(v[:,10:12])
# filter out very small segements and reassign instance labels
w = np.zeros((len(coords),2), dtype = np.int32)
w[:,:] = UNKNOWN_ID
semantic_labels_ori = np.array(a.elements[0]['label'])
instance_labels = np.array(a.elements[0]['instance_label'])
semantic_labels = np.array(list(
map(lambda label_id: LABEL_ID_TO_CLASS_ID[label_id] if label_id in LABEL_ID_TO_CLASS_ID else UNKNOWN_ID,
semantic_labels_ori)))
for id in range(instance_labels.max()+1):
instance_indices = (instance_labels == id)
instance_size = instance_indices.sum()
# print(instance_size)
if instance_size > MIN_INSTANCE_SIZE:
w[instance_indices,0] = semantic_labels[instance_indices]
w[instance_indices,1] = id
# print(np.unique(w[:,0]))
# print(np.unique(w[:,1]))
# w[:,0] = remapper[]
# w[:,1] = remapper[]
json_file = open(fn[:-3]+'0.010000.segs.json')
region = np.asarray(json.load(json_file)['segIndices'])
all={
"coords":coords,
"colors":colors,
"w":w,
'region': region
}
# print("save to "+ output_file)
# # pdb.set_trace()
# torch.save((coords,colors,w1,w2,position,dis),output_file )
fileName = fn[:-4] + '_instance.pth'
torch.save(all, fileName)
print(fileName)
print("avilable cpus: ", mp.cpu_count())
files = sorted(glob.glob('/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_1/train/*.ply'))
# print(files[0])
# f(files[0])
p = mp.Pool(processes=mp.cpu_count() - 4)
p.map(f, files)
p.close()
p.join()
files = sorted(glob.glob('/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_1/val/*.ply'))
p = mp.Pool(processes=mp.cpu_count() - 4)
p.map(f, files)
p.close()
p.join()
files = sorted(glob.glob('/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_2/train/*.ply'))
p = mp.Pool(processes=mp.cpu_count() - 4)
p.map(f, files)
p.close()
p.join()
files = sorted(glob.glob('/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_2/val/*.ply'))
p = mp.Pool(processes=mp.cpu_count() - 4)
p.map(f, files)
p.close()
p.join()
# # parallel
# p = mp.Pool(processes=mp.cpu_count())
# p.map(f_gene_ply,files)
# p.close()
# p.join()
|
[
"json.load",
"plyfile.PlyData",
"numpy.ones",
"torch.save",
"numpy.array",
"glob.glob",
"numpy.ascontiguousarray",
"multiprocessing.cpu_count"
] |
[((613, 698), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36,\n 39])\n', (621, 698), True, 'import numpy as np\n'), ((1322, 1334), 'numpy.ones', 'np.ones', (['(500)'], {}), '(500)\n', (1329, 1334), True, 'import numpy as np\n'), ((1800, 1830), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['v[:, :3]'], {}), '(v[:, :3])\n', (1820, 1830), True, 'import numpy as np\n'), ((1908, 1940), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['v[:, 8:10]'], {}), '(v[:, 8:10])\n', (1928, 1940), True, 'import numpy as np\n'), ((1948, 1981), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['v[:, 10:12]'], {}), '(v[:, 10:12])\n', (1968, 1981), True, 'import numpy as np\n'), ((2159, 2191), 'numpy.array', 'np.array', (["a.elements[0]['label']"], {}), "(a.elements[0]['label'])\n", (2167, 2191), True, 'import numpy as np\n'), ((2214, 2255), 'numpy.array', 'np.array', (["a.elements[0]['instance_label']"], {}), "(a.elements[0]['instance_label'])\n", (2222, 2255), True, 'import numpy as np\n'), ((3279, 3304), 'torch.save', 'torch.save', (['all', 'fileName'], {}), '(all, fileName)\n', (3289, 3304), False, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((3351, 3365), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3363, 3365), True, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((3383, 3486), 'glob.glob', 'glob.glob', (['"""/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_1/train/*.ply"""'], {}), "(\n '/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_1/train/*.ply'\n )\n", (3392, 3486), False, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((3603, 3704), 'glob.glob', 'glob.glob', (['"""/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_1/val/*.ply"""'], {}), "(\n 
'/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_1/val/*.ply'\n )\n", (3612, 3704), False, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((3789, 3892), 'glob.glob', 'glob.glob', (['"""/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_2/train/*.ply"""'], {}), "(\n '/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_2/train/*.ply'\n )\n", (3798, 3892), False, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((3977, 4078), 'glob.glob', 'glob.glob', (['"""/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_2/val/*.ply"""'], {}), "(\n '/media/hdd/zhengtian/Occuseg/data/scannet_partial/instance/partial_2/val/*.ply'\n )\n", (3986, 4078), False, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((1713, 1730), 'plyfile.PlyData', 'plyfile.PlyData', ([], {}), '()\n', (1728, 1730), False, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((1841, 1872), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['v[:, 3:6]'], {}), '(v[:, 3:6])\n', (1861, 1872), True, 'import numpy as np\n'), ((2965, 2985), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2974, 2985), False, 'import json\n'), ((3532, 3546), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3544, 3546), True, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((3718, 3732), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3730, 3732), True, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((3906, 3920), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3918, 3920), True, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n'), ((4092, 4106), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4104, 4106), True, 'import glob, plyfile, numpy as np, multiprocessing as mp, torch\n')]
|
import os
from subprocess import run
#setting up environment
command = ['bash','-c','source ./environment.sh']
res = run(command)
if res.returncode:
raise Exception('set up environment failed!')
try:
github_user = os.environ['GITHUB_USER']
github_passwd = os.environ['GITHUB_PASSWORD']
project_name = os.environ['PROJECT_NAME']
except:
raise Exception('Environment variables not found. Please, try: "source environment.sh" from project root folder.')
#project name
print('project name ',project_name)
#pull from origin
req = input('pull from origin to master? [y/N]: ') or 'n'
if req.lower() == 'y':
print('pulling from origin to master branch')
res = run(['git','pull','origin','master'])
else:
print('pull skipped.')
#force install package
print('installing package...')
res = run(['pip','install','--upgrade','--force-reinstall','--no-deps','.'])
if res.returncode:
raise Exception('pip failed!')
#run tests
print('running tests...')
res = run(['python', './tests/test_pieces.py'])
if res.returncode:
raise Exception('test not passed!')
#git add
print('git add...')
run(['git', 'add', '.'])
#git commit
print('git commit...')
message = input('git commit message (blank to cancel): ') or 'n'
if message != 'n':
res = run(['git', 'commit', '-m', message])
if res.returncode:
raise Exception('git commit failed!')
else:
print('git commit skipped.')
#git push origin
push_ok = input('Do you want to push changes to remote? [y/N]: ') or 'n'
print('git push origin master...')
if push_ok.lower() == 'y':
run(['git','push','https://{0}:{1}@github.com/{0}/{2}'.format(github_user,github_passwd,project_name),'master'])
else:
print('git push skipped.')
|
[
"subprocess.run"
] |
[((117, 129), 'subprocess.run', 'run', (['command'], {}), '(command)\n', (120, 129), False, 'from subprocess import run\n'), ((795, 870), 'subprocess.run', 'run', (["['pip', 'install', '--upgrade', '--force-reinstall', '--no-deps', '.']"], {}), "(['pip', 'install', '--upgrade', '--force-reinstall', '--no-deps', '.'])\n", (798, 870), False, 'from subprocess import run\n'), ((961, 1002), 'subprocess.run', 'run', (["['python', './tests/test_pieces.py']"], {}), "(['python', './tests/test_pieces.py'])\n", (964, 1002), False, 'from subprocess import run\n'), ((1089, 1113), 'subprocess.run', 'run', (["['git', 'add', '.']"], {}), "(['git', 'add', '.'])\n", (1092, 1113), False, 'from subprocess import run\n'), ((666, 706), 'subprocess.run', 'run', (["['git', 'pull', 'origin', 'master']"], {}), "(['git', 'pull', 'origin', 'master'])\n", (669, 706), False, 'from subprocess import run\n'), ((1241, 1278), 'subprocess.run', 'run', (["['git', 'commit', '-m', message]"], {}), "(['git', 'commit', '-m', message])\n", (1244, 1278), False, 'from subprocess import run\n')]
|
import numpy as np
import cv2
from skimage.io import imread, imsave
from skimage.io import imshow
# lifted from http://blog.christianperone.com/2015/01/real-time-drone-object-tracking-using-python-and-opencv/
def run_main():
cap = cv2.VideoCapture('upabove.mp4')
# Read the first frame of the video
ret, frame = cap.read()
imsave("frame0001.png",cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
imshow(frame)
# Set the ROI (Region of Interest). Actually, this is a
# rectangle of the building that we're tracking
c,r,w,h = 150,250,70,70
track_window = (c,r,w,h)
# Create mask and normalized histogram
roi = frame[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0., 30.,32.)), np.array((180.,255.,255.)))
roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 80, 1)
while True:
ret, frame = cap.read()
if not ret:
break
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], roi_hist, [0,180], 1)
ret, track_window = cv2.meanShift(dst, track_window, term_crit)
x,y,w,h = track_window
cv2.rectangle(frame, (x,y), (x+w,y+h), 255, 2)
cv2.putText(frame, 'Tracked', (x-25,y-10), cv2.FONT_HERSHEY_SIMPLEX,
1, (255,255,255), 2)
cv2.imshow('Tracking', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
run_main()
|
[
"cv2.putText",
"cv2.cvtColor",
"cv2.calcHist",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.rectangle",
"numpy.array",
"cv2.calcBackProject",
"skimage.io.imshow",
"cv2.normalize",
"cv2.destroyAllWindows",
"cv2.meanShift"
] |
[((237, 268), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""upabove.mp4"""'], {}), "('upabove.mp4')\n", (253, 268), False, 'import cv2\n'), ((411, 424), 'skimage.io.imshow', 'imshow', (['frame'], {}), '(frame)\n', (417, 424), False, 'from skimage.io import imshow\n'), ((683, 719), 'cv2.cvtColor', 'cv2.cvtColor', (['roi', 'cv2.COLOR_BGR2HSV'], {}), '(roi, cv2.COLOR_BGR2HSV)\n', (695, 719), False, 'import cv2\n'), ((820, 871), 'cv2.calcHist', 'cv2.calcHist', (['[hsv_roi]', '[0]', 'mask', '[180]', '[0, 180]'], {}), '([hsv_roi], [0], mask, [180], [0, 180])\n', (832, 871), False, 'import cv2\n'), ((876, 934), 'cv2.normalize', 'cv2.normalize', (['roi_hist', 'roi_hist', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)\n', (889, 934), False, 'import cv2\n'), ((1635, 1658), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1656, 1658), False, 'import cv2\n'), ((366, 404), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (378, 404), False, 'import cv2\n'), ((752, 779), 'numpy.array', 'np.array', (['(0.0, 30.0, 32.0)'], {}), '((0.0, 30.0, 32.0))\n', (760, 779), True, 'import numpy as np\n'), ((777, 808), 'numpy.array', 'np.array', (['(180.0, 255.0, 255.0)'], {}), '((180.0, 255.0, 255.0))\n', (785, 808), True, 'import numpy as np\n'), ((1123, 1161), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (1135, 1161), False, 'import cv2\n'), ((1176, 1230), 'cv2.calcBackProject', 'cv2.calcBackProject', (['[hsv]', '[0]', 'roi_hist', '[0, 180]', '(1)'], {}), '([hsv], [0], roi_hist, [0, 180], 1)\n', (1195, 1230), False, 'import cv2\n'), ((1259, 1302), 'cv2.meanShift', 'cv2.meanShift', (['dst', 'track_window', 'term_crit'], {}), '(dst, track_window, term_crit)\n', (1272, 1302), False, 'import cv2\n'), ((1343, 1395), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255)', '(2)'], {}), '(frame, (x, y), 
(x + w, y + h), 255, 2)\n', (1356, 1395), False, 'import cv2\n'), ((1398, 1498), 'cv2.putText', 'cv2.putText', (['frame', '"""Tracked"""', '(x - 25, y - 10)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 255, 255)', '(2)'], {}), "(frame, 'Tracked', (x - 25, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (255, 255, 255), 2)\n", (1409, 1498), False, 'import cv2\n'), ((1517, 1546), 'cv2.imshow', 'cv2.imshow', (['"""Tracking"""', 'frame'], {}), "('Tracking', frame)\n", (1527, 1546), False, 'import cv2\n'), ((1559, 1573), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1570, 1573), False, 'import cv2\n')]
|
import re
import logging
from google.appengine.api import urlfetch
from helpers.team_manipulator import TeamManipulator
from models.team import Team
class TeamHelper(object):
"""
Helper to sort teams and stuff
"""
@classmethod
def sortTeams(self, team_list):
"""
Takes a list of Teams (not a Query object).
"""
# Sometimes there are None objects in the list.
team_list = filter(None, team_list)
team_list = sorted(team_list, key=lambda team: team.team_number)
return team_list
class TeamTpidHelper(object):
# Separates tpids on the FIRST list of all teams.
teamRe = re.compile(r'tpid=[A-Za-z0-9=&;\-:]*?"><b>\d+')
# Extracts the team number from the team result.
teamNumberRe = re.compile(r'\d+$')
# Extracts the tpid from the team result.
tpidRe = re.compile(r'\d+')
# Extracts the link to the next page of results on the FIRST list of all teams.
lastPageRe = re.compile(r'Next ->')
TPID_URL_PATTERN = "https://my.usfirst.org/myarea/index.lasso?page=searchresults&programs=FRC&reports=teams&sort_teams=number&results_size=250&omit_searchform=1&season_FRC=%s&skip_teams=%s"
@classmethod
def scrapeTpids(self, skip, year):
"""
Searches the FIRST list of all teams for tpids, writing in the datastore.
Also creates new Team objects.
This code is modified from <NAME>'s frclinks source and modified
to fit in the TBA framework. He has given us permission to borrow
his code.
"""
while 1:
logging.info("Fetching 250 teams based on %s data, skipping %s" % (year, skip))
tpids_dict = dict()
# FIRST is now checking the 'Referer' header for the string 'usfirst.org'.
# See https://github.com/patfair/frclinks/commit/051bf91d23ca0242dad5b1e471f78468173f597f
teamList = urlfetch.fetch(self.TPID_URL_PATTERN % (year, skip), headers={'Referrer': 'usfirst.org'}, deadline=10)
teamResults = self.teamRe.findall(teamList.content)
for teamResult in teamResults:
teamNumber = self.teamNumberRe.findall(teamResult)[0]
teamTpid = self.tpidRe.findall(teamResult)[0]
logging.info("Team %s TPID was %s in year %s." % (teamNumber, teamTpid, year))
tpids_dict[teamNumber] = teamTpid
teams = [Team(
team_number=int(team_number),
first_tpid=int(tpids_dict[team_number]),
first_tpid_year=int(year),
id="frc" + str(team_number)
)
for team_number in tpids_dict]
TeamManipulator.createOrUpdate(teams)
skip = int(skip) + 250
# Handle degenerate cases.
if skip > 10000:
return None
if len(self.lastPageRe.findall(teamList.content)) == 0:
return None
|
[
"logging.info",
"google.appengine.api.urlfetch.fetch",
"helpers.team_manipulator.TeamManipulator.createOrUpdate",
"re.compile"
] |
[((658, 706), 're.compile', 're.compile', (['"""tpid=[A-Za-z0-9=&;\\\\-:]*?"><b>\\\\d+"""'], {}), '(\'tpid=[A-Za-z0-9=&;\\\\-:]*?"><b>\\\\d+\')\n', (668, 706), False, 'import re\n'), ((778, 797), 're.compile', 're.compile', (['"""\\\\d+$"""'], {}), "('\\\\d+$')\n", (788, 797), False, 'import re\n'), ((857, 875), 're.compile', 're.compile', (['"""\\\\d+"""'], {}), "('\\\\d+')\n", (867, 875), False, 'import re\n'), ((977, 998), 're.compile', 're.compile', (['"""Next ->"""'], {}), "('Next ->')\n", (987, 998), False, 'import re\n'), ((1572, 1651), 'logging.info', 'logging.info', (["('Fetching 250 teams based on %s data, skipping %s' % (year, skip))"], {}), "('Fetching 250 teams based on %s data, skipping %s' % (year, skip))\n", (1584, 1651), False, 'import logging\n'), ((1882, 1988), 'google.appengine.api.urlfetch.fetch', 'urlfetch.fetch', (['(self.TPID_URL_PATTERN % (year, skip))'], {'headers': "{'Referrer': 'usfirst.org'}", 'deadline': '(10)'}), "(self.TPID_URL_PATTERN % (year, skip), headers={'Referrer':\n 'usfirst.org'}, deadline=10)\n", (1896, 1988), False, 'from google.appengine.api import urlfetch\n'), ((2607, 2644), 'helpers.team_manipulator.TeamManipulator.createOrUpdate', 'TeamManipulator.createOrUpdate', (['teams'], {}), '(teams)\n', (2637, 2644), False, 'from helpers.team_manipulator import TeamManipulator\n'), ((2216, 2294), 'logging.info', 'logging.info', (["('Team %s TPID was %s in year %s.' % (teamNumber, teamTpid, year))"], {}), "('Team %s TPID was %s in year %s.' % (teamNumber, teamTpid, year))\n", (2228, 2294), False, 'import logging\n')]
|
"""
Some Tools For Coder
author: <NAME>
website: https://github.com/IanVzs/Halahayawa
Last edited: 10 03 2021
"""
import time
import json
import hashlib
from datetime import datetime
def json_loads(str_data):
try:
return json.loads(str_data)
except:
return {}
def json_dumps(data, ensure_ascii=False):
try:
data = json.dumps(data, ensure_ascii=ensure_ascii)
except:
pass
return data
def datetime2str(data, fmt="%Y-%m-%d %H:%M:%S"):
result = ''
if data and isinstance(data, datetime):
result = data.strftime(fmt)
return result
def str2datetime(data, fmt="%Y-%m-%d %H:%M:%S"):
result = None
if data and isinstance(data, str):
result = datetime.strptime(data, fmt)
return result
def datetime2int(data, unit='s'):
result = 0
if data and isinstance(data,
str) and len(data) == len("2021-01-18 14:55:00"):
data = str2datetime(data)
if data and isinstance(data, datetime):
result = int(time.mktime(data.timetuple()))
return result
def count_age(str_date: str = '', dt_date: datetime = None) -> str:
"""
根据出生年月日计算当前年龄(年月周天)
"""
age = ''
if str_date and len(str_date) >= len("2020-01-01"):
try:
str_date = str_date[:10]
dt_date = datetime.strptime(str_date, "%Y-%m-%d")
except:
dt_date = None
if dt_date:
now = datetime.now()
m = (now.year * 12 + now.month) - (dt_date.year * 12 + dt_date.month)
y = m > 12 and int(m / 12)
if y:
age = f"{y}岁"
elif m:
age = f"{m}月"
else:
d = now.day - dt_date.day
w = int(d / 7)
age = w and f"{w}周" or f"{d or 1}天"
return age
def count_ago(str_date: str = '', dt_date: datetime = None) -> str:
"""
ret: x(秒|分钟|小时|天|周|月|年|)之前
"""
msg = ''
if str_date and len(str_date) >= len("2020-01-01"):
try:
str_date = str_date[:10]
dt_date = datetime.strptime(str_date, "%Y-%m-%d")
except:
dt_date = None
if dt_date:
now = datetime.now()
interval = now - dt_date
if interval.days > 0:
msg = msg = count_age(dt_date=dt_date)
msg = msg.replace('岁', '年')
elif interval.days == 0 and interval.seconds > 10:
_s = interval.seconds
_m = _s and int(_s / 60)
_h = _m and int(_m / 60)
_str_s = (_s and f"{_s}秒")
_str_m = (_m and f"{_m}分钟" or _str_s)
_str_h = (_h and f"{_h}小时" or _str_m)
msg = _str_s and _str_m and _str_h
else:
msg = '刚刚'
msg = f"{msg}之前" if "刚刚" not in msg else msg
return msg
def time_now(ty: str = 'str', fmt="%Y-%m-%d %H:%M:%S"):
now = datetime.now()
if ty == "str":
return datetime2str(now)
else:
return now
def today(ty: str = 'str', fmt="%Y-%m-%d"):
now = datetime.now()
if ty == "str":
return datetime2str(now, fmt=fmt)
else:
return now
def md5_convert(string):
"""
计算字符串md5值
:param string: 输入字符串
:return: 字符串md5
"""
m = hashlib.md5()
m.update(string.encode())
return m.hexdigest()
def check_keys(keys, must=None) -> bool:
if must and set(keys) > set(must):
return True
elif must:
return False
else:
return True
def lenth_time(secend: int) -> str:
show = ''
power = 2
dict_power = {2: 'h', 1: 'm', 0: 's'}
while secend:
num, secend = secend // (60**power), secend % (60**power)
if num:
show += f"{num}{dict_power[power]}"
power -= 1
return show
|
[
"hashlib.md5",
"json.loads",
"json.dumps",
"datetime.datetime.strptime",
"datetime.datetime.now"
] |
[((2870, 2884), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2882, 2884), False, 'from datetime import datetime\n'), ((3023, 3037), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3035, 3037), False, 'from datetime import datetime\n'), ((3239, 3252), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3250, 3252), False, 'import hashlib\n'), ((236, 256), 'json.loads', 'json.loads', (['str_data'], {}), '(str_data)\n', (246, 256), False, 'import json\n'), ((355, 398), 'json.dumps', 'json.dumps', (['data'], {'ensure_ascii': 'ensure_ascii'}), '(data, ensure_ascii=ensure_ascii)\n', (365, 398), False, 'import json\n'), ((730, 758), 'datetime.datetime.strptime', 'datetime.strptime', (['data', 'fmt'], {}), '(data, fmt)\n', (747, 758), False, 'from datetime import datetime\n'), ((1450, 1464), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1462, 1464), False, 'from datetime import datetime\n'), ((2173, 2187), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2185, 2187), False, 'from datetime import datetime\n'), ((1337, 1376), 'datetime.datetime.strptime', 'datetime.strptime', (['str_date', '"""%Y-%m-%d"""'], {}), "(str_date, '%Y-%m-%d')\n", (1354, 1376), False, 'from datetime import datetime\n'), ((2060, 2099), 'datetime.datetime.strptime', 'datetime.strptime', (['str_date', '"""%Y-%m-%d"""'], {}), "(str_date, '%Y-%m-%d')\n", (2077, 2099), False, 'from datetime import datetime\n')]
|
"""
Module to train the DEEPred NN classifier
"""
import torch
from torch.utils.data import RandomSampler
import torch.nn as nn
import torch.optim as optim
from ..models import Model
from ..io.utils import split_batch, shuffle_data
def train(
x_train: torch.Tensor,
y_train: torch.Tensor,
epochs: int,
parameters: dict,
minibatch_size: int = 32,
batchnorm: bool = True,
p_dropout: float = 0.5,
learning_rate: float = 0.005,
) -> nn.Module:
"""
Train the DEEPred pytroch classifier
Parameters
----------
x_train : torch.Tensor
The tensor containing the feature vectors for training
y_train : torch.Tensor
The tensor containing the label vectors for training
epochs : int
The number of epochs for which to train the model
parameters : dict
The parameters that define the nodes of the neural network
minibatch_size : int, optional
The size of the minibatches created during training
Default value is 32
batchnorm : bool, optional
Flag that turns on batch normalization of the hidden layers
Default value is true
p_dropout : float, optional
The probability of the dropout
Default value is 0.5
learning_rate : float, optional
The learning rate for the neural network
Default value is 0.005
Returns
-------
List[str]
The list of protein ids associated with the GO term
"""
# NOTE: Normalize data before passing it into this function
model = Model(parameters, batchnorm=batchnorm, p_dropout=p_dropout)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
model.train()
for epoch in range(epochs):
loss_per_epoch = 0.0
x_shuff, y_shuff = shuffle_data(x_train, y_train)
for batch_ind, x_mb, y_mb in split_batch(x_shuff, y_shuff, minibatch_size):
if x_mb.shape[0] < 5:
continue
optimizer.zero_grad()
y_pred = model(x_mb)
loss = criterion(y_pred, y_mb)
loss.backward()
loss_per_epoch += loss.item()
optimizer.step()
print(f"Epoch {epoch}: train loss: {loss_per_epoch}")
return model
|
[
"torch.nn.BCEWithLogitsLoss"
] |
[((1737, 1759), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (1757, 1759), True, 'import torch.nn as nn\n')]
|
import math
import numpy as np
from django.shortcuts import get_object_or_404
from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from scipy.spatial import distance
from .models import Person
from .serializers import (
PersonIdSerializer,
PersonCreateUpdateSerializer,
GetPersonSerializer
)
from .utils import get_vector
class PersonViewSet(viewsets.ViewSet):
def list(self, request):
"""Выводит ID вех экземпляров Person"""
queryset = Person.objects.all()
serializer = PersonIdSerializer(queryset, many=True)
return Response(serializer.data)
def create(self, request):
"""Создает экземпляр Person с first_name и last_name."""
serializer = PersonCreateUpdateSerializer(data=request.data)
if serializer.is_valid():
person = Person.objects.create(**serializer.validated_data)
return Response(
{'id': person.pk}, status=status.HTTP_201_CREATED
)
return Response(status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk):
"""Выводит ия и фамилию одного экземпляра Person."""
queryset = get_object_or_404(Person, pk=pk)
serializer = GetPersonSerializer(queryset)
return Response(serializer.data)
def update(self, request, pk):
"""Добавляет вектор к экземпляру Person."""
size = (300, 300) # размер, до которого нужно сжать изображение
vector = get_vector(request, size) # получает вектор из запроса
serializer = PersonCreateUpdateSerializer(data=request.data)
if serializer.is_valid():
Person.objects.filter(pk=pk).update(
**serializer.validated_data,
vector=vector, have_vector=True
)
return Response(status=status.HTTP_200_OK)
return Response(status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
"""Удаляет экземпляр Person."""
obj = Person.objects.filter(pk=pk).delete()
if obj[0] > 0:
return Response(
status=status.HTTP_204_NO_CONTENT
)
else:
return Response(
status=status.HTTP_400_BAD_REQUEST
)
@api_view()
def compare(request, pk_1, pk_2):
"""Находит евклидово расстояние
между векторами двух экземпляров Person.
"""
vector1 = get_object_or_404(Person, pk=pk_1).vector
vector2 = get_object_or_404(Person, pk=pk_2).vector
vector1 = vector1[1:-1].split(', ')
vector2 = vector2[1:-1].split(', ')
for i in range(len(vector1)):
vector1[i] = float(vector1[i])
for i in range(len(vector2)):
vector2[i] = float(vector2[i])
result = math.sqrt(
sum([(a - b) ** 2 for a, b in zip(vector1, vector2)])
)
return Response({'result': result})
|
[
"django.shortcuts.get_object_or_404",
"rest_framework.decorators.api_view",
"rest_framework.response.Response"
] |
[((2364, 2374), 'rest_framework.decorators.api_view', 'api_view', ([], {}), '()\n', (2372, 2374), False, 'from rest_framework.decorators import api_view\n'), ((2956, 2984), 'rest_framework.response.Response', 'Response', (["{'result': result}"], {}), "({'result': result})\n", (2964, 2984), False, 'from rest_framework.response import Response\n'), ((655, 680), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (663, 680), False, 'from rest_framework.response import Response\n'), ((1080, 1124), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(status=status.HTTP_400_BAD_REQUEST)\n', (1088, 1124), False, 'from rest_framework.response import Response\n'), ((1243, 1275), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Person'], {'pk': 'pk'}), '(Person, pk=pk)\n', (1260, 1275), False, 'from django.shortcuts import get_object_or_404\n'), ((1343, 1368), 'rest_framework.response.Response', 'Response', (['serializer.data'], {}), '(serializer.data)\n', (1351, 1368), False, 'from rest_framework.response import Response\n'), ((1960, 2004), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(status=status.HTTP_400_BAD_REQUEST)\n', (1968, 2004), False, 'from rest_framework.response import Response\n'), ((2517, 2551), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Person'], {'pk': 'pk_1'}), '(Person, pk=pk_1)\n', (2534, 2551), False, 'from django.shortcuts import get_object_or_404\n'), ((2573, 2607), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Person'], {'pk': 'pk_2'}), '(Person, pk=pk_2)\n', (2590, 2607), False, 'from django.shortcuts import get_object_or_404\n'), ((974, 1033), 'rest_framework.response.Response', 'Response', (["{'id': person.pk}"], {'status': 'status.HTTP_201_CREATED'}), "({'id': person.pk}, status=status.HTTP_201_CREATED)\n", (982, 1033), False, 'from rest_framework.response 
import Response\n'), ((1908, 1943), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (1916, 1943), False, 'from rest_framework.response import Response\n'), ((2177, 2220), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_204_NO_CONTENT'}), '(status=status.HTTP_204_NO_CONTENT)\n', (2185, 2220), False, 'from rest_framework.response import Response\n'), ((2286, 2330), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(status=status.HTTP_400_BAD_REQUEST)\n', (2294, 2330), False, 'from rest_framework.response import Response\n')]
|
"""
EfficientNet for ImageNet-1K, implemented in Keras.
Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
"""
__all__ = ['efficientnet_model', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3',
'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_b0b',
'efficientnet_b1b', 'efficientnet_b2b', 'efficientnet_b3b']
import os
import math
from keras import layers as nn
from keras.models import Model
from .common import is_channels_first, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, se_block
def calc_tf_padding(x,
kernel_size,
strides=1,
dilation=1):
"""
Calculate TF-same like padding size.
Parameters:
----------
x : tensor
Input tensor.
kernel_size : int
Convolution window size.
strides : int, default 1
Strides of the convolution.
dilation : int, default 1
Dilation value for convolution layer.
Returns
-------
tuple of 4 int
The size of the padding.
"""
height, width = x.shape[2:]
oh = math.ceil(height / strides)
ow = math.ceil(width / strides)
pad_h = max((oh - 1) * strides + (kernel_size - 1) * dilation + 1 - height, 0)
pad_w = max((ow - 1) * strides + (kernel_size - 1) * dilation + 1 - width, 0)
return (pad_h // 2, pad_h - pad_h // 2), (pad_w // 2, pad_w - pad_w // 2)
def round_channels(channels,
                   factor,
                   divisor=8):
    """
    Round weighted channel number.
    Parameters:
    ----------
    channels : int
        Original number of channels.
    factor : float
        Weight factor.
    divisor : int
        Alignment value.
    Returns
    -------
    int
        Weighted number of channels, aligned to `divisor`.
    """
    scaled = channels * factor
    rounded = max(int(scaled + divisor / 2.0) // divisor * divisor, divisor)
    # Never round down by more than 10% of the scaled value.
    if rounded < 0.9 * scaled:
        rounded += divisor
    return rounded
def effi_dws_conv_unit(x,
                       in_channels,
                       out_channels,
                       strides,
                       bn_epsilon,
                       activation,
                       tf_mode,
                       name="effi_dws_conv_unit"):
    """
    EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each convolution
    layers.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    name : str, default 'effi_dws_conv_unit'
        Block name.
    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Identity shortcut is only possible when the unit keeps both the channel
    # count and the spatial size.
    residual = (in_channels == out_channels) and (strides == 1)
    if residual:
        identity = x
    if tf_mode:
        # Emulate TF 'SAME' padding with an explicit zero-padding layer, then
        # run the depthwise conv with padding 0.
        x = nn.ZeroPadding2D(
            padding=calc_tf_padding(x, kernel_size=3),
            name=name + "/dw_conv_pad")(x)
    x = dwconv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=in_channels,
        padding=(0 if tf_mode else 1),
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/dw_conv")
    # Squeeze-and-excitation gate (reduction=4).
    x = se_block(
        x=x,
        channels=in_channels,
        reduction=4,
        activation=activation,
        name=name + "/se")
    # 1x1 pointwise projection; no activation before the residual add.
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        bn_epsilon=bn_epsilon,
        activation=None,
        name=name + "/pw_conv")
    if residual:
        x = nn.add([x, identity], name=name + "/add")
    return x
def effi_inv_res_unit(x,
                      in_channels,
                      out_channels,
                      kernel_size,
                      strides,
                      expansion_factor,
                      bn_epsilon,
                      activation,
                      tf_mode,
                      name="effi_inv_res_unit"):
    """
    EfficientNet inverted residual unit.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    expansion_factor : int
        Factor for expansion of channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    name : str, default 'effi_inv_res_unit'
        Unit name.
    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    # Identity shortcut only when input/output shapes match.
    residual = (in_channels == out_channels) and (strides == 1)
    mid_channels = in_channels * expansion_factor
    # Only 3x3 and 5x5 depthwise kernels are supported; any other size leaves
    # dwconv_block_fn as None and fails at the call below.
    dwconv_block_fn = dwconv3x3_block if kernel_size == 3 else (dwconv5x5_block if kernel_size == 5 else None)
    if residual:
        identity = x
    # Expansion: 1x1 conv widening in_channels -> mid_channels.
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/conv1")
    if tf_mode:
        # Emulate TF 'SAME' padding with an explicit zero-padding layer.
        x = nn.ZeroPadding2D(
            padding=calc_tf_padding(x, kernel_size=kernel_size, strides=strides),
            name=name + "/conv2_pad")(x)
    x = dwconv_block_fn(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        padding=(0 if tf_mode else (kernel_size // 2)),
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/conv2")
    # Squeeze-and-excitation gate (reduction=24 on the expanded channels).
    x = se_block(
        x=x,
        channels=mid_channels,
        reduction=24,
        activation=activation,
        name=name + "/se")
    # Projection: 1x1 conv back to out_channels, no activation before the add.
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        bn_epsilon=bn_epsilon,
        activation=None,
        name=name + "/conv3")
    if residual:
        x = nn.add([x, identity], name=name + "/add")
    return x
def effi_init_block(x,
                    in_channels,
                    out_channels,
                    bn_epsilon,
                    activation,
                    tf_mode,
                    name="effi_init_block"):
    """
    EfficientNet specific initial block.
    Parameters:
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    name : str, default 'effi_init_block'
        Block name.
    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    if tf_mode:
        # Emulate TF 'SAME' padding for the strided stem convolution.
        x = nn.ZeroPadding2D(
            padding=calc_tf_padding(x, kernel_size=3, strides=2),
            name=name + "/conv_pad")(x)
    # Stem: 3x3 convolution with stride 2 (halves the spatial resolution).
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        padding=(0 if tf_mode else 1),
        bn_epsilon=bn_epsilon,
        activation=activation,
        name=name + "/conv")
    return x
def efficientnet_model(channels,
                       init_block_channels,
                       final_block_channels,
                       kernel_sizes,
                       strides_per_stage,
                       expansion_factors,
                       dropout_rate=0.2,
                       tf_mode=False,
                       bn_epsilon=1e-5,
                       in_channels=3,
                       in_size=(224, 224),
                       classes=1000):
    """
    EfficientNet(-B0) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Numbers of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Number of kernel sizes for each unit.
    strides_per_stage : list int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Number of expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    # Input layout depends on the backend image data format (NCHW vs NHWC).
    input_shape = (in_channels, in_size[0], in_size[1]) if is_channels_first() else\
        (in_size[0], in_size[1], in_channels)
    input = nn.Input(shape=input_shape)
    activation = "swish"
    # Stem block (strided 3x3 convolution).
    x = effi_init_block(
        x=input,
        in_channels=in_channels,
        out_channels=init_block_channels,
        bn_epsilon=bn_epsilon,
        activation=activation,
        tf_mode=tf_mode,
        name="features/init_block")
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        kernel_sizes_per_stage = kernel_sizes[i]
        expansion_factors_per_stage = expansion_factors[i]
        for j, out_channels in enumerate(channels_per_stage):
            kernel_size = kernel_sizes_per_stage[j]
            expansion_factor = expansion_factors_per_stage[j]
            # Only the first unit of a stage may downsample.
            strides = strides_per_stage[i] if (j == 0) else 1
            if i == 0:
                # First stage uses depthwise-separable units (no expansion).
                x = effi_dws_conv_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_epsilon=bn_epsilon,
                    activation=activation,
                    tf_mode=tf_mode,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
            else:
                # Remaining stages use inverted residual units.
                x = effi_inv_res_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    expansion_factor=expansion_factor,
                    bn_epsilon=bn_epsilon,
                    activation=activation,
                    tf_mode=tf_mode,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
            in_channels = out_channels
    # Head: 1x1 conv to the final feature width, then global pooling,
    # optional dropout and the linear classifier.
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=final_block_channels,
        bn_epsilon=bn_epsilon,
        activation=activation,
        name="features/final_block")
    in_channels = final_block_channels
    x = nn.GlobalAveragePooling2D(
        name="features/final_pool")(x)
    if dropout_rate > 0.0:
        x = nn.Dropout(
            rate=dropout_rate,
            name="output/dropout")(x)
    x = nn.Dense(
        units=classes,
        input_dim=in_channels,
        name="output/fc")(x)
    model = Model(inputs=input, outputs=x)
    # Metadata consumed by downstream tooling (e.g. the _test smoke test).
    model.in_size = in_size
    model.classes = classes
    return model
def get_efficientnet(version,
                     in_size,
                     tf_mode=False,
                     bn_epsilon=1e-5,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".keras", "models"),
                     **kwargs):
    """
    Create EfficientNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of EfficientNet ('b0'...'b7').
    in_size : tuple of two ints
        Spatial size of the expected input image.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    """
    # Per-version (expected input size, depth factor, width factor, dropout rate).
    version_params = {
        "b0": ((224, 224), 1.0, 1.0, 0.2),
        "b1": ((240, 240), 1.1, 1.0, 0.2),
        "b2": ((260, 260), 1.2, 1.1, 0.3),
        "b3": ((300, 300), 1.4, 1.2, 0.3),
        "b4": ((380, 380), 1.8, 1.4, 0.4),
        "b5": ((456, 456), 2.2, 1.6, 0.4),
        "b6": ((528, 528), 2.6, 1.8, 0.5),
        "b7": ((600, 600), 3.1, 2.0, 0.5),
    }
    if version not in version_params:
        raise ValueError("Unsupported EfficientNet version {}".format(version))
    expected_in_size, depth_factor, width_factor, dropout_rate = version_params[version]
    assert (in_size == expected_in_size)

    # Base (B0) architecture description, per layer.
    init_block_channels = 32
    layers = [1, 2, 2, 3, 3, 4, 1]
    downsample = [1, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 40, 80, 112, 192, 320]
    expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6]
    kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3]
    strides_per_stage = [1, 2, 2, 2, 1, 2, 1]
    final_block_channels = 1280

    # Scale depth (unit counts) and width (channels) by the version factors.
    layers = [int(math.ceil(li * depth_factor)) for li in layers]
    channels_per_layers = [round_channels(ci, width_factor) for ci in channels_per_layers]

    def _group_stages(values):
        # Expand per-layer values into stage-grouped lists: an entry with
        # downsample == 1 opens a new stage, otherwise its units are merged
        # into the previous stage.  (Replaces four identical reduce/lambda
        # expressions from the original implementation.)
        stages = []
        for value, count, starts_stage in zip(values, layers, downsample):
            if starts_stage != 0:
                stages.append([value] * count)
            else:
                stages[-1] += [value] * count
        return stages

    channels = _group_stages(channels_per_layers)
    kernel_sizes = _group_stages(kernel_sizes_per_layers)
    expansion_factors = _group_stages(expansion_factors_per_layers)
    # Only the stride of the first unit of each stage matters.
    strides_per_stage = [si[0] for si in _group_stages(strides_per_stage)]

    init_block_channels = round_channels(init_block_channels, width_factor)
    if width_factor > 1.0:
        assert (int(final_block_channels * width_factor) == round_channels(final_block_channels, width_factor))
        final_block_channels = round_channels(final_block_channels, width_factor)

    net = efficientnet_model(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernel_sizes=kernel_sizes,
        strides_per_stage=strides_per_stage,
        expansion_factors=expansion_factors,
        dropout_rate=dropout_rate,
        tf_mode=tf_mode,
        bn_epsilon=bn_epsilon,
        in_size=in_size,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def efficientnet_b0(in_size=(224, 224), **kwargs):
    """Build EfficientNet-B0 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (224, 224).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b0", version="b0", in_size=in_size, **kwargs)
def efficientnet_b1(in_size=(240, 240), **kwargs):
    """Build EfficientNet-B1 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (240, 240).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b1", version="b1", in_size=in_size, **kwargs)
def efficientnet_b2(in_size=(260, 260), **kwargs):
    """Build EfficientNet-B2 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (260, 260).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b2", version="b2", in_size=in_size, **kwargs)
def efficientnet_b3(in_size=(300, 300), **kwargs):
    """Build EfficientNet-B3 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (300, 300).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b3", version="b3", in_size=in_size, **kwargs)
def efficientnet_b4(in_size=(380, 380), **kwargs):
    """Build EfficientNet-B4 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (380, 380).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b4", version="b4", in_size=in_size, **kwargs)
def efficientnet_b5(in_size=(456, 456), **kwargs):
    """Build EfficientNet-B5 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (456, 456).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b5", version="b5", in_size=in_size, **kwargs)
def efficientnet_b6(in_size=(528, 528), **kwargs):
    """Build EfficientNet-B6 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (528, 528).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b6", version="b6", in_size=in_size, **kwargs)
def efficientnet_b7(in_size=(600, 600), **kwargs):
    """Build EfficientNet-B7 (https://arxiv.org/abs/1905.11946).

    in_size: expected spatial input size, default (600, 600).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b7", version="b7", in_size=in_size, **kwargs)
def efficientnet_b0b(in_size=(224, 224), **kwargs):
    """Build EfficientNet-B0-b, the TF-implementation-like variant
    (https://arxiv.org/abs/1905.11946): TF 'SAME' padding and bn epsilon 1e-3.

    in_size: expected spatial input size, default (224, 224).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b0b", version="b0", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b1b(in_size=(240, 240), **kwargs):
    """Build EfficientNet-B1-b, the TF-implementation-like variant
    (https://arxiv.org/abs/1905.11946): TF 'SAME' padding and bn epsilon 1e-3.

    in_size: expected spatial input size, default (240, 240).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b1b", version="b1", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b2b(in_size=(260, 260), **kwargs):
    """Build EfficientNet-B2-b, the TF-implementation-like variant
    (https://arxiv.org/abs/1905.11946): TF 'SAME' padding and bn epsilon 1e-3.

    in_size: expected spatial input size, default (260, 260).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b2b", version="b2", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b3b(in_size=(300, 300), **kwargs):
    """Build EfficientNet-B3-b, the TF-implementation-like variant
    (https://arxiv.org/abs/1905.11946): TF 'SAME' padding and bn epsilon 1e-3.

    in_size: expected spatial input size, default (300, 300).
    Extra keyword arguments (e.g. `pretrained`, `root`) are forwarded to
    get_efficientnet.
    """
    return get_efficientnet(model_name="efficientnet_b3b", version="b3", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def _test():
    """Smoke test: build every variant, check its trainable-parameter count
    and the classifier output shape for a single zero image."""
    import numpy as np
    import keras
    pretrained = False
    models = [
        efficientnet_b0,
        efficientnet_b1,
        efficientnet_b2,
        efficientnet_b3,
        efficientnet_b4,
        efficientnet_b5,
        efficientnet_b6,
        efficientnet_b7,
        efficientnet_b0b,
        efficientnet_b1b,
        efficientnet_b2b,
        efficientnet_b3b,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.summary()
        # Compare against the known parameter count of each variant.
        weight_count = keras.utils.layer_utils.count_params(net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != efficientnet_b0 or weight_count == 5288548)
        assert (model != efficientnet_b1 or weight_count == 7794184)
        assert (model != efficientnet_b2 or weight_count == 9109994)
        assert (model != efficientnet_b3 or weight_count == 12233232)
        assert (model != efficientnet_b4 or weight_count == 19341616)
        assert (model != efficientnet_b5 or weight_count == 30389784)
        assert (model != efficientnet_b6 or weight_count == 43040704)
        assert (model != efficientnet_b7 or weight_count == 66347960)
        assert (model != efficientnet_b0b or weight_count == 5288548)
        assert (model != efficientnet_b1b or weight_count == 7794184)
        assert (model != efficientnet_b2b or weight_count == 9109994)
        assert (model != efficientnet_b3b or weight_count == 12233232)
        # Build a dummy input in the backend's data layout (NCHW or NHWC).
        if is_channels_first():
            x = np.zeros((1, 3, net.in_size[0], net.in_size[1]), np.float32)
        else:
            x = np.zeros((1, net.in_size[0], net.in_size[1], 3), np.float32)
        y = net.predict(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when executed as a script.
if __name__ == "__main__":
    _test()
|
[
"math.ceil",
"keras.layers.Dropout",
"keras.layers.add",
"numpy.zeros",
"keras.models.Model",
"keras.layers.GlobalAveragePooling2D",
"keras.layers.Dense",
"keras.utils.layer_utils.count_params",
"keras.layers.Input",
"os.path.join"
] |
[((1245, 1272), 'math.ceil', 'math.ceil', (['(height / strides)'], {}), '(height / strides)\n', (1254, 1272), False, 'import math\n'), ((1282, 1308), 'math.ceil', 'math.ceil', (['(width / strides)'], {}), '(width / strides)\n', (1291, 1308), False, 'import math\n'), ((9902, 9929), 'keras.layers.Input', 'nn.Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (9910, 9929), True, 'from keras import layers as nn\n'), ((12152, 12182), 'keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'x'}), '(inputs=input, outputs=x)\n', (12157, 12182), False, 'from keras.models import Model\n'), ((12495, 12532), 'os.path.join', 'os.path.join', (['"""~"""', '""".keras"""', '"""models"""'], {}), "('~', '.keras', 'models')\n", (12507, 12532), False, 'import os\n'), ((4100, 4141), 'keras.layers.add', 'nn.add', (['[x, identity]'], {'name': "(name + '/add')"}), "([x, identity], name=name + '/add')\n", (4106, 4141), True, 'from keras import layers as nn\n'), ((6651, 6692), 'keras.layers.add', 'nn.add', (['[x, identity]'], {'name': "(name + '/add')"}), "([x, identity], name=name + '/add')\n", (6657, 6692), True, 'from keras import layers as nn\n'), ((11851, 11904), 'keras.layers.GlobalAveragePooling2D', 'nn.GlobalAveragePooling2D', ([], {'name': '"""features/final_pool"""'}), "(name='features/final_pool')\n", (11876, 11904), True, 'from keras import layers as nn\n'), ((12046, 12110), 'keras.layers.Dense', 'nn.Dense', ([], {'units': 'classes', 'input_dim': 'in_channels', 'name': '"""output/fc"""'}), "(units=classes, input_dim=in_channels, name='output/fc')\n", (12054, 12110), True, 'from keras import layers as nn\n'), ((25474, 25533), 'keras.utils.layer_utils.count_params', 'keras.utils.layer_utils.count_params', (['net.trainable_weights'], {}), '(net.trainable_weights)\n', (25510, 25533), False, 'import keras\n'), ((11957, 12009), 'keras.layers.Dropout', 'nn.Dropout', ([], {'rate': 'dropout_rate', 'name': '"""output/dropout"""'}), "(rate=dropout_rate, 
name='output/dropout')\n", (11967, 12009), True, 'from keras import layers as nn\n'), ((14883, 14911), 'math.ceil', 'math.ceil', (['(li * depth_factor)'], {}), '(li * depth_factor)\n', (14892, 14911), False, 'import math\n'), ((26484, 26544), 'numpy.zeros', 'np.zeros', (['(1, 3, net.in_size[0], net.in_size[1])', 'np.float32'], {}), '((1, 3, net.in_size[0], net.in_size[1]), np.float32)\n', (26492, 26544), True, 'import numpy as np\n'), ((26575, 26635), 'numpy.zeros', 'np.zeros', (['(1, net.in_size[0], net.in_size[1], 3)', 'np.float32'], {}), '((1, net.in_size[0], net.in_size[1], 3), np.float32)\n', (26583, 26635), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import namedtuple, OrderedDict
import io
import re
from jinja2 import Template
from parametergenerate import utility
try:
#py2
unicode=unicode
except NameError:
#py3
unicode=str
def find_value(target_file, search_params,
               encoding=None, trap_undefined_error=False):
    """Scan `target_file` line by line and collect the values described by
    `search_params` (each a dict with 'regexp', 'element' and optional
    'value', 'value_type', 'ignore' keys — see ValidatedParam).

    Returns a nested dict built from the param paths, holding lists of the
    matched element dicts.  When `trap_undefined_error` is True, a param
    that matched nothing raises KeyError.
    """
    # Detect the file encoding automatically when none was given.
    if encoding is None:
        encoding = utility.check_encode(target_file)
    collector = ListCollector(target_file, encoding=encoding)
    v_params = [ValidatedParam(p) for p in search_params]
    param_elements = [[] for _ in v_params]
    for i, element in collector.elements_many(v_params):
        p = v_params[i]
        if p.template:
            element = _apply_template(p.template, element)
        element = _apply_cast(p.value_type, element, trap_undefined_error)
        # pyyaml does not support OrderedDict, so convert to a plain dict.
        # One way to keep input order would be to switch pyyaml to ruamel.yaml.
        param_elements[i].append(dict(element))
    # Join the collected elements into one table.
    key_value_table = {}
    for s_param, elements in zip(search_params, param_elements):
        if not elements:
            if trap_undefined_error:
                raise KeyError('no matches found for {}'.format(s_param))
        else:
            # Only add a result list to the output when elements were found.
            tmp_table = utility.path2dict(s_param, elements)
            utility.dict_list_marge(key_value_table, tmp_table)
    return key_value_table
def _apply_template(template, element):
e = {}
for k, v in element.items():
e[k] = template.render({'VALUE': v})
return e
def _apply_cast(v_type, element, trap_undefined_error):
    """Cast every value of `element` to `v_type` via utility.cast, keeping keys."""
    return {key: utility.cast(value, v_type, trap_undefined_error)
            for key, value in element.items()}
class ValidatedParam:
    """A search parameter parsed and validated from a raw param dict.

    Attributes: regexp (compiled pattern), element_names (list of unicode
    names, one per capture group), ignore (line prefix to skip, or None),
    template (jinja2 Template for post-processing values, or None) and
    value_type (target type name for casting, or None).
    """
    def __init__(self, param_dict):
        regexp, element_names, ignore, template, value_type = self._parse(param_dict)
        self._validate(regexp, element_names)
        self.regexp = regexp
        self.element_names = element_names
        self.ignore = ignore
        self.template = template
        self.value_type = value_type

    @staticmethod
    def _parse(param):
        """Extract and normalize the fields of a raw param dict.

        Raises KeyError when the mandatory 'regexp' or 'element' key is missing.
        """
        try:
            regexp = param['regexp']
            element_names = param['element']
        except KeyError:
            raise KeyError('"regexp" and "element" are required in param')
        regexp = re.compile(regexp)
        element_names = [unicode(en) for en in element_names]
        template = param.get('value')
        if template is not None:
            template = Template(template)
        value_type = param.get('value_type')
        ignore = param.get('ignore')
        return regexp, element_names, ignore, template, value_type

    @staticmethod
    def _validate(regexp, element_names):
        """Check that the capture-group count of the compiled `regexp`
        matches the number of element names.

        Raises ValueError when element_names is empty or the counts differ.
        """
        name_count = len(element_names)
        if name_count == 0:
            raise ValueError('At least one item is required in element.')
        # `regexp` arrives already compiled from _parse, so read its .groups
        # attribute directly instead of re-compiling it (the original called
        # re.compile on an already-compiled pattern).
        group_count = regexp.groups
        if group_count != name_count:
            raise ValueError('The number of element items ({}) '
                             'and number of regexp capture groups ({}) '
                             'should match.'.format(name_count, group_count))
class ListCollector:
def __init__(self, file_path, encoding='utf-8'):
self.file_path = file_path
self.encoding = encoding
def elements_many(self, params):
"""Generate (param_index, element) where element matches param conditions.
args:
params: iterable of ValidatedParameter
"""
for line in self._lines():
for i, p in enumerate(params):
element = self._detect_element(line, p)
if element:
yield i, element
def elements(self, param):
"""Generate each element matching param conditions
args:
param ValidatedParameter
"""
for _, element in self.elements_many([param]):
yield element
def _detect_element(self, line, param):
if param.ignore is not None:
if line.lstrip().startswith(param.ignore):
return
found = param.regexp.match(line)
if found is None:
return
return OrderedDict(zip(param.element_names, found.groups()))
def _lines(self):
with io.open(self.file_path, encoding=self.encoding) as f:
for line in f:
line = line.rstrip('\n')
yield line
|
[
"jinja2.Template",
"parametergenerate.utility.cast",
"parametergenerate.utility.check_encode",
"parametergenerate.utility.dict_list_marge",
"io.open",
"parametergenerate.utility.path2dict",
"re.compile"
] |
[((447, 480), 'parametergenerate.utility.check_encode', 'utility.check_encode', (['target_file'], {}), '(target_file)\n', (467, 480), False, 'from parametergenerate import utility\n'), ((1750, 1795), 'parametergenerate.utility.cast', 'utility.cast', (['v', 'v_type', 'trap_undefined_error'], {}), '(v, v_type, trap_undefined_error)\n', (1762, 1795), False, 'from parametergenerate import utility\n'), ((2427, 2445), 're.compile', 're.compile', (['regexp'], {}), '(regexp)\n', (2437, 2445), False, 'import re\n'), ((1360, 1396), 'parametergenerate.utility.path2dict', 'utility.path2dict', (['s_param', 'elements'], {}), '(s_param, elements)\n', (1377, 1396), False, 'from parametergenerate import utility\n'), ((1409, 1460), 'parametergenerate.utility.dict_list_marge', 'utility.dict_list_marge', (['key_value_table', 'tmp_table'], {}), '(key_value_table, tmp_table)\n', (1432, 1460), False, 'from parametergenerate import utility\n'), ((2603, 2621), 'jinja2.Template', 'Template', (['template'], {}), '(template)\n', (2611, 2621), False, 'from jinja2 import Template\n'), ((2999, 3017), 're.compile', 're.compile', (['regexp'], {}), '(regexp)\n', (3009, 3017), False, 'import re\n'), ((4408, 4455), 'io.open', 'io.open', (['self.file_path'], {'encoding': 'self.encoding'}), '(self.file_path, encoding=self.encoding)\n', (4415, 4455), False, 'import io\n')]
|
import pandas as pd
### Desktop application assignment
def kimetsu_search(path, word):
    """Return True when `word` appears in the `name` column of the CSV at `path`.

    Parameters:
    path : str -- path to a CSV file with a `name` column.
    word : str -- value to look for.
    """
    # Read the search source.
    df = pd.read_csv(path)
    # Test membership directly; the original if/else returning literal
    # True/False was redundant.
    return word in set(df["name"])
def add_to_kimetsu(path, word):
    """Append `word` to the `name` column of the CSV at `path` and rewrite it.

    Bug fixes vs. the original:
    - read from the `path` argument instead of the hard-coded "./source.csv";
    - write with index=False so repeated calls do not accumulate unnamed
      index columns in the file;
    - normalized the encoding spelling to the canonical "utf-8-sig" (the
      original "utf_8-sig" resolved to the same codec only via codec alias
      normalization).
    """
    df = pd.read_csv(path)
    source = list(df["name"])
    source.append(word)
    # Rewrite the CSV with only the name column.
    df = pd.DataFrame(source, columns=["name"])
    df.to_csv(path, index=False, encoding="utf-8-sig")
    print(source)
|
[
"pandas.read_csv",
"pandas.DataFrame"
] |
[((91, 108), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (102, 108), True, 'import pandas as pd\n'), ((273, 300), 'pandas.read_csv', 'pd.read_csv', (['"""./source.csv"""'], {}), "('./source.csv')\n", (284, 300), True, 'import pandas as pd\n'), ((379, 417), 'pandas.DataFrame', 'pd.DataFrame', (['source'], {'columns': "['name']"}), "(source, columns=['name'])\n", (391, 417), True, 'import pandas as pd\n')]
|
import pytest
def test_concat_with_duplicate_columns():
    # Column-wise concat (axis=1) of frames that share column "b" would
    # produce duplicate column labels; captivity should reject it.
    import captivity
    import pandas as pd
    with pytest.raises(captivity.CaptivityException):
        pd.concat(
            [pd.DataFrame({"a": [1], "b": [2]}), pd.DataFrame({"c": [0], "b": [3]}),],
            axis=1,
        )
def test_concat_mismatching_columns():
    # Row-wise concat (axis=0) of frames whose column sets differ
    # ({"a", "b"} vs {"c", "b"}) should be rejected by captivity.
    import captivity
    import pandas as pd
    with pytest.raises(captivity.CaptivityException):
        pd.concat(
            [pd.DataFrame({"a": [1], "b": [2]}), pd.DataFrame({"c": [0], "b": [3]}),],
            axis=0,
        )
|
[
"pandas.DataFrame",
"pytest.raises"
] |
[((113, 156), 'pytest.raises', 'pytest.raises', (['captivity.CaptivityException'], {}), '(captivity.CaptivityException)\n', (126, 156), False, 'import pytest\n'), ((390, 433), 'pytest.raises', 'pytest.raises', (['captivity.CaptivityException'], {}), '(captivity.CaptivityException)\n', (403, 433), False, 'import pytest\n'), ((190, 224), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1], 'b': [2]}"], {}), "({'a': [1], 'b': [2]})\n", (202, 224), True, 'import pandas as pd\n'), ((226, 260), 'pandas.DataFrame', 'pd.DataFrame', (["{'c': [0], 'b': [3]}"], {}), "({'c': [0], 'b': [3]})\n", (238, 260), True, 'import pandas as pd\n'), ((467, 501), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1], 'b': [2]}"], {}), "({'a': [1], 'b': [2]})\n", (479, 501), True, 'import pandas as pd\n'), ((503, 537), 'pandas.DataFrame', 'pd.DataFrame', (["{'c': [0], 'b': [3]}"], {}), "({'c': [0], 'b': [3]})\n", (515, 537), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-04 06:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the model 'Battles' to 'Battle' (Django convention: model
    # names are singular).

    dependencies = [
        ('api', '0002_auto_20160904_1201'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Battles',
            new_name='Battle',
        ),
    ]
|
[
"django.db.migrations.RenameModel"
] |
[((286, 347), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Battles"""', 'new_name': '"""Battle"""'}), "(old_name='Battles', new_name='Battle')\n", (308, 347), False, 'from django.db import migrations\n')]
|
import argparse
if __name__ == '__main__':
    # CLI front-end: report either a single image pair or a folder pair,
    # depending on the --type flag.
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', "--type", help="file to process single file, folder to process a full folder")
    parser.add_argument('input', help='source image/ folder that needs to be fully converted')
    parser.add_argument('output', help='destinaton image/ folder with cartoonized images')
    args = parser.parse_args()

    src, dst = args.input, args.output
    if args.type == "file":
        print(f"The files are {src} and {dst}")
    if args.type == "folder":
        print(f"The folders are {src} and {dst}")
|
[
"argparse.ArgumentParser"
] |
[((58, 83), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (81, 83), False, 'import argparse\n')]
|
from __future__ import print_function
import os
from termcolor import colored
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import models as Models
import global_vars as Global
from utils.iterative_trainer import IterativeTrainer, IterativeTrainerConfig
from utils.logger import Logger
from datasets import MirroredDataset
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from models.autoencoders import VAE_Loss
def get_ae_config(args, model, dataset, home_path, BCE_Loss):
    """Build the IterativeTrainerConfig for training a plain autoencoder on D1.

    Args:
        args: parsed CLI namespace (uses batch_size, workers, device,
            no_visualize).
        model: the autoencoder to train; moved to ``args.device``.
        dataset: the D1 dataset; split 80/20 into local train/validation.
        home_path: directory the Logger writes to.
        BCE_Loss: if True use BCEWithLogitsLoss, otherwise MSELoss (and the
            model's output is sigmoid-squashed by default).

    Returns:
        A fully populated IterativeTrainerConfig.
    """
    print("Preparing training D1 for %s" % dataset.name)

    # Local 80%/20% train/validation split.
    ds_train, ds_valid = dataset.split_dataset(0.8)

    if dataset.name in Global.mirror_augment:
        print(colored("Mirror augmenting %s" % dataset.name, 'green'))
        ds_train = ds_train + MirroredDataset(ds_train)

    # Multi-threaded loaders for the three phases.
    loader_train = DataLoader(ds_train, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.workers, pin_memory=True)
    loader_valid = DataLoader(ds_valid, batch_size=args.batch_size,
                              num_workers=args.workers, pin_memory=True)
    loader_all = DataLoader(dataset, batch_size=args.batch_size,
                            num_workers=args.workers, pin_memory=True)

    model = model.to(args.device)

    # Reconstruction criterion: BCE-with-logits, or MSE with the model
    # producing sigmoid outputs by default.
    if BCE_Loss:
        criterion = nn.BCEWithLogitsLoss().to(args.device)
    else:
        criterion = nn.MSELoss().to(args.device)
        model.default_sigmoid = True

    config = IterativeTrainerConfig()
    config.name = 'autoencoder_%s_%s' % (dataset.name, model.preferred_name())
    config.train_loader = loader_train
    config.valid_loader = loader_valid
    config.phases = {
        'train': {'dataset': loader_train, 'backward': True},
        'test': {'dataset': loader_valid, 'backward': False},
        'all': {'dataset': loader_all, 'backward': False},
    }
    config.criterion = criterion
    config.classification = False
    config.cast_float_label = False
    config.autoencoder_target = True
    config.stochastic_gradient = True
    config.visualize = not args.no_visualize
    config.sigmoid_viz = BCE_Loss
    config.model = model
    config.logger = Logger(home_path)
    config.optim = optim.Adam(model.parameters(), lr=1e-3)
    config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        config.optim, patience=10, threshold=1e-3, min_lr=1e-6,
        factor=0.1, verbose=True)
    config.max_epoch = 120

    # A model may override any of the defaults above via its train_config().
    if hasattr(model, 'train_config'):
        for key, value in model.train_config().items():
            print('Overriding config.%s' % key)
            config.__setattr__(key, value)

    return config
def get_vae_config(args, model, dataset, home_path, BCE_Loss):
    """Build the IterativeTrainerConfig for training a VAE on D1.

    Same setup as ``get_ae_config`` except the criterion is the model-aware
    ``VAE_Loss`` (reconstruction + KL) and sigmoid visualization is off.

    Args:
        args: parsed CLI namespace (uses batch_size, workers, device,
            no_visualize).
        model: the VAE to train; moved to ``args.device``.
        dataset: the D1 dataset; split 80/20 into local train/validation.
        home_path: directory the Logger writes to.
        BCE_Loss: forwarded to ``VAE_Loss`` to select the reconstruction term.

    Returns:
        A fully populated IterativeTrainerConfig.
    """
    print("Preparing training D1 for %s" % dataset.name)

    # Local 80%/20% train/validation split.
    ds_train, ds_valid = dataset.split_dataset(0.8)

    if dataset.name in Global.mirror_augment:
        print(colored("Mirror augmenting %s" % dataset.name, 'green'))
        ds_train = ds_train + MirroredDataset(ds_train)

    # Multi-threaded loaders for the three phases.
    loader_train = DataLoader(ds_train, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.workers, pin_memory=True)
    loader_valid = DataLoader(ds_valid, batch_size=args.batch_size,
                              num_workers=args.workers, pin_memory=True)
    loader_all = DataLoader(dataset, batch_size=args.batch_size,
                            num_workers=args.workers, pin_memory=True)

    model = model.to(args.device)

    # VAE criterion (reconstruction + KL); reconstruction term chosen by BCE_Loss.
    criterion = VAE_Loss(model, BCE_Loss)

    config = IterativeTrainerConfig()
    config.name = 'vae_%s_%s' % (dataset.name, model.preferred_name())
    config.train_loader = loader_train
    config.valid_loader = loader_valid
    config.phases = {
        'train': {'dataset': loader_train, 'backward': True},
        'test': {'dataset': loader_valid, 'backward': False},
        'all': {'dataset': loader_all, 'backward': False},
    }
    config.criterion = criterion
    config.classification = False
    config.cast_float_label = False
    config.autoencoder_target = True
    config.stochastic_gradient = True
    config.visualize = not args.no_visualize
    config.sigmoid_viz = False
    config.model = model
    config.logger = Logger(home_path)
    config.optim = optim.Adam(model.parameters(), lr=1e-3)
    config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        config.optim, patience=10, threshold=1e-3, min_lr=1e-6,
        factor=0.1, verbose=True)
    config.max_epoch = 120

    # A model may override any of the defaults above via its train_config().
    if hasattr(model, 'train_config'):
        for key, value in model.train_config().items():
            print('Overriding config.%s' % key)
            config.__setattr__(key, value)

    return config
def train_BCE_AE(args, model, dataset):
    """Train `model` as an autoencoder on `dataset` with the BCE reconstruction loss."""
    train_autoencoder(args, model, dataset, BCE_Loss=True)
def train_MSE_AE(args, model, dataset):
    """Train `model` as an autoencoder on `dataset` with the MSE reconstruction loss."""
    train_autoencoder(args, model, dataset, BCE_Loss=False)
def train_autoencoder(args, model, dataset, BCE_Loss):
    """Train an autoencoder on `dataset`, checkpointing the best weights.

    Prefixes the model's netid with "BCE."/"MSE." so BCE- and MSE-trained
    variants checkpoint into distinct directories. Skips training entirely if
    a `<best>.done` marker already exists from a previous completed run.
    Saves `model.best.pth` whenever validation loss improves (only when
    `args.save` is set) and `model.last.pth` at the end.
    """
    if BCE_Loss:
        model.netid = "BCE." + model.netid
    else:
        model.netid = "MSE." + model.netid
    home_path = Models.get_ref_model_path(args, model.__class__.__name__, dataset.name, model_setup=True, suffix_str=model.netid)
    hbest_path = os.path.join(home_path, 'model.best.pth')
    hlast_path = os.path.join(home_path, 'model.last.pth')
    if not os.path.isdir(home_path):
        os.makedirs(home_path)
    # The ".done" marker signals a finished previous run; skip if present.
    if not os.path.isfile(hbest_path+".done"):
        config = get_ae_config(args, model, dataset, home_path, BCE_Loss=BCE_Loss)
        trainer = IterativeTrainer(config, args)
        print(colored('Training from scratch', 'green'))
        best_loss = 999999999
        for epoch in range(1, config.max_epoch+1):
            # Track the learning rates.
            lrs = [float(param_group['lr']) for param_group in config.optim.param_groups]
            config.logger.log('LRs', lrs, epoch)
            config.logger.get_measure('LRs').legend = ['LR%d'%i for i in range(len(lrs))]
            # One epoch of train and test.
            trainer.run_epoch(epoch, phase='train')
            trainer.run_epoch(epoch, phase='test')
            train_loss = config.logger.get_measure('train_loss').mean_epoch()
            test_loss = config.logger.get_measure('test_loss').mean_epoch()
            config.logger.writer.add_scalar('train_loss', train_loss, epoch)
            config.logger.writer.add_scalar('test_loss', test_loss, epoch)
            # ReduceLROnPlateau steps on the *training* loss.
            config.scheduler.step(train_loss)
            # vis in tensorboard
            # Only the first validation batch is visualized (note the `break`).
            # NOTE(review): `image.cuda()` assumes CUDA is available — this
            # visualization will fail on CPU-only runs; confirm intended.
            for (image, label) in config.valid_loader:
                prediction = model(image.cuda()).data.cpu().squeeze().numpy()
                # Min-max normalize to [0, 1] for display.
                prediction = (prediction - prediction.min())/(prediction.max() - prediction.min())
                if len(prediction.shape) > 3 and prediction.shape[1] == 3:
                    prediction = prediction.transpose((0,2,3,1)) # change to N W H C
                N = min(prediction.shape[0], 5)
                fig, ax = plt.subplots(N, 2)
                image = image.data.squeeze().numpy()
                image = (image - image.min())/(image.max() - image.min())
                if len(image.shape) > 3 and image.shape[1] == 3:
                    image = image.transpose((0,2,3,1))
                for i in range(N):
                    ax[i, 0].imshow(prediction[i])
                    ax[i, 1].imshow(image[i])
                config.logger.writer.add_figure('Vis', fig, epoch)
                plt.close(fig)
                break
            if config.visualize:
                # Show the average losses for all the phases in one figure.
                config.logger.visualize_average_keys('.*_loss', 'Average Loss', trainer.visdom)
                config.logger.visualize_average_keys('.*_accuracy', 'Average Accuracy', trainer.visdom)
                config.logger.visualize_average('LRs', trainer.visdom)
            # Save the logger for future reference.
            torch.save(config.logger.measures, os.path.join(home_path, 'logger.pth'))
            # Saving a checkpoint. Enable if needed!
            # if args.save and epoch % 10 == 0:
            #     print('Saving a %s at iter %s'%(colored('snapshot', 'yellow'), colored('%d'%epoch, 'yellow')))
            #     torch.save(config.model.state_dict(), os.path.join(home_path, 'model.%d.pth'%epoch))
            # Keep the best-so-far model by validation loss (only with --save).
            if args.save and test_loss < best_loss:
                print('Updating the on file model with %s'%(colored('%.4f'%test_loss, 'red')))
                best_loss = test_loss
                torch.save(config.model.state_dict(), hbest_path)
                torch.save({'finished':True}, hbest_path+".done")
        torch.save(config.model.state_dict(), hlast_path)
        if config.visualize:
            trainer.visdom.save([trainer.visdom.env])
    else:
        print("Skipping %s"%(colored(home_path, 'yellow')))
def train_variational_autoencoder(args, model, dataset, BCE_Loss=True):
    """Train a VAE on `dataset`, checkpointing the best weights.

    Same training loop as `train_autoencoder` but uses the VAE config
    (`get_vae_config`, i.e. `VAE_Loss` criterion). Prefixes the model's netid
    with "BCE."/"MSE.", skips if a `<best>.done` marker exists, saves
    `model.best.pth` on validation-loss improvement (when `args.save` is set)
    and `model.last.pth` at the end.
    """
    if BCE_Loss:
        model.netid = "BCE." + model.netid
    else:
        model.netid = "MSE." + model.netid
    home_path = Models.get_ref_model_path(args, model.__class__.__name__, dataset.name, model_setup=True, suffix_str=model.netid)
    hbest_path = os.path.join(home_path, 'model.best.pth')
    hlast_path = os.path.join(home_path, 'model.last.pth')
    if not os.path.isdir(home_path):
        os.makedirs(home_path)
    # The ".done" marker signals a finished previous run; skip if present.
    if not os.path.isfile(hbest_path+".done"):
        config = get_vae_config(args, model, dataset, home_path, BCE_Loss)
        trainer = IterativeTrainer(config, args)
        print(colored('Training from scratch', 'green'))
        best_loss = 999999999
        for epoch in range(1, config.max_epoch+1):
            # Track the learning rates.
            lrs = [float(param_group['lr']) for param_group in config.optim.param_groups]
            config.logger.log('LRs', lrs, epoch)
            config.logger.get_measure('LRs').legend = ['LR%d'%i for i in range(len(lrs))]
            # One epoch of train and test.
            trainer.run_epoch(epoch, phase='train')
            trainer.run_epoch(epoch, phase='test')
            train_loss = config.logger.get_measure('train_loss').mean_epoch()
            test_loss = config.logger.get_measure('test_loss').mean_epoch()
            config.logger.writer.add_scalar('train_loss', train_loss, epoch)
            config.logger.writer.add_scalar('test_loss', test_loss, epoch)
            # ReduceLROnPlateau steps on the *training* loss.
            config.scheduler.step(train_loss)
            # vis in tensorboard
            # Only the first validation batch is visualized (note the `break`).
            # NOTE(review): `image.cuda()` assumes CUDA is available — this
            # visualization will fail on CPU-only runs; confirm intended.
            for (image, label) in config.valid_loader:
                prediction = model(image.cuda()).data.cpu().squeeze().numpy()
                # Min-max normalize to [0, 1] for display.
                prediction = (prediction - prediction.min()) / (prediction.max() - prediction.min())
                if len(prediction.shape) > 3 and prediction.shape[1] == 3:
                    prediction = prediction.transpose((0,2,3,1)) # change to N W H C
                N = min(prediction.shape[0], 5)
                fig, ax = plt.subplots(N, 2)
                image = image.data.squeeze().numpy()
                image = (image - image.min()) / (image.max() - image.min())
                if len(image.shape) > 3 and image.shape[1] == 3:
                    image = image.transpose((0,2,3,1))
                for i in range(N):
                    ax[i, 0].imshow(prediction[i])
                    ax[i, 1].imshow(image[i])
                config.logger.writer.add_figure('Vis', fig, epoch)
                plt.close(fig)
                break
            if config.visualize:
                # Show the average losses for all the phases in one figure.
                config.logger.visualize_average_keys('.*_loss', 'Average Loss', trainer.visdom)
                config.logger.visualize_average_keys('.*_accuracy', 'Average Accuracy', trainer.visdom)
                config.logger.visualize_average('LRs', trainer.visdom)
            # Save the logger for future reference.
            torch.save(config.logger.measures, os.path.join(home_path, 'logger.pth'))
            # Saving a checkpoint. Enable if needed!
            # if args.save and epoch % 10 == 0:
            #     print('Saving a %s at iter %s'%(colored('snapshot', 'yellow'), colored('%d'%epoch, 'yellow')))
            #     torch.save(config.model.state_dict(), os.path.join(home_path, 'model.%d.pth'%epoch))
            # Keep the best-so-far model by validation loss (only with --save).
            if args.save and test_loss < best_loss:
                print('Updating the on file model with %s'%(colored('%.4f'%test_loss, 'red')))
                best_loss = test_loss
                torch.save(config.model.state_dict(), hbest_path)
                torch.save({'finished':True}, hbest_path+".done")
        torch.save(config.model.state_dict(), hlast_path)
        if config.visualize:
            trainer.visdom.save([trainer.visdom.env])
    else:
        print("Skipping %s"%(colored(home_path, 'yellow')))
|
[
"models.get_ref_model_path",
"utils.iterative_trainer.IterativeTrainer",
"utils.iterative_trainer.IterativeTrainerConfig",
"datasets.MirroredDataset",
"os.path.isfile",
"utils.logger.Logger",
"os.path.join",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"matplotlib.pyplot.subplots",
"torch.nn.BCEWithLogitsLoss",
"termcolor.colored",
"matplotlib.use",
"os.makedirs",
"os.path.isdir",
"models.autoencoders.VAE_Loss",
"torch.save"
] |
[((400, 421), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (414, 421), False, 'import matplotlib\n'), ((998, 1108), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=\n args.workers, pin_memory=True)\n', (1008, 1108), False, 'from torch.utils.data import DataLoader\n'), ((1123, 1218), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_ds'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(valid_ds, batch_size=args.batch_size, num_workers=args.workers,\n pin_memory=True)\n', (1133, 1218), False, 'from torch.utils.data import DataLoader\n'), ((1234, 1328), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(dataset, batch_size=args.batch_size, num_workers=args.workers,\n pin_memory=True)\n', (1244, 1328), False, 'from torch.utils.data import DataLoader\n'), ((1643, 1667), 'utils.iterative_trainer.IterativeTrainerConfig', 'IterativeTrainerConfig', ([], {}), '()\n', (1665, 1667), False, 'from utils.iterative_trainer import IterativeTrainer, IterativeTrainerConfig\n'), ((2431, 2448), 'utils.logger.Logger', 'Logger', (['home_path'], {}), '(home_path)\n', (2437, 2448), False, 'from utils.logger import Logger\n'), ((2532, 2657), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['config.optim'], {'patience': '(10)', 'threshold': '(0.001)', 'min_lr': '(1e-06)', 'factor': '(0.1)', 'verbose': '(True)'}), '(config.optim, patience=10, threshold=\n 0.001, min_lr=1e-06, factor=0.1, verbose=True)\n', (2568, 2657), True, 'import torch.optim as optim\n'), ((3417, 3527), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 
'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=\n args.workers, pin_memory=True)\n', (3427, 3527), False, 'from torch.utils.data import DataLoader\n'), ((3542, 3637), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_ds'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(valid_ds, batch_size=args.batch_size, num_workers=args.workers,\n pin_memory=True)\n', (3552, 3637), False, 'from torch.utils.data import DataLoader\n'), ((3653, 3747), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.workers', 'pin_memory': '(True)'}), '(dataset, batch_size=args.batch_size, num_workers=args.workers,\n pin_memory=True)\n', (3663, 3747), False, 'from torch.utils.data import DataLoader\n'), ((3848, 3873), 'models.autoencoders.VAE_Loss', 'VAE_Loss', (['model', 'BCE_Loss'], {}), '(model, BCE_Loss)\n', (3856, 3873), False, 'from models.autoencoders import VAE_Loss\n'), ((3912, 3936), 'utils.iterative_trainer.IterativeTrainerConfig', 'IterativeTrainerConfig', ([], {}), '()\n', (3934, 3936), False, 'from utils.iterative_trainer import IterativeTrainer, IterativeTrainerConfig\n'), ((4689, 4706), 'utils.logger.Logger', 'Logger', (['home_path'], {}), '(home_path)\n', (4695, 4706), False, 'from utils.logger import Logger\n'), ((4790, 4915), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['config.optim'], {'patience': '(10)', 'threshold': '(0.001)', 'min_lr': '(1e-06)', 'factor': '(0.1)', 'verbose': '(True)'}), '(config.optim, patience=10, threshold=\n 0.001, min_lr=1e-06, factor=0.1, verbose=True)\n', (4826, 4915), True, 'import torch.optim as optim\n'), ((5579, 5696), 'models.get_ref_model_path', 'Models.get_ref_model_path', (['args', 'model.__class__.__name__', 'dataset.name'], {'model_setup': '(True)', 'suffix_str': 'model.netid'}), '(args, 
model.__class__.__name__, dataset.name,\n model_setup=True, suffix_str=model.netid)\n', (5604, 5696), True, 'import models as Models\n'), ((5710, 5751), 'os.path.join', 'os.path.join', (['home_path', '"""model.best.pth"""'], {}), "(home_path, 'model.best.pth')\n", (5722, 5751), False, 'import os\n'), ((5769, 5810), 'os.path.join', 'os.path.join', (['home_path', '"""model.last.pth"""'], {}), "(home_path, 'model.last.pth')\n", (5781, 5810), False, 'import os\n'), ((9576, 9693), 'models.get_ref_model_path', 'Models.get_ref_model_path', (['args', 'model.__class__.__name__', 'dataset.name'], {'model_setup': '(True)', 'suffix_str': 'model.netid'}), '(args, model.__class__.__name__, dataset.name,\n model_setup=True, suffix_str=model.netid)\n', (9601, 9693), True, 'import models as Models\n'), ((9707, 9748), 'os.path.join', 'os.path.join', (['home_path', '"""model.best.pth"""'], {}), "(home_path, 'model.best.pth')\n", (9719, 9748), False, 'import os\n'), ((9766, 9807), 'os.path.join', 'os.path.join', (['home_path', '"""model.last.pth"""'], {}), "(home_path, 'model.last.pth')\n", (9778, 9807), False, 'import os\n'), ((5823, 5847), 'os.path.isdir', 'os.path.isdir', (['home_path'], {}), '(home_path)\n', (5836, 5847), False, 'import os\n'), ((5857, 5879), 'os.makedirs', 'os.makedirs', (['home_path'], {}), '(home_path)\n', (5868, 5879), False, 'import os\n'), ((5892, 5928), 'os.path.isfile', 'os.path.isfile', (["(hbest_path + '.done')"], {}), "(hbest_path + '.done')\n", (5906, 5928), False, 'import os\n'), ((6029, 6059), 'utils.iterative_trainer.IterativeTrainer', 'IterativeTrainer', (['config', 'args'], {}), '(config, args)\n', (6045, 6059), False, 'from utils.iterative_trainer import IterativeTrainer, IterativeTrainerConfig\n'), ((9112, 9164), 'torch.save', 'torch.save', (["{'finished': True}", "(hbest_path + '.done')"], {}), "({'finished': True}, hbest_path + '.done')\n", (9122, 9164), False, 'import torch\n'), ((9820, 9844), 'os.path.isdir', 'os.path.isdir', (['home_path'], 
{}), '(home_path)\n', (9833, 9844), False, 'import os\n'), ((9854, 9876), 'os.makedirs', 'os.makedirs', (['home_path'], {}), '(home_path)\n', (9865, 9876), False, 'import os\n'), ((9889, 9925), 'os.path.isfile', 'os.path.isfile', (["(hbest_path + '.done')"], {}), "(hbest_path + '.done')\n", (9903, 9925), False, 'import os\n'), ((10018, 10048), 'utils.iterative_trainer.IterativeTrainer', 'IterativeTrainer', (['config', 'args'], {}), '(config, args)\n', (10034, 10048), False, 'from utils.iterative_trainer import IterativeTrainer, IterativeTrainerConfig\n'), ((13106, 13158), 'torch.save', 'torch.save', (["{'finished': True}", "(hbest_path + '.done')"], {}), "({'finished': True}, hbest_path + '.done')\n", (13116, 13158), False, 'import torch\n'), ((786, 841), 'termcolor.colored', 'colored', (["('Mirror augmenting %s' % dataset.name)", '"""green"""'], {}), "('Mirror augmenting %s' % dataset.name, 'green')\n", (793, 841), False, 'from termcolor import colored\n'), ((875, 900), 'datasets.MirroredDataset', 'MirroredDataset', (['train_ds'], {}), '(train_ds)\n', (890, 900), False, 'from datasets import MirroredDataset\n'), ((3205, 3260), 'termcolor.colored', 'colored', (["('Mirror augmenting %s' % dataset.name)", '"""green"""'], {}), "('Mirror augmenting %s' % dataset.name, 'green')\n", (3212, 3260), False, 'from termcolor import colored\n'), ((3294, 3319), 'datasets.MirroredDataset', 'MirroredDataset', (['train_ds'], {}), '(train_ds)\n', (3309, 3319), False, 'from datasets import MirroredDataset\n'), ((6074, 6115), 'termcolor.colored', 'colored', (['"""Training from scratch"""', '"""green"""'], {}), "('Training from scratch', 'green')\n", (6081, 6115), False, 'from termcolor import colored\n'), ((10063, 10104), 'termcolor.colored', 'colored', (['"""Training from scratch"""', '"""green"""'], {}), "('Training from scratch', 'green')\n", (10070, 10104), False, 'from termcolor import colored\n'), ((1470, 1492), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), 
'()\n', (1490, 1492), True, 'import torch.nn as nn\n'), ((1539, 1551), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1549, 1551), True, 'import torch.nn as nn\n'), ((7487, 7505), 'matplotlib.pyplot.subplots', 'plt.subplots', (['N', '(2)'], {}), '(N, 2)\n', (7499, 7505), True, 'import matplotlib.pyplot as plt\n'), ((7968, 7982), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7977, 7982), True, 'import matplotlib.pyplot as plt\n'), ((8486, 8523), 'os.path.join', 'os.path.join', (['home_path', '"""logger.pth"""'], {}), "(home_path, 'logger.pth')\n", (8498, 8523), False, 'import os\n'), ((9343, 9371), 'termcolor.colored', 'colored', (['home_path', '"""yellow"""'], {}), "(home_path, 'yellow')\n", (9350, 9371), False, 'from termcolor import colored\n'), ((11479, 11497), 'matplotlib.pyplot.subplots', 'plt.subplots', (['N', '(2)'], {}), '(N, 2)\n', (11491, 11497), True, 'import matplotlib.pyplot as plt\n'), ((11962, 11976), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11971, 11976), True, 'import matplotlib.pyplot as plt\n'), ((12480, 12517), 'os.path.join', 'os.path.join', (['home_path', '"""logger.pth"""'], {}), "(home_path, 'logger.pth')\n", (12492, 12517), False, 'import os\n'), ((13337, 13365), 'termcolor.colored', 'colored', (['home_path', '"""yellow"""'], {}), "(home_path, 'yellow')\n", (13344, 13365), False, 'from termcolor import colored\n'), ((8956, 8990), 'termcolor.colored', 'colored', (["('%.4f' % test_loss)", '"""red"""'], {}), "('%.4f' % test_loss, 'red')\n", (8963, 8990), False, 'from termcolor import colored\n'), ((12950, 12984), 'termcolor.colored', 'colored', (["('%.4f' % test_loss)", '"""red"""'], {}), "('%.4f' % test_loss, 'red')\n", (12957, 12984), False, 'from termcolor import colored\n')]
|
import tornado.ioloop
import tornado.web
from tornado.platform.asyncio import AsyncIOMainLoop
from base_plugin import BasePlugin
from utilities import path
class WebHandler(tornado.web.RequestHandler):
    """Renders the "who's online" page.

    The ``player_manager`` attribute is attached to this class by
    ``WebManager.activate()`` before any request is served.
    """

    def get(self, *args, **kwargs):
        # Snapshot the currently tracked players and hand them to the template.
        online = list(self.player_manager.players.values())
        self.render("static/who.html", title="Who's online", players=online)
class WebManager(BasePlugin):
    """Plugin that serves a small web UI (the who-list) on port 8888."""
    name = "web_manager"
    depends = ['player_manager']
    def activate(self):
        # Tornado instantiates handlers per request, so shared plugin state
        # is attached to the handler *class* rather than an instance.
        WebHandler.web_manager = self
        WebHandler.factory = self.factory
        WebHandler.player_manager = self.plugins.player_manager
        # Run tornado's IOLoop on top of the running asyncio event loop.
        AsyncIOMainLoop().install()
        # `application` is the module-level Application defined below this
        # class; it is bound by the time activate() is called at runtime.
        application.listen(8888)
# Routing table: "/" renders the who-list; "/css/*" serves static assets from
# <path>/plugins/static/css. NOTE(review): `path` comes from `utilities` and is
# joined with `/` — presumably a pathlib.Path-like object; verify.
application = tornado.web.Application([
    (r"/", WebHandler),
    (r'/css/(.*)', tornado.web.StaticFileHandler, {'path': str(path
                                                                / "plugins"
                                                                / "static"
                                                                / "css")})])
|
[
"tornado.platform.asyncio.AsyncIOMainLoop"
] |
[((704, 721), 'tornado.platform.asyncio.AsyncIOMainLoop', 'AsyncIOMainLoop', ([], {}), '()\n', (719, 721), False, 'from tornado.platform.asyncio import AsyncIOMainLoop\n')]
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2021-2022 Valory AG
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the `aea create` sub-command."""
import json
import os
import shutil
import tempfile
from pathlib import Path
from typing import Dict
from unittest import TestCase
from unittest.mock import patch
import jsonschema
import pytest
import yaml
from jsonschema import Draft4Validator
from packaging.version import Version
import aea
from aea.cli import cli
from aea.configurations.constants import DEFAULT_AEA_CONFIG_FILE
from aea.configurations.data_types import PublicId
from aea.configurations.loader import ConfigLoader, make_jsonschema_base_uri
from packages.open_aea.protocols.signing.message import SigningMessage
from tests.conftest import (
AGENT_CONFIGURATION_SCHEMA,
AUTHOR,
CLI_LOG_OPTION,
CONFIGURATION_SCHEMA_DIR,
CliRunner,
ROOT_DIR,
)
class TestCreate:
    """Test that the command 'aea create <agent_name>' works as expected."""

    @classmethod
    def setup_class(cls):
        """Set the test up."""
        # Load the agent-configuration JSON schema and build a validator whose
        # resolver can follow references relative to the schema directory.
        # fix: close the schema file handle (was `json.load(open(...))`).
        with open(AGENT_CONFIGURATION_SCHEMA) as schema_file:
            cls.schema = json.load(schema_file)
        cls.resolver = jsonschema.RefResolver(
            make_jsonschema_base_uri(Path(CONFIGURATION_SCHEMA_DIR).absolute()),
            cls.schema,
        )
        cls.validator = Draft4Validator(cls.schema, resolver=cls.resolver)
        cls.runner = CliRunner()
        cls.agent_name = "myagent"
        # Work inside a throw-away temp dir holding a copy of the local
        # "packages" registry so that `--local` resolution works.
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        dir_path = Path("packages")
        tmp_dir = cls.t / dir_path
        src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
        shutil.copytree(str(src_dir), str(tmp_dir))
        os.chdir(cls.t)
        # Redirect the CLI config into the temp dir so the test never touches
        # the user's real configuration.
        cls.cli_config_file = f"{cls.t}/cli_config.yaml"
        cls.cli_config_patch = patch(
            "aea.cli.utils.config.CLI_CONFIG_PATH", cls.cli_config_file
        )
        cls.cli_config_patch.start()
        result = cls.runner.invoke(
            cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
        )
        assert result.exit_code == 0, result.stdout
        cls.result = cls.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
            standalone_mode=False,
        )
        cls.agent_config = cls._load_config_file(cls.agent_name)

    @classmethod
    def _load_config_file(cls, agent_name) -> Dict:
        """Load a config file."""
        agent_config_file = Path(agent_name, DEFAULT_AEA_CONFIG_FILE)  # type: ignore
        # fix: use a context manager so the handle is closed even on error
        # (was an `open(...)` whose handle was never closed).
        with open(agent_config_file, mode="r", encoding="utf-8") as file_pointer:
            agent_config_instance = yaml.safe_load(file_pointer)
        return agent_config_instance

    def test_exit_code_equal_to_zero(self):
        """Assert that the exit code is equal to zero (i.e. success)."""
        assert self.result.exit_code == 0

    def test_agent_directory_path_exists(self):
        """Check that the agent's directory has been created."""
        agent_dir = Path(self.agent_name)
        assert agent_dir.exists()
        assert agent_dir.is_dir()

    def test_configuration_file_has_been_created(self):
        """Check that an agent's configuration file has been created."""
        agent_config_file = Path(self.agent_name, DEFAULT_AEA_CONFIG_FILE)
        assert agent_config_file.exists()
        assert agent_config_file.is_file()

    def test_configuration_file_is_compliant_to_schema(self):
        """Check that the agent's configuration file is compliant with the schema."""
        try:
            self.validator.validate(instance=self.agent_config)
        except jsonschema.exceptions.ValidationError as e:
            pytest.fail(
                "Configuration file is not compliant with the schema. Exception: {}".format(
                    str(e)
                )
            )

    def test_aea_version_is_correct(self):
        """Check that the aea version in the configuration file is correct, i.e. the same of the installed package."""
        # The generated range pins [major.minor.0, (major+1).0.0) of the
        # installed aea package.
        expected_aea_version = Version(aea.__version__)
        version_no_micro = Version(
            f"{expected_aea_version.major}.{expected_aea_version.minor}.0"
        )
        version_no_micro = (
            version_no_micro
            if version_no_micro < expected_aea_version
            else expected_aea_version
        )
        version_next_minor = Version(f"{expected_aea_version.major + 1}.0.0")
        version_range = f">={version_no_micro}, <{version_next_minor}"
        assert self.agent_config["aea_version"] == version_range

    def test_agent_name_is_correct(self):
        """Check that the agent name in the configuration file is correct."""
        assert self.agent_config["agent_name"] == self.agent_name

    def test_authors_field_is_empty_string(self):
        """Check that the 'authors' field in the config file is the empty string."""
        assert self.agent_config["author"] == AUTHOR

    def test_connections_contains_nothing(self):
        """Check that the 'connections' list contains only the 'stub' connection."""
        assert self.agent_config["connections"] == []

    def test_default_connection_field_is_empty(self):
        """Check that the 'default_connection' is not specified."""
        assert self.agent_config["default_connection"] is None

    def test_license_field_is_empty_string(self):
        """Check that the 'license' is the empty string."""
        assert (
            self.agent_config["license"] == aea.configurations.constants.DEFAULT_LICENSE
        )

    def test_protocols_field_is_not_empty_list(self):
        """Check that the 'protocols' field is a list with the 'default' protocol."""
        assert [
            str(PublicId.from_str(p).without_hash())
            for p in self.agent_config["protocols"]
        ] == [str(SigningMessage.protocol_id)]

    def test_skills_field_is_empty_list(self):
        """Check that the 'skills' field is a list with the 'error' skill."""
        assert self.agent_config["skills"] == []

    def test_version_field_is_equal_to_0_1_0(self):
        """Check that the 'version' field is equal to the string '0.1.0'."""
        assert self.agent_config["version"] == "0.1.0"

    def test_vendor_content(self):
        """Check the content of vendor directory is as expected."""
        vendor_dir = Path(self.agent_name, "vendor")
        assert vendor_dir.exists()
        assert set(vendor_dir.iterdir()) == {
            vendor_dir / "open_aea",
            vendor_dir / "__init__.py",
        }
        # assert that every subdirectory of vendor/fetchai is a Python package
        # (i.e. that contains __init__.py)
        for package_dir in (vendor_dir / "open_aea").iterdir():
            assert (package_dir / "__init__.py").exists()

    def test_vendor_protocols_contains_signing_protocol(self):
        """Check that the vendor protocols directory contains the signing protocol."""
        protocol_dirpath = Path(
            self.agent_name, "vendor", "open_aea", "protocols", "signing"
        )
        assert protocol_dirpath.exists()
        assert protocol_dirpath.is_dir()

    def test_protocols_directory_content(self):
        """Test the content of the 'protocols' directory."""
        dir = Path(self.t, self.agent_name, "protocols")
        assert dir.exists()
        assert dir.is_dir()
        assert set(dir.iterdir()) == {dir / "__init__.py"}

    def test_connections_directory_content(self):
        """Test the content of the 'connections' directory."""
        dir = Path(self.t, self.agent_name, "connections")
        assert dir.exists()
        assert dir.is_dir()
        assert set(dir.iterdir()) == {dir / "__init__.py"}

    def test_skills_directory_content(self):
        """Test the content of the 'skills' directory."""
        dir = Path(self.t, self.agent_name, "skills")
        assert dir.exists()
        assert dir.is_dir()
        assert set(dir.iterdir()) == {dir / "__init__.py"}

    @classmethod
    def teardown_class(cls):
        """Tear the test down."""
        # fix: the original *started* the patch a second time here, leaving it
        # active after the class finished; it must be stopped instead.
        cls.cli_config_patch.stop()
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
class TestCreateFailsWhenDirectoryAlreadyExists:
    """Test that 'aea create' sub-command fails when the directory with the agent name in input already exists."""
    @classmethod
    def setup_class(cls):
        """Set up the test class."""
        cls.runner = CliRunner()
        cls.agent_name = "myagent"
        # Work inside a throw-away temp dir that holds a copy of the local
        # "packages" registry so `--local` resolution works.
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        dir_path = Path("packages")
        tmp_dir = cls.t / dir_path
        src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
        shutil.copytree(str(src_dir), str(tmp_dir))
        os.chdir(cls.t)
        # create a directory with the agent name -> make 'aea create fail.
        os.mkdir(cls.agent_name)
        result = cls.runner.invoke(
            cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
        )
        assert result.exit_code == 0
        # The actual command under test; standalone_mode=False lets the
        # raised exception surface on `result.exception`.
        cls.result = cls.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
            standalone_mode=False,
        )
    def test_exit_code_equal_to_1(self):
        """Test that the error code is equal to 1 (i.e. catchall for general errors)."""
        assert self.result.exit_code == 1
    def test_log_error_message(self):
        """Test that the log error message is fixed.
        The expected message is: 'Directory already exist. Aborting...'
        """
        s = "Directory already exist. Aborting..."
        assert self.result.exception.message == s
    @classmethod
    def teardown_class(cls):
        """Tear the test down."""
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
class TestCreateFailsWhenConfigFileIsNotCompliant:
    """Test that 'aea create' sub-command fails when the generated configuration file is not compliant with the schema."""
    @classmethod
    def setup_class(cls):
        """Set up the test class."""
        cls.runner = CliRunner()
        cls.agent_name = "myagent"
        # change the serialization of the AgentConfig class so to make the parsing to fail.
        cls.patch = patch.object(
            aea.configurations.base.AgentConfig, "json", return_value={"hello": "world"}
        )
        cls.patch.start()
        # Work inside a throw-away temp dir that holds a copy of the local
        # "packages" registry so `--local` resolution works.
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        dir_path = Path("packages")
        tmp_dir = cls.t / dir_path
        src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
        shutil.copytree(str(src_dir), str(tmp_dir))
        os.chdir(cls.t)
        result = cls.runner.invoke(
            cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
        )
        assert result.exit_code == 0
        # The command under test; expected to fail because of the patch above.
        cls.result = cls.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
            standalone_mode=False,
        )
    def test_exit_code_equal_to_1(self):
        """Test that the error code is equal to 1 (i.e. catchall for general errors)."""
        assert self.result.exit_code == 1
    def test_agent_folder_is_not_created(self):
        """Test that the agent folder is removed."""
        assert not Path(self.agent_name).exists()
    @classmethod
    def teardown_class(cls):
        """Tear the test down."""
        cls.patch.stop()
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
class TestCreateFailsWhenExceptionOccurs:
"""Test that 'aea create' sub-command fails when the generated configuration file is not compliant with the schema."""
@classmethod
def setup_class(cls):
"""Set up the test class."""
cls.runner = CliRunner()
cls.agent_name = "myagent"
# change the serialization of the AgentConfig class so to make the parsing to fail.
cls.patch = patch.object(ConfigLoader, "dump", side_effect=Exception)
cls.patch.start()
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = cls.t / dir_path
src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
os.chdir(cls.t)
result = cls.runner.invoke(
cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
)
assert result.exit_code == 0
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the error code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_agent_folder_is_not_created(self):
"""Test that the agent folder is removed."""
assert not Path(self.agent_name).exists()
@classmethod
def teardown_class(cls):
"""Tear the test down."""
cls.patch.stop()
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class TestCreateFailsWhenAlreadyInAEAProject:
"""Test that 'aea create' sub-command fails when it is called within an AEA project."""
@classmethod
def setup_class(cls):
"""Set the test up."""
cls.cwd = os.getcwd()
cls.t = tempfile.mkdtemp()
dir_path = Path("packages")
tmp_dir = cls.t / dir_path
src_dir = cls.cwd / Path(ROOT_DIR, dir_path)
shutil.copytree(str(src_dir), str(tmp_dir))
os.chdir(cls.t)
cls.runner = CliRunner()
cls.agent_name = "myagent"
result = cls.runner.invoke(
cli, [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
)
assert result.exit_code == 0
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
standalone_mode=False,
)
assert cls.result.exit_code == 0
# calling 'aea create myagent' again within an AEA project - recursively.
os.chdir(cls.agent_name)
cls.result = cls.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", cls.agent_name],
standalone_mode=False,
)
def test_exit_code_equal_to_1(self):
"""Test that the error code is equal to 1 (i.e. catchall for general errors)."""
assert self.result.exit_code == 1
def test_log_error_message(self):
"""Test that the log error message is fixed.
The expected message is: "The current folder is already an AEA project. Please move to the parent folder.".
"""
s = "The current folder is already an AEA project. Please move to the parent folder."
assert self.result.exception.message == s
@classmethod
def teardown_class(cls):
"""Tear the test down."""
os.chdir(cls.cwd)
try:
shutil.rmtree(cls.t)
except (OSError, IOError):
pass
class CreateCommandTestCase(TestCase):
"""Test case for CLI create command."""
def setUp(self):
"""Set it up."""
self.runner = CliRunner()
def test_create_no_init(self):
"""Test for CLI create no init result."""
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--remote", "agent_name", "--author=some"],
standalone_mode=False,
)
self.assertEqual(
result.exception.message,
"Author is not set up. Please use 'aea init' to initialize.",
)
@patch("aea.cli.create.get_or_create_cli_config", return_value={})
def test_create_no_author_local(self, *mocks):
"""Test for CLI create no author local result."""
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "create", "--local", "agent_name"],
standalone_mode=False,
)
expected_message = (
"The AEA configurations are not initialized. "
"Uses `aea init` before continuing or provide optional argument `--author`."
)
self.assertEqual(result.exception.message, expected_message)
|
[
"os.mkdir",
"unittest.mock.patch.object",
"os.getcwd",
"unittest.mock.patch",
"pathlib.Path",
"tempfile.mkdtemp",
"jsonschema.Draft4Validator",
"tests.conftest.CliRunner",
"yaml.safe_load",
"packaging.version.Version",
"shutil.rmtree",
"aea.configurations.data_types.PublicId.from_str",
"os.chdir"
] |
[((16549, 16614), 'unittest.mock.patch', 'patch', (['"""aea.cli.create.get_or_create_cli_config"""'], {'return_value': '{}'}), "('aea.cli.create.get_or_create_cli_config', return_value={})\n", (16554, 16614), False, 'from unittest.mock import patch\n'), ((2071, 2121), 'jsonschema.Draft4Validator', 'Draft4Validator', (['cls.schema'], {'resolver': 'cls.resolver'}), '(cls.schema, resolver=cls.resolver)\n', (2086, 2121), False, 'from jsonschema import Draft4Validator\n'), ((2144, 2155), 'tests.conftest.CliRunner', 'CliRunner', ([], {}), '()\n', (2153, 2155), False, 'from tests.conftest import AGENT_CONFIGURATION_SCHEMA, AUTHOR, CLI_LOG_OPTION, CONFIGURATION_SCHEMA_DIR, CliRunner, ROOT_DIR\n'), ((2209, 2220), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2218, 2220), False, 'import os\n'), ((2237, 2255), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2253, 2255), False, 'import tempfile\n'), ((2275, 2291), 'pathlib.Path', 'Path', (['"""packages"""'], {}), "('packages')\n", (2279, 2291), False, 'from pathlib import Path\n'), ((2440, 2455), 'os.chdir', 'os.chdir', (['cls.t'], {}), '(cls.t)\n', (2448, 2455), False, 'import os\n'), ((2544, 2610), 'unittest.mock.patch', 'patch', (['"""aea.cli.utils.config.CLI_CONFIG_PATH"""', 'cls.cli_config_file'], {}), "('aea.cli.utils.config.CLI_CONFIG_PATH', cls.cli_config_file)\n", (2549, 2610), False, 'from unittest.mock import patch\n'), ((3211, 3252), 'pathlib.Path', 'Path', (['agent_name', 'DEFAULT_AEA_CONFIG_FILE'], {}), '(agent_name, DEFAULT_AEA_CONFIG_FILE)\n', (3215, 3252), False, 'from pathlib import Path\n'), ((3376, 3404), 'yaml.safe_load', 'yaml.safe_load', (['file_pointer'], {}), '(file_pointer)\n', (3390, 3404), False, 'import yaml\n'), ((3736, 3757), 'pathlib.Path', 'Path', (['self.agent_name'], {}), '(self.agent_name)\n', (3740, 3757), False, 'from pathlib import Path\n'), ((3984, 4030), 'pathlib.Path', 'Path', (['self.agent_name', 'DEFAULT_AEA_CONFIG_FILE'], {}), '(self.agent_name, 
DEFAULT_AEA_CONFIG_FILE)\n', (3988, 4030), False, 'from pathlib import Path\n'), ((4772, 4796), 'packaging.version.Version', 'Version', (['aea.__version__'], {}), '(aea.__version__)\n', (4779, 4796), False, 'from packaging.version import Version\n'), ((4824, 4895), 'packaging.version.Version', 'Version', (['f"""{expected_aea_version.major}.{expected_aea_version.minor}.0"""'], {}), "(f'{expected_aea_version.major}.{expected_aea_version.minor}.0')\n", (4831, 4895), False, 'from packaging.version import Version\n'), ((5108, 5156), 'packaging.version.Version', 'Version', (['f"""{expected_aea_version.major + 1}.0.0"""'], {}), "(f'{expected_aea_version.major + 1}.0.0')\n", (5115, 5156), False, 'from packaging.version import Version\n'), ((7066, 7097), 'pathlib.Path', 'Path', (['self.agent_name', '"""vendor"""'], {}), "(self.agent_name, 'vendor')\n", (7070, 7097), False, 'from pathlib import Path\n'), ((7689, 7756), 'pathlib.Path', 'Path', (['self.agent_name', '"""vendor"""', '"""open_aea"""', '"""protocols"""', '"""signing"""'], {}), "(self.agent_name, 'vendor', 'open_aea', 'protocols', 'signing')\n", (7693, 7756), False, 'from pathlib import Path\n'), ((7985, 8027), 'pathlib.Path', 'Path', (['self.t', 'self.agent_name', '"""protocols"""'], {}), "(self.t, self.agent_name, 'protocols')\n", (7989, 8027), False, 'from pathlib import Path\n'), ((8271, 8315), 'pathlib.Path', 'Path', (['self.t', 'self.agent_name', '"""connections"""'], {}), "(self.t, self.agent_name, 'connections')\n", (8275, 8315), False, 'from pathlib import Path\n'), ((8549, 8588), 'pathlib.Path', 'Path', (['self.t', 'self.agent_name', '"""skills"""'], {}), "(self.t, self.agent_name, 'skills')\n", (8553, 8588), False, 'from pathlib import Path\n'), ((8830, 8847), 'os.chdir', 'os.chdir', (['cls.cwd'], {}), '(cls.cwd)\n', (8838, 8847), False, 'import os\n'), ((9214, 9225), 'tests.conftest.CliRunner', 'CliRunner', ([], {}), '()\n', (9223, 9225), False, 'from tests.conftest import AGENT_CONFIGURATION_SCHEMA, 
AUTHOR, CLI_LOG_OPTION, CONFIGURATION_SCHEMA_DIR, CliRunner, ROOT_DIR\n'), ((9280, 9291), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9289, 9291), False, 'import os\n'), ((9308, 9326), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9324, 9326), False, 'import tempfile\n'), ((9346, 9362), 'pathlib.Path', 'Path', (['"""packages"""'], {}), "('packages')\n", (9350, 9362), False, 'from pathlib import Path\n'), ((9511, 9526), 'os.chdir', 'os.chdir', (['cls.t'], {}), '(cls.t)\n', (9519, 9526), False, 'import os\n'), ((9611, 9635), 'os.mkdir', 'os.mkdir', (['cls.agent_name'], {}), '(cls.agent_name)\n', (9619, 9635), False, 'import os\n'), ((10505, 10522), 'os.chdir', 'os.chdir', (['cls.cwd'], {}), '(cls.cwd)\n', (10513, 10522), False, 'import os\n'), ((10899, 10910), 'tests.conftest.CliRunner', 'CliRunner', ([], {}), '()\n', (10908, 10910), False, 'from tests.conftest import AGENT_CONFIGURATION_SCHEMA, AUTHOR, CLI_LOG_OPTION, CONFIGURATION_SCHEMA_DIR, CliRunner, ROOT_DIR\n'), ((11059, 11154), 'unittest.mock.patch.object', 'patch.object', (['aea.configurations.base.AgentConfig', '"""json"""'], {'return_value': "{'hello': 'world'}"}), "(aea.configurations.base.AgentConfig, 'json', return_value={\n 'hello': 'world'})\n", (11071, 11154), False, 'from unittest.mock import patch\n'), ((11217, 11228), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11226, 11228), False, 'import os\n'), ((11245, 11263), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (11261, 11263), False, 'import tempfile\n'), ((11283, 11299), 'pathlib.Path', 'Path', (['"""packages"""'], {}), "('packages')\n", (11287, 11299), False, 'from pathlib import Path\n'), ((11448, 11463), 'os.chdir', 'os.chdir', (['cls.t'], {}), '(cls.t)\n', (11456, 11463), False, 'import os\n'), ((12233, 12250), 'os.chdir', 'os.chdir', (['cls.cwd'], {}), '(cls.cwd)\n', (12241, 12250), False, 'import os\n'), ((12618, 12629), 'tests.conftest.CliRunner', 'CliRunner', ([], {}), '()\n', (12627, 12629), False, 'from 
tests.conftest import AGENT_CONFIGURATION_SCHEMA, AUTHOR, CLI_LOG_OPTION, CONFIGURATION_SCHEMA_DIR, CliRunner, ROOT_DIR\n'), ((12778, 12835), 'unittest.mock.patch.object', 'patch.object', (['ConfigLoader', '"""dump"""'], {'side_effect': 'Exception'}), "(ConfigLoader, 'dump', side_effect=Exception)\n", (12790, 12835), False, 'from unittest.mock import patch\n'), ((12881, 12892), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12890, 12892), False, 'import os\n'), ((12909, 12927), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (12925, 12927), False, 'import tempfile\n'), ((12947, 12963), 'pathlib.Path', 'Path', (['"""packages"""'], {}), "('packages')\n", (12951, 12963), False, 'from pathlib import Path\n'), ((13112, 13127), 'os.chdir', 'os.chdir', (['cls.t'], {}), '(cls.t)\n', (13120, 13127), False, 'import os\n'), ((13896, 13913), 'os.chdir', 'os.chdir', (['cls.cwd'], {}), '(cls.cwd)\n', (13904, 13913), False, 'import os\n'), ((14245, 14256), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14254, 14256), False, 'import os\n'), ((14273, 14291), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (14289, 14291), False, 'import tempfile\n'), ((14311, 14327), 'pathlib.Path', 'Path', (['"""packages"""'], {}), "('packages')\n", (14315, 14327), False, 'from pathlib import Path\n'), ((14476, 14491), 'os.chdir', 'os.chdir', (['cls.t'], {}), '(cls.t)\n', (14484, 14491), False, 'import os\n'), ((14514, 14525), 'tests.conftest.CliRunner', 'CliRunner', ([], {}), '()\n', (14523, 14525), False, 'from tests.conftest import AGENT_CONFIGURATION_SCHEMA, AUTHOR, CLI_LOG_OPTION, CONFIGURATION_SCHEMA_DIR, CliRunner, ROOT_DIR\n'), ((15022, 15046), 'os.chdir', 'os.chdir', (['cls.agent_name'], {}), '(cls.agent_name)\n', (15030, 15046), False, 'import os\n'), ((15844, 15861), 'os.chdir', 'os.chdir', (['cls.cwd'], {}), '(cls.cwd)\n', (15852, 15861), False, 'import os\n'), ((16114, 16125), 'tests.conftest.CliRunner', 'CliRunner', ([], {}), '()\n', (16123, 16125), False, 'from 
tests.conftest import AGENT_CONFIGURATION_SCHEMA, AUTHOR, CLI_LOG_OPTION, CONFIGURATION_SCHEMA_DIR, CliRunner, ROOT_DIR\n'), ((2355, 2379), 'pathlib.Path', 'Path', (['ROOT_DIR', 'dir_path'], {}), '(ROOT_DIR, dir_path)\n', (2359, 2379), False, 'from pathlib import Path\n'), ((8873, 8893), 'shutil.rmtree', 'shutil.rmtree', (['cls.t'], {}), '(cls.t)\n', (8886, 8893), False, 'import shutil\n'), ((9426, 9450), 'pathlib.Path', 'Path', (['ROOT_DIR', 'dir_path'], {}), '(ROOT_DIR, dir_path)\n', (9430, 9450), False, 'from pathlib import Path\n'), ((10548, 10568), 'shutil.rmtree', 'shutil.rmtree', (['cls.t'], {}), '(cls.t)\n', (10561, 10568), False, 'import shutil\n'), ((11363, 11387), 'pathlib.Path', 'Path', (['ROOT_DIR', 'dir_path'], {}), '(ROOT_DIR, dir_path)\n', (11367, 11387), False, 'from pathlib import Path\n'), ((12276, 12296), 'shutil.rmtree', 'shutil.rmtree', (['cls.t'], {}), '(cls.t)\n', (12289, 12296), False, 'import shutil\n'), ((13027, 13051), 'pathlib.Path', 'Path', (['ROOT_DIR', 'dir_path'], {}), '(ROOT_DIR, dir_path)\n', (13031, 13051), False, 'from pathlib import Path\n'), ((13939, 13959), 'shutil.rmtree', 'shutil.rmtree', (['cls.t'], {}), '(cls.t)\n', (13952, 13959), False, 'import shutil\n'), ((14391, 14415), 'pathlib.Path', 'Path', (['ROOT_DIR', 'dir_path'], {}), '(ROOT_DIR, dir_path)\n', (14395, 14415), False, 'from pathlib import Path\n'), ((15887, 15907), 'shutil.rmtree', 'shutil.rmtree', (['cls.t'], {}), '(cls.t)\n', (15900, 15907), False, 'import shutil\n'), ((12088, 12109), 'pathlib.Path', 'Path', (['self.agent_name'], {}), '(self.agent_name)\n', (12092, 12109), False, 'from pathlib import Path\n'), ((13751, 13772), 'pathlib.Path', 'Path', (['self.agent_name'], {}), '(self.agent_name)\n', (13755, 13772), False, 'from pathlib import Path\n'), ((1969, 1999), 'pathlib.Path', 'Path', (['CONFIGURATION_SCHEMA_DIR'], {}), '(CONFIGURATION_SCHEMA_DIR)\n', (1973, 1999), False, 'from pathlib import Path\n'), ((6445, 6465), 
'aea.configurations.data_types.PublicId.from_str', 'PublicId.from_str', (['p'], {}), '(p)\n', (6462, 6465), False, 'from aea.configurations.data_types import PublicId\n')]
|
import click
from click.testing import CliRunner
from aiotasks.actions.cli import worker
import aiotasks.actions.cli
def _launch_aiotasks_worker_in_console(blah, **kwargs):
click.echo("ok")
def test_cli_worker_runs_show_help():
runner = CliRunner()
result = runner.invoke(worker)
assert 'Usage: worker [OPTIONS]' in result.output
def test_cli_worker_runs_ok(monkeypatch):
# Patch the launch of: launch_aiotasks_info_in_console
aiotasks.actions.cli.launch_aiotasks_worker_in_console = _launch_aiotasks_worker_in_console
runner = CliRunner()
result = runner.invoke(worker, ["-A", "package"])
assert 'ok' in result.output
|
[
"click.testing.CliRunner",
"click.echo"
] |
[((182, 198), 'click.echo', 'click.echo', (['"""ok"""'], {}), "('ok')\n", (192, 198), False, 'import click\n'), ((252, 263), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (261, 263), False, 'from click.testing import CliRunner\n'), ((567, 578), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (576, 578), False, 'from click.testing import CliRunner\n')]
|
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from .middleware import middleware
from .routers import auth, blog
from .db import init_db
import os
APP_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_DATABASE_URL = "sqlite:///./sql_app.db"
def get_app(config: dict | None = None):
app = FastAPI(middleware = middleware)
url = DEFAULT_DATABASE_URL
if config:
url = config.get('database_url', DEFAULT_DATABASE_URL)
app.state.database_url = url
init_db(app)
app.mount('/static', StaticFiles(directory=os.path.join(APP_DIR, 'static')), name='static')
app.include_router(auth.router)
app.include_router(blog.router)
return app
app = get_app()
|
[
"os.path.abspath",
"os.path.join",
"fastapi.FastAPI"
] |
[((200, 225), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (215, 225), False, 'import os\n'), ((333, 363), 'fastapi.FastAPI', 'FastAPI', ([], {'middleware': 'middleware'}), '(middleware=middleware)\n', (340, 363), False, 'from fastapi import FastAPI\n'), ((583, 614), 'os.path.join', 'os.path.join', (['APP_DIR', '"""static"""'], {}), "(APP_DIR, 'static')\n", (595, 614), False, 'import os\n')]
|
# Copyright (c) ElementAI and its affiliates.
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Script to train DCGAN on MNIST, adaptted from https://github.com/pytorch/examples/blob/master/dcgan/main.py"""
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.nn.utils import spectral_norm
from tensorboardX import SummaryWriter
import json
from gan_eval_metrics import mnist_inception_score
from lib.optim import ExtraAdam
import numpy as np
from torch.utils.data import Subset
from plot_path_tools import compute_path_stats, plot_path_stats, compute_eigenvalues,\
plot_eigenvalues
import time
import pickle
from lib import models
import torch.nn.functional as F
def load_mnist(batchSize, imageSize=32, train=True, workers=2, dataroot='./data', subset=None):
dataset = dset.MNIST(root=dataroot, train=train, download=True,
transform=transforms.Compose([
transforms.Resize(imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
]))
if subset is not None:
idx = np.arange(len(dataset))
np.random.RandomState(123).shuffle(idx)
dataset = Subset(dataset, idx[:subset])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batchSize,
shuffle=True, num_workers=int(workers))
return dataloader
def normalize_module2D(module, norm, dim):
"""
Applies normalization `norm` to `module`.
Optionally uses `dim`
Returns a list of modules.
"""
if norm == 'none':
return [module]
elif norm == 'batch':
return [module, nn.BatchNorm2d(dim)]
elif norm == 'instance':
return [module, nn.InstanceNorm2d(dim)]
elif norm == 'layer':
return [module, nn.GroupNorm(1, dim)]
elif norm == 'spectral':
return [spectral_norm(module)]
else:
raise NotImplementedError('normalization [%s] is not found' % norm)
class Generator(nn.Module):
def __init__(self, ngpu, nc, ngf, nz):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=True),
nn.Tanh()
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
class Discriminator(models.Discriminator):
def __init__(self, ngpu, nc, ndf, norm='spectral', sigmoid=True):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.norm = norm
self.sigmoid = sigmoid
# NOTE: made a special cose for BN because we don't normalize first layer
# I kept it this way to be able to load pre-trained models
if self.norm != 'batch':
self.main = nn.Sequential(
*normalize_module2D(nn.Conv2d(nc, ndf, 4, 2, 1, bias=True), norm, ndf),
nn.LeakyReLU(0.2, inplace=True),
*normalize_module2D(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=True), norm, ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
*normalize_module2D(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=True), norm, ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=True),
)
else:
self.main = nn.Sequential(
nn.Conv2d(nc, ndf, 4, 2, 1, bias=True),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=True),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=True),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=True),
)
def forward(self, input):
if input.is_cuda and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
if self.sigmoid:
output = torch.sigmoid(output)
return output.view(-1, 1).squeeze(1)
def weights_init(m):
""" custom weights initialization called on netG and netD """
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def define_model_loss(config):
"""returns dis/gen loss functions based on the model"""
if config.model == 'dcgan':
return dcgan_loss_dis, dcgan_loss_gen
elif config.model in ['wgan', 'wgan_gp']:
return wgan_loss_dis, wgan_loss_gen
# elif config.model == 'wgan_gp':
# return functools.partial(wgan_loss_dis, grad_penalty=True, gp_lambda=config.gp_lambda), wgan_loss_gen
else:
raise NotImplementedError('%s model is not implemented!' % config.model)
# def dcgan_loss_dis(x_real, x_fake, netD, device):
# p_real, p_gen = netD(x_real), netD(x_fake)
# criterion = nn.BCELoss()
# real_label = torch.full((p_real.size(0),), 1, device=device)
# fake_label = torch.full((p_real.size(0),), 0, device=device)
# errD_real = criterion(p_real, real_label)
# errD_gen = criterion(p_gen, fake_label)
# dis_loss = errD_real + errD_gen
# return dis_loss, p_real, p_gen
# def dcgan_loss_gen(x_fake, netD, device):
# p_gen = netD(x_fake)
# criterion = nn.BCELoss()
# real_label = torch.full((p_gen.size(0),), 1, device=device)
# gen_loss = criterion(p_gen, real_label)
# return gen_loss, p_gen
def dcgan_loss_dis(x_real, x_fake, netD, device):
p_real, p_gen = netD(x_real), netD(x_fake)
dis_loss = F.softplus(-p_real).mean() + F.softplus(p_gen).mean()
return dis_loss, p_real, p_gen
def dcgan_loss_gen(x_fake, netD, device):
p_gen = netD(x_fake)
gen_loss = F.softplus(-p_gen).mean()
return gen_loss, p_gen
def wgan_loss_gen(x_fake, netD, device):
score_gen = netD(x_fake)
gen_loss = -score_gen.mean()
return gen_loss, score_gen
def wgan_loss_dis(x_real, x_fake, netD, device):
score_real, score_gen = netD(x_real), netD(x_fake)
dis_loss = score_gen.mean() - score_real.mean()
# if grad_penalty:
# dis_loss += gp_lambda * netD.get_penalty(x_real.detach(), x_fake.detach())
return dis_loss, score_real, score_gen
def main(config):
print("Hyper-params:")
print(config)
# create exp folder and save config
exp_dir = os.path.join(config.exp_dir, config.exp_name)
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
plots_dir = os.path.join(exp_dir, 'extra_plots')
if not os.path.exists(plots_dir):
os.makedirs(plots_dir)
if config.manualSeed is None:
config.manualSeed = random.randint(1, 10000)
print("Random Seed: ", config.manualSeed)
random.seed(config.manualSeed)
torch.manual_seed(config.manualSeed)
np.random.seed(config.manualSeed)
if torch.cuda.is_available():
torch.cuda.manual_seed(config.manualSeed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device {0!s}".format(device))
dataloader = load_mnist(config.batchSize)
eval_dataloader = load_mnist(config.batchSize, subset=5000)
eig_dataloader = load_mnist(1000, train=True, subset=1000)
fixed_noise = torch.randn(64, config.nz, 1, 1, device=device)
# define the model
netG = Generator(config.ngpu, config.nc, config.ngf, config.nz).to(device)
netG.apply(weights_init)
if config.netG != '':
print('loading generator from %s' % config.netG)
netG.load_state_dict(torch.load(config.netG)['state_gen'])
print(netG)
# sigmoid = config.model == 'dcgan'
sigmoid = False
netD = Discriminator(config.ngpu, config.nc, config.ndf, config.dnorm, sigmoid).to(device)
netD.apply(weights_init)
if config.netD != '':
print('loading discriminator from %s' % config.netD)
netD.load_state_dict(torch.load(config.netD)['state_dis'])
print(netD)
# evaluation G and D
evalG = Generator(config.ngpu, config.nc, config.ngf, config.nz).to(device)
evalG.apply(weights_init)
evalD = Discriminator(config.ngpu, config.nc, config.ndf, config.dnorm, sigmoid).to(device)
evalD.apply(weights_init)
# defining the loss function
model_loss_dis, model_loss_gen = define_model_loss(config)
# # defining learning rates based on the model
# if config.model in ['wgan', 'wgan_gp']:
# config.lrG = config.lrD / config.n_critic
# warnings.warn('modifying learning rates to lrD=%f, lrG=%f' % (config.lrD, config.lrG))
if config.lrG is None:
config.lrG = config.lrD
# setup optimizer
if config.optimizer == 'adam':
optimizerD = optim.Adam(netD.parameters(), lr=config.lrD, betas=(config.beta1, config.beta2))
optimizerG = optim.Adam(netG.parameters(), lr=config.lrG, betas=(config.beta1, config.beta2))
elif config.optimizer == 'extraadam':
optimizerD = ExtraAdam(netD.parameters(), lr=config.lrD)
optimizerG = ExtraAdam(netG.parameters(), lr=config.lrG)
elif config.optimizer == 'rmsprop':
optimizerD = optim.RMSprop(netD.parameters(), lr=config.lrD)
optimizerG = optim.RMSprop(netG.parameters(), lr=config.lrG)
elif config.optimizer == 'sgd':
optimizerD = optim.SGD(netD.parameters(), lr=config.lrD, momentum=config.beta1)
optimizerG = optim.SGD(netG.parameters(), lr=config.lrG, momentum=config.beta1)
else:
raise ValueError('Optimizer %s not supported' % config.optimizer)
with open(os.path.join(exp_dir, 'config.json'), 'w') as f:
json.dump(vars(config), f, indent=4)
summary_writer = SummaryWriter(log_dir=exp_dir)
global_step = 0
torch.save({'state_gen': netG.state_dict(),
'state_dis': netD.state_dict()},
'%s/checkpoint_step_%06d.pth' % (exp_dir, global_step))
# compute and save eigen values function
def comp_and_save_eigs(step, n_eigs=20):
eig_checkpoint = torch.load('%s/checkpoint_step_%06d.pth' % (exp_dir, step),
map_location=device)
evalG.load_state_dict(eig_checkpoint['state_gen'])
evalD.load_state_dict(eig_checkpoint['state_dis'])
gen_eigs, dis_eigs, game_eigs = \
compute_eigenvalues(evalG, evalD, eig_dataloader, config,
model_loss_gen, model_loss_dis,
device, verbose=True, n_eigs=n_eigs)
np.savez(os.path.join(plots_dir, 'eigenvalues_%d' % step),
gen_eigs=gen_eigs, dis_eigs=dis_eigs, game_eigs=game_eigs)
return gen_eigs, dis_eigs, game_eigs
if config.compute_eig:
# eigenvalues of initialization
gen_eigs_init, dis_eigs_init, game_eigs_init = comp_and_save_eigs(0)
for epoch in range(config.niter):
for i, data in enumerate(dataloader, 0):
global_step += 1
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
x_real = data[0].to(device)
batch_size = x_real.size(0)
noise = torch.randn(batch_size, config.nz, 1, 1, device=device)
x_fake = netG(noise)
errD, D_x, D_G_z1 = model_loss_dis(x_real, x_fake.detach(), netD, device)
# gradient penalty
if config.model == 'wgan_gp':
errD += config.gp_lambda * netD.get_penalty(x_real.detach(), x_fake.detach())
errD.backward()
D_x = D_x.mean().item()
D_G_z1 = D_G_z1.mean().item()
if config.optimizer == "extraadam":
if i % 2 == 0:
optimizerD.extrapolation()
else:
optimizerD.step()
else:
optimizerD.step()
# weight clipping
if config.model == 'wgan':
for p in netD.parameters():
p.data.clamp_(-config.clip, config.clip)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
if config.model == 'dcgan' or (config.model in ['wgan', 'wgan_gp'] and i % config.n_critic == 0):
netG.zero_grad()
errG, D_G_z2 = model_loss_gen(x_fake, netD, device)
errG.backward()
D_G_z2 = D_G_z2.mean().item()
if config.optimizer == "extraadam":
if i % 2 == 0:
optimizerG.extrapolation()
else:
optimizerG.step()
else:
optimizerG.step()
if global_step % config.printFreq == 0:
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, config.niter, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
summary_writer.add_scalar("loss/D", errD.item(), global_step)
summary_writer.add_scalar("loss/G", errG.item(), global_step)
summary_writer.add_scalar("output/D_real", D_x, global_step)
summary_writer.add_scalar("output/D_fake", D_G_z1, global_step)
# every epoch save samples
fake = netG(fixed_noise)
# vutils.save_image(fake.detach(),
# '%s/fake_samples_step-%06d.png' % (exp_dir, global_step),
# normalize=True)
fake_grid = vutils.make_grid(fake.detach(), normalize=True)
summary_writer.add_image("G_samples", fake_grid, global_step)
# generate samples for IS evaluation
IS_fake = []
for i in range(10):
noise = torch.randn(500, config.nz, 1, 1, device=device)
IS_fake.append(netG(noise))
IS_fake = torch.cat(IS_fake)
IS_mean, IS_std = mnist_inception_score(IS_fake, device)
print("IS score: mean=%.4f, std=%.4f" % (IS_mean, IS_std))
summary_writer.add_scalar("IS_mean", IS_mean, global_step)
# do checkpointing
checkpoint = {'state_gen': netG.state_dict(),
'state_dis': netD.state_dict()}
torch.save(checkpoint, '%s/checkpoint_step_%06d.pth' % (exp_dir, global_step))
last_chkpt = '%s/checkpoint_step_%06d.pth' % (exp_dir, global_step)
if epoch == 0:
# last_chkpt = '%s/checkpoint_step_%06d.pth' % (exp_dir, 0) # for now
checkpoint_1 = torch.load(last_chkpt, map_location=device)
if config.compute_eig:
# compute eigenvalues for epoch 1, just in case
gen_eigs_curr, dis_eigs_curr, game_eigs_curr = comp_and_save_eigs(global_step)
# if (epoch + 1) % 10 == 0:
if global_step > 30000 and epoch % 5 == 0:
checkpoint_2 = torch.load(last_chkpt, map_location=device)
print("Computing path statistics...")
t = time.time()
hist = compute_path_stats(evalG, evalD, checkpoint_1, checkpoint_2, eval_dataloader,
config, model_loss_gen, model_loss_dis, device, verbose=True)
with open("%s/hist_%d.pkl" % (plots_dir, global_step), 'wb') as f:
pickle.dump(hist, f)
plot_path_stats(hist, plots_dir, summary_writer, global_step)
print("Took %.2f minutes" % ((time.time() - t) / 60.))
if config.compute_eig and global_step > 30000 and epoch % 10 == 0:
# compute eigenvalues and save them
gen_eigs_curr, dis_eigs_curr, game_eigs_curr = comp_and_save_eigs(global_step)
plot_eigenvalues([gen_eigs_init, gen_eigs_curr], [dis_eigs_init, dis_eigs_curr],
[game_eigs_init, game_eigs_curr],
['init', 'step_%d' % global_step], plots_dir, summary_writer,
step=global_step)
class Config(object):
    """Command-line configuration for the GAN training script.

    Wraps an ``argparse.ArgumentParser`` so callers can obtain the parsed
    options namespace via ``Config().parse_args()``.
    """

    def __init__(self):
        parser = argparse.ArgumentParser()
        # Data / loader options
        parser.add_argument('--dataroot', default='./data', help='path to dataset')
        parser.add_argument('--workers', type=int, default=2, help='number of data loading workers')
        parser.add_argument('--batchSize', type=int, default=100, help='input batch size')
        parser.add_argument('--printFreq', type=int, default=50, help='# updates before each print')
        # Model options
        parser.add_argument('--model', type=str, default='dcgan', choices=['dcgan', 'wgan', 'wgan_gp'],
                            help='model type of GAN model')
        parser.add_argument('--n_critic', type=int, default=5, help='number of critic updates per generator update (wgan/wgan_gp)')
        parser.add_argument('--gp_lambda', type=int, default=10, help='weight for gradient penalty (wgan_gp)')
        parser.add_argument('--clip', type=float, default=0.01, help='weight clip range (wgan)')
        parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
        parser.add_argument('--ngf', type=int, default=64)
        parser.add_argument('--ndf', type=int, default=64)
        parser.add_argument('--nc', type=int, default=1)
        # Optimisation options
        parser.add_argument('--niter', type=int, default=200, help='number of epochs to train for')
        parser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0002')
        parser.add_argument('--lrG', type=float, default=None, help='learning rate, default=0.0002 -- same as lrD')
        parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
        # BUG FIX: the help text for --beta2 previously said "beta1".
        parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for adam. default=0.999')
        parser.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'extraadam', 'sgd', 'rmsprop'],
                            help='training optimizer')
        # Runtime / checkpointing options
        parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
        parser.add_argument('--netG', default='', help="path to netG (to continue training)")
        parser.add_argument('--netD', default='', help="path to netD (to continue training)")
        parser.add_argument('--dnorm', default='spectral', choices=['batch', 'spectral', 'none', 'instance', 'layer'], help="Discriminator normalization")
        parser.add_argument('--exp_dir', type=str, default='EXP', help='directory of experiment')
        parser.add_argument('--exp_name', type=str, default='debug', help='directory of experiment')
        parser.add_argument('--manualSeed', type=int, help='manual seed')
        parser.add_argument('--compute_eig', type=int, choices=[0, 1], default=0)
        self.parser = parser

    def parse_args(self):
        """Parse ``sys.argv`` and return the options namespace."""
        return self.parser.parse_args()
if __name__ == "__main__":
    # Script entry point: parse command-line options and launch training.
    main(Config().parse_args())
|
[
"pickle.dump",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.randn",
"torch.cat",
"torch.nn.InstanceNorm2d",
"torch.nn.GroupNorm",
"torchvision.transforms.Normalize",
"os.path.join",
"random.randint",
"plot_path_tools.plot_eigenvalues",
"torch.load",
"os.path.exists",
"numpy.random.RandomState",
"torchvision.transforms.ToTensor",
"random.seed",
"gan_eval_metrics.mnist_inception_score",
"torch.nn.Tanh",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.cuda.manual_seed",
"torch.nn.BatchNorm2d",
"plot_path_tools.plot_path_stats",
"torch.cuda.is_available",
"torch.nn.LeakyReLU",
"torchvision.transforms.Resize",
"torch.utils.data.Subset",
"tensorboardX.SummaryWriter",
"torch.nn.ReLU",
"os.makedirs",
"torch.nn.ConvTranspose2d",
"torch.nn.utils.spectral_norm",
"time.time",
"torch.save",
"torch.sigmoid",
"plot_path_tools.compute_eigenvalues",
"torch.nn.functional.softplus",
"plot_path_tools.compute_path_stats"
] |
[((7565, 7610), 'os.path.join', 'os.path.join', (['config.exp_dir', 'config.exp_name'], {}), '(config.exp_dir, config.exp_name)\n', (7577, 7610), False, 'import os\n'), ((7693, 7729), 'os.path.join', 'os.path.join', (['exp_dir', '"""extra_plots"""'], {}), "(exp_dir, 'extra_plots')\n", (7705, 7729), False, 'import os\n'), ((7937, 7967), 'random.seed', 'random.seed', (['config.manualSeed'], {}), '(config.manualSeed)\n', (7948, 7967), False, 'import random\n'), ((7972, 8008), 'torch.manual_seed', 'torch.manual_seed', (['config.manualSeed'], {}), '(config.manualSeed)\n', (7989, 8008), False, 'import torch\n'), ((8013, 8046), 'numpy.random.seed', 'np.random.seed', (['config.manualSeed'], {}), '(config.manualSeed)\n', (8027, 8046), True, 'import numpy as np\n'), ((8054, 8079), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8077, 8079), False, 'import torch\n'), ((8445, 8492), 'torch.randn', 'torch.randn', (['(64)', 'config.nz', '(1)', '(1)'], {'device': 'device'}), '(64, config.nz, 1, 1, device=device)\n', (8456, 8492), False, 'import torch\n'), ((10854, 10884), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'exp_dir'}), '(log_dir=exp_dir)\n', (10867, 10884), False, 'from tensorboardX import SummaryWriter\n'), ((1623, 1652), 'torch.utils.data.Subset', 'Subset', (['dataset', 'idx[:subset]'], {}), '(dataset, idx[:subset])\n', (1629, 1652), False, 'from torch.utils.data import Subset\n'), ((7622, 7645), 'os.path.exists', 'os.path.exists', (['exp_dir'], {}), '(exp_dir)\n', (7636, 7645), False, 'import os\n'), ((7655, 7675), 'os.makedirs', 'os.makedirs', (['exp_dir'], {}), '(exp_dir)\n', (7666, 7675), False, 'import os\n'), ((7741, 7766), 'os.path.exists', 'os.path.exists', (['plots_dir'], {}), '(plots_dir)\n', (7755, 7766), False, 'import os\n'), ((7776, 7798), 'os.makedirs', 'os.makedirs', (['plots_dir'], {}), '(plots_dir)\n', (7787, 7798), False, 'import os\n'), ((7862, 7886), 'random.randint', 'random.randint', (['(1)', 
'(10000)'], {}), '(1, 10000)\n', (7876, 7886), False, 'import random\n'), ((8089, 8130), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['config.manualSeed'], {}), '(config.manualSeed)\n', (8111, 8130), False, 'import torch\n'), ((11190, 11275), 'torch.load', 'torch.load', (["('%s/checkpoint_step_%06d.pth' % (exp_dir, step))"], {'map_location': 'device'}), "('%s/checkpoint_step_%06d.pth' % (exp_dir, step), map_location=device\n )\n", (11200, 11275), False, 'import torch\n'), ((11479, 11609), 'plot_path_tools.compute_eigenvalues', 'compute_eigenvalues', (['evalG', 'evalD', 'eig_dataloader', 'config', 'model_loss_gen', 'model_loss_dis', 'device'], {'verbose': '(True)', 'n_eigs': 'n_eigs'}), '(evalG, evalD, eig_dataloader, config, model_loss_gen,\n model_loss_dis, device, verbose=True, n_eigs=n_eigs)\n', (11498, 11609), False, 'from plot_path_tools import compute_path_stats, plot_path_stats, compute_eigenvalues, plot_eigenvalues\n'), ((15201, 15219), 'torch.cat', 'torch.cat', (['IS_fake'], {}), '(IS_fake)\n', (15210, 15219), False, 'import torch\n'), ((15247, 15285), 'gan_eval_metrics.mnist_inception_score', 'mnist_inception_score', (['IS_fake', 'device'], {}), '(IS_fake, device)\n', (15268, 15285), False, 'from gan_eval_metrics import mnist_inception_score\n'), ((15564, 15642), 'torch.save', 'torch.save', (['checkpoint', "('%s/checkpoint_step_%06d.pth' % (exp_dir, global_step))"], {}), "(checkpoint, '%s/checkpoint_step_%06d.pth' % (exp_dir, global_step))\n", (15574, 15642), False, 'import torch\n'), ((17362, 17387), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17385, 17387), False, 'import argparse\n'), ((2610, 2662), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nz', '(ngf * 4)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(nz, ngf * 4, 4, 1, 0, bias=False)\n', (2628, 2662), True, 'import torch.nn as nn\n'), ((2676, 2699), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 4)'], {}), '(ngf * 4)\n', (2690, 2699), True, 
'import torch.nn as nn\n'), ((2713, 2726), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2720, 2726), True, 'import torch.nn as nn\n'), ((2741, 2798), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 4)', '(ngf * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n', (2759, 2798), True, 'import torch.nn as nn\n'), ((2812, 2835), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ngf * 2)'], {}), '(ngf * 2)\n', (2826, 2835), True, 'import torch.nn as nn\n'), ((2849, 2862), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2856, 2862), True, 'import torch.nn as nn\n'), ((2877, 2930), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(ngf * 2)', 'ngf', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(ngf * 2, ngf, 4, 2, 1, bias=False)\n', (2895, 2930), True, 'import torch.nn as nn\n'), ((2944, 2963), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['ngf'], {}), '(ngf)\n', (2958, 2963), True, 'import torch.nn as nn\n'), ((2977, 2990), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2984, 2990), True, 'import torch.nn as nn\n'), ((3005, 3052), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['ngf', 'nc', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(ngf, nc, 4, 2, 1, bias=True)\n', (3023, 3052), True, 'import torch.nn as nn\n'), ((3066, 3075), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3073, 3075), True, 'import torch.nn as nn\n'), ((5086, 5107), 'torch.sigmoid', 'torch.sigmoid', (['output'], {}), '(output)\n', (5099, 5107), False, 'import torch\n'), ((6944, 6962), 'torch.nn.functional.softplus', 'F.softplus', (['(-p_gen)'], {}), '(-p_gen)\n', (6954, 6962), True, 'import torch.nn.functional as F\n'), ((8168, 8193), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8191, 8193), False, 'import torch\n'), ((10738, 10774), 'os.path.join', 'os.path.join', (['exp_dir', '"""config.json"""'], {}), "(exp_dir, 'config.json')\n", (10750, 10774), False, 'import os\n'), ((11687, 
11735), 'os.path.join', 'os.path.join', (['plots_dir', "('eigenvalues_%d' % step)"], {}), "(plots_dir, 'eigenvalues_%d' % step)\n", (11699, 11735), False, 'import os\n'), ((12435, 12490), 'torch.randn', 'torch.randn', (['batch_size', 'config.nz', '(1)', '(1)'], {'device': 'device'}), '(batch_size, config.nz, 1, 1, device=device)\n', (12446, 12490), False, 'import torch\n'), ((15094, 15142), 'torch.randn', 'torch.randn', (['(500)', 'config.nz', '(1)', '(1)'], {'device': 'device'}), '(500, config.nz, 1, 1, device=device)\n', (15105, 15142), False, 'import torch\n'), ((15853, 15896), 'torch.load', 'torch.load', (['last_chkpt'], {'map_location': 'device'}), '(last_chkpt, map_location=device)\n', (15863, 15896), False, 'import torch\n'), ((16207, 16250), 'torch.load', 'torch.load', (['last_chkpt'], {'map_location': 'device'}), '(last_chkpt, map_location=device)\n', (16217, 16250), False, 'import torch\n'), ((16317, 16328), 'time.time', 'time.time', ([], {}), '()\n', (16326, 16328), False, 'import time\n'), ((16349, 16496), 'plot_path_tools.compute_path_stats', 'compute_path_stats', (['evalG', 'evalD', 'checkpoint_1', 'checkpoint_2', 'eval_dataloader', 'config', 'model_loss_gen', 'model_loss_dis', 'device'], {'verbose': '(True)'}), '(evalG, evalD, checkpoint_1, checkpoint_2,\n eval_dataloader, config, model_loss_gen, model_loss_dis, device,\n verbose=True)\n', (16367, 16496), False, 'from plot_path_tools import compute_path_stats, plot_path_stats, compute_eigenvalues, plot_eigenvalues\n'), ((16657, 16718), 'plot_path_tools.plot_path_stats', 'plot_path_stats', (['hist', 'plots_dir', 'summary_writer', 'global_step'], {}), '(hist, plots_dir, summary_writer, global_step)\n', (16672, 16718), False, 'from plot_path_tools import compute_path_stats, plot_path_stats, compute_eigenvalues, plot_eigenvalues\n'), ((17015, 17217), 'plot_path_tools.plot_eigenvalues', 'plot_eigenvalues', (['[gen_eigs_init, gen_eigs_curr]', '[dis_eigs_init, dis_eigs_curr]', '[game_eigs_init, 
game_eigs_curr]', "['init', 'step_%d' % global_step]", 'plots_dir', 'summary_writer'], {'step': 'global_step'}), "([gen_eigs_init, gen_eigs_curr], [dis_eigs_init,\n dis_eigs_curr], [game_eigs_init, game_eigs_curr], ['init', 'step_%d' %\n global_step], plots_dir, summary_writer, step=global_step)\n", (17031, 17217), False, 'from plot_path_tools import compute_path_stats, plot_path_stats, compute_eigenvalues, plot_eigenvalues\n'), ((1565, 1591), 'numpy.random.RandomState', 'np.random.RandomState', (['(123)'], {}), '(123)\n', (1586, 1591), True, 'import numpy as np\n'), ((2099, 2118), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['dim'], {}), '(dim)\n', (2113, 2118), True, 'import torch.nn as nn\n'), ((3885, 3916), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3897, 3916), True, 'import torch.nn as nn\n'), ((4032, 4063), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4044, 4063), True, 'import torch.nn as nn\n'), ((4183, 4214), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4195, 4214), True, 'import torch.nn as nn\n'), ((4233, 4274), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 4)', '(1)', '(4)', '(1)', '(0)'], {'bias': '(True)'}), '(ndf * 4, 1, 4, 1, 0, bias=True)\n', (4242, 4274), True, 'import torch.nn as nn\n'), ((4359, 4397), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'ndf', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(nc, ndf, 4, 2, 1, bias=True)\n', (4368, 4397), True, 'import torch.nn as nn\n'), ((4415, 4446), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4427, 4446), True, 'import torch.nn as nn\n'), ((4465, 4508), 'torch.nn.Conv2d', 'nn.Conv2d', (['ndf', '(ndf * 2)', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(ndf, ndf * 2, 4, 2, 1, bias=True)\n', (4474, 4508), True, 'import torch.nn as nn\n'), ((4526, 4549), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', 
(['(ndf * 2)'], {}), '(ndf * 2)\n', (4540, 4549), True, 'import torch.nn as nn\n'), ((4567, 4598), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4579, 4598), True, 'import torch.nn as nn\n'), ((4617, 4664), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 2)', '(ndf * 4)', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(ndf * 2, ndf * 4, 4, 2, 1, bias=True)\n', (4626, 4664), True, 'import torch.nn as nn\n'), ((4682, 4705), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(ndf * 4)'], {}), '(ndf * 4)\n', (4696, 4705), True, 'import torch.nn as nn\n'), ((4723, 4754), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4735, 4754), True, 'import torch.nn as nn\n'), ((4773, 4814), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 4)', '(1)', '(4)', '(1)', '(0)'], {'bias': '(True)'}), '(ndf * 4, 1, 4, 1, 0, bias=True)\n', (4782, 4814), True, 'import torch.nn as nn\n'), ((6771, 6790), 'torch.nn.functional.softplus', 'F.softplus', (['(-p_real)'], {}), '(-p_real)\n', (6781, 6790), True, 'import torch.nn.functional as F\n'), ((6800, 6817), 'torch.nn.functional.softplus', 'F.softplus', (['p_gen'], {}), '(p_gen)\n', (6810, 6817), True, 'import torch.nn.functional as F\n'), ((8737, 8760), 'torch.load', 'torch.load', (['config.netG'], {}), '(config.netG)\n', (8747, 8760), False, 'import torch\n'), ((9093, 9116), 'torch.load', 'torch.load', (['config.netD'], {}), '(config.netD)\n', (9103, 9116), False, 'import torch\n'), ((16623, 16643), 'pickle.dump', 'pickle.dump', (['hist', 'f'], {}), '(hist, f)\n', (16634, 16643), False, 'import pickle\n'), ((1314, 1342), 'torchvision.transforms.Resize', 'transforms.Resize', (['imageSize'], {}), '(imageSize)\n', (1331, 1342), True, 'import torchvision.transforms as transforms\n'), ((1373, 1394), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1392, 1394), True, 'import torchvision.transforms as transforms\n'), ((1425, 1461), 
'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5,)', '(0.5,)'], {}), '((0.5,), (0.5,))\n', (1445, 1461), True, 'import torchvision.transforms as transforms\n'), ((2173, 2195), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['dim'], {}), '(dim)\n', (2190, 2195), True, 'import torch.nn as nn\n'), ((2247, 2267), 'torch.nn.GroupNorm', 'nn.GroupNorm', (['(1)', 'dim'], {}), '(1, dim)\n', (2259, 2267), True, 'import torch.nn as nn\n'), ((3817, 3855), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'ndf', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(nc, ndf, 4, 2, 1, bias=True)\n', (3826, 3855), True, 'import torch.nn as nn\n'), ((3955, 3998), 'torch.nn.Conv2d', 'nn.Conv2d', (['ndf', '(ndf * 2)', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(ndf, ndf * 2, 4, 2, 1, bias=True)\n', (3964, 3998), True, 'import torch.nn as nn\n'), ((4102, 4149), 'torch.nn.Conv2d', 'nn.Conv2d', (['(ndf * 2)', '(ndf * 4)', '(4)', '(2)', '(1)'], {'bias': '(True)'}), '(ndf * 2, ndf * 4, 4, 2, 1, bias=True)\n', (4111, 4149), True, 'import torch.nn as nn\n'), ((2314, 2335), 'torch.nn.utils.spectral_norm', 'spectral_norm', (['module'], {}), '(module)\n', (2327, 2335), False, 'from torch.nn.utils import spectral_norm\n'), ((16762, 16773), 'time.time', 'time.time', ([], {}), '()\n', (16771, 16773), False, 'import time\n')]
|
import logging
from moonreader_tools.parsers.base import BookParser
from moonreader_tools.utils import (
get_book_type,
get_moonreader_files_from_filelist,
get_same_book_files,
title_from_fname,
)
from .drobpox_utils import dicts_from_pairs, extract_book_paths_from_dir_entries
class DropboxDownloader(object):
    """Fetch book data from a Dropbox-synchronized Moon+ Reader account."""

    # Default location of the Moon+ Reader cache inside a Dropbox account.
    _DEFAULT_DROPBOX_PATH = "/Apps/Books/.Moon+/Cache"

    def __init__(self, dropbox_client, books_path="", workers=8, logger=None):
        """
        :param dropbox_client: Instantiated dropbox client
        :param books_path: Absolute path to dropbox's dir with synchronized notes
        :param workers: number of concurrent workers to download data from Dropbox
        """
        self.__dropbox_client = dropbox_client
        self.books_path = books_path or self._DEFAULT_DROPBOX_PATH
        self.workers = workers

    def get_books(self, path: str = "", book_count: int = None):
        """Yield book objects built from a Dropbox folder.

        :param path: Dropbox directory with synchronized book data
        :param book_count: number of books to read (all when ``None``)
        """
        if not (path or self.books_path):
            raise ValueError("Path to read data from is not specified")
        folder = path or self.books_path
        listing = self.__dropbox_client.files_list_folder(folder)
        book_paths = extract_book_paths_from_dir_entries(listing.entries)
        moon_files = get_moonreader_files_from_filelist(book_paths)
        pairs = get_same_book_files(moon_files)
        if book_count is not None:
            pairs = pairs[:book_count]
        for pair in dicts_from_pairs(
            self.__dropbox_client, pairs, workers=self.workers
        ):
            try:
                note_file = pair["note_file"]
                stat_file = pair["stat_file"]
                source_name = note_file[0] or stat_file[0]
                book_name = title_from_fname(source_name)
                book_type = get_book_type(source_name)
                with BookParser(book_type=book_type) as parser:
                    parser = (
                        parser.set_notes_fobj(note_file[1])
                        .set_stats_fobj(stat_file[1])
                        .set_book_name(book_name)
                    )
                    yield parser.build()
            except Exception:
                logging.exception("Exception occured when creating book object.")
|
[
"moonreader_tools.parsers.base.BookParser",
"logging.exception",
"moonreader_tools.utils.title_from_fname",
"moonreader_tools.utils.get_book_type",
"moonreader_tools.utils.get_moonreader_files_from_filelist",
"moonreader_tools.utils.get_same_book_files"
] |
[((1554, 1595), 'moonreader_tools.utils.get_moonreader_files_from_filelist', 'get_moonreader_files_from_filelist', (['files'], {}), '(files)\n', (1588, 1595), False, 'from moonreader_tools.utils import get_book_type, get_moonreader_files_from_filelist, get_same_book_files, title_from_fname\n'), ((1746, 1783), 'moonreader_tools.utils.get_same_book_files', 'get_same_book_files', (['moonreader_files'], {}), '(moonreader_files)\n', (1765, 1783), False, 'from moonreader_tools.utils import get_book_type, get_moonreader_files_from_filelist, get_same_book_files, title_from_fname\n'), ((1656, 1693), 'moonreader_tools.utils.get_same_book_files', 'get_same_book_files', (['moonreader_files'], {}), '(moonreader_files)\n', (1675, 1693), False, 'from moonreader_tools.utils import get_book_type, get_moonreader_files_from_filelist, get_same_book_files, title_from_fname\n'), ((2038, 2084), 'moonreader_tools.utils.title_from_fname', 'title_from_fname', (['(note_file[0] or stat_file[0])'], {}), '(note_file[0] or stat_file[0])\n', (2054, 2084), False, 'from moonreader_tools.utils import get_book_type, get_moonreader_files_from_filelist, get_same_book_files, title_from_fname\n'), ((2113, 2156), 'moonreader_tools.utils.get_book_type', 'get_book_type', (['(note_file[0] or stat_file[0])'], {}), '(note_file[0] or stat_file[0])\n', (2126, 2156), False, 'from moonreader_tools.utils import get_book_type, get_moonreader_files_from_filelist, get_same_book_files, title_from_fname\n'), ((2178, 2209), 'moonreader_tools.parsers.base.BookParser', 'BookParser', ([], {'book_type': 'book_type'}), '(book_type=book_type)\n', (2188, 2209), False, 'from moonreader_tools.parsers.base import BookParser\n'), ((2598, 2624), 'logging.exception', 'logging.exception', (['err_msg'], {}), '(err_msg)\n', (2615, 2624), False, 'import logging\n')]
|
import pandas as pd
import numpy as np
import os
import argparse
from des_stacks.utils.gen_tools import get_good_des_chips
good_des_chips = get_good_des_chips()
def parser():
    """Build the CLI option parser for the catalogue combiner and parse argv."""
    ap = argparse.ArgumentParser()
    ap.add_argument('-f', '--field', default='all')
    ap.add_argument('-my', '--year', default='none')
    ap.add_argument('-ch', '--chip', default='all')
    ap.add_argument('-df', '--df', default='none')
    ap.add_argument('-sf', '--savename', default='combined.cat')
    return ap.parse_args()
def main(args):
    """Combine per-chip CAP catalogues into a single HDF5 results file.

    Iterates over the requested MiniYears, fields and chips, reads each
    per-chip catalogue CSV (optionally stacking onto an existing catalogue
    given via ``--df``) and saves the concatenated result to HDF5.
    """
    fields = ['X1', 'X2', 'X3', 'C1', 'C2', 'C3', 'E1', 'E2', 'S1', 'S2']
    mys = ['none']
    if args.year != 'none':
        # Accept a comma-separated list of MiniYears.  The old code iterated
        # the raw string, so a multi-character year was split into characters.
        mys = args.year.split(',')
    if args.field != 'all':
        try:
            fields = args.field.split(',')
        except Exception:
            try:
                fields = args.field[0].split(' ')
            except Exception:
                fields = args.field
    if args.chip != 'all':
        # BUG FIX: --chip used to be parsed here and then silently ignored.
        chips = [int(c) for c in args.chip.split(',')]
    else:
        chips = [int(c) for c in good_des_chips]
    frames = []
    if args.df != 'none':
        # Start from an existing combined catalogue when one is supplied.
        frames.append(pd.read_csv(args.df, index_col=0))
    for my in mys:
        for f in fields:
            f = 'SN-' + f
            for ch in chips:
                cap_chip_dir = '/media/data3/wiseman/des/coadding/5yr_stacks/MY%s/%s/CAP/%s' % (my, f, ch)
                cat = os.path.join(cap_chip_dir, '%s_%s_%s_obj_deep_v7.cat' % (my, f, ch))
                cat_df = pd.read_csv(cat, index_col=0)
                print('Adding cat: %s' % cat, ' of length ', len(cat_df))
                frames.append(cat_df)
    # Single concat instead of repeated DataFrame.append, which was O(n^2)
    # and has been removed in pandas 2.
    main_df = pd.concat(frames) if frames else pd.DataFrame()
    out_path = os.path.join('/media/data3/wiseman/des/coadding/results', args.savename)
    main_df.to_hdf(out_path, key='main')
    print('Saved new file to %s' % out_path)
if __name__=="__main__":
    # Script entry point: parse options and combine the catalogues.
    args=parser()
    main(args)
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"pandas.read_csv",
"des_stacks.utils.gen_tools.get_good_des_chips",
"os.path.join"
] |
[((140, 160), 'des_stacks.utils.gen_tools.get_good_des_chips', 'get_good_des_chips', ([], {}), '()\n', (158, 160), False, 'from des_stacks.utils.gen_tools import get_good_des_chips\n'), ((189, 214), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (212, 214), False, 'import argparse\n'), ((989, 1003), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1001, 1003), True, 'import pandas as pd\n'), ((1047, 1080), 'pandas.read_csv', 'pd.read_csv', (['args.df'], {'index_col': '(0)'}), '(args.df, index_col=0)\n', (1058, 1080), True, 'import pandas as pd\n'), ((1599, 1671), 'os.path.join', 'os.path.join', (['"""/media/data3/wiseman/des/coadding/results"""', 'args.savename'], {}), "('/media/data3/wiseman/des/coadding/results', args.savename)\n", (1611, 1671), False, 'import os\n'), ((1717, 1789), 'os.path.join', 'os.path.join', (['"""/media/data3/wiseman/des/coadding/results"""', 'args.savename'], {}), "('/media/data3/wiseman/des/coadding/results', args.savename)\n", (1729, 1789), False, 'import os\n'), ((1341, 1409), 'os.path.join', 'os.path.join', (['cap_chip_dir', "('%s_%s_%s_obj_deep_v7.cat' % (my, f, ch))"], {}), "(cap_chip_dir, '%s_%s_%s_obj_deep_v7.cat' % (my, f, ch))\n", (1353, 1409), False, 'import os\n'), ((1430, 1459), 'pandas.read_csv', 'pd.read_csv', (['cat'], {'index_col': '(0)'}), '(cat, index_col=0)\n', (1441, 1459), True, 'import pandas as pd\n')]
|
"""
Tests for the proxy support in pip.
"""
import pip
from tests.lib import SRC_DIR
from tests.lib.path import Path
def test_correct_pip_version():
    """
    Check that the imported ``pip`` package comes from the source tree.
    """
    imported_from = Path(pip.__file__).folder.folder.abspath
    assert imported_from == SRC_DIR
|
[
"tests.lib.path.Path"
] |
[((233, 251), 'tests.lib.path.Path', 'Path', (['pip.__file__'], {}), '(pip.__file__)\n', (237, 251), False, 'from tests.lib.path import Path\n')]
|
# -*- encoding: utf-8 -*-
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.core.urlresolvers import reverse
from mapentity.factories import UserFactory
from geotrek.common.parsers import Parser
class ViewsTest(TestCase):
    """Smoke tests for the common settings/admin views."""

    def setUp(self):
        # Log in as a regular user before every test.
        self.user = UserFactory.create(username='homer', password='<PASSWORD>')
        logged_in = self.client.login(
            username=self.user.username, password='<PASSWORD>')
        self.assertTrue(logged_in)

    def test_settings_json(self):
        """The settings endpoint is reachable by any authenticated user."""
        resp = self.client.get(reverse('common:settings_json'))
        self.assertEqual(resp.status_code, 200)

    def test_admin_check_extents(self):
        """The extents check is restricted to superusers."""
        endpoint = reverse('common:check_extents')
        # A plain user is redirected away...
        self.assertEqual(self.client.get(endpoint).status_code, 302)
        self.user.is_superuser = True
        self.user.save()
        # ...while a superuser gets the page.
        self.assertEqual(self.client.get(endpoint).status_code, 200)
class ViewsImportTest(TestCase):
    """Tests for the dataset import views (form access and file uploads)."""

    def setUp(self):
        # Authenticate a regular user for every test.
        self.user = UserFactory.create(username='homer', password='<PASSWORD>')
        success = self.client.login(username=self.user.username, password='<PASSWORD>')
        self.assertTrue(success)

    def test_import_form_access(self):
        url = reverse('common:import_dataset')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_import_update_access(self):
        url = reverse('common:import_update_json')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_import_from_file_good_file(self):
        self.user.is_superuser = True
        self.user.save()
        url = reverse('common:import_dataset')
        # BUG FIX: the archive was opened without ever being closed; the
        # context manager guarantees the handle is released even on failure.
        with open('geotrek/common/tests/data/test.txt.gz', 'r+') as real_archive:
            response_real = self.client.post(
                url, {
                    'upload-file': 'Upload',
                    'with-file-parser': '7',
                    'with-file-zipfile': real_archive
                }
            )
        self.assertEqual(response_real.status_code, 200)

    def test_import_from_file_bad_file(self):
        self.user.is_superuser = True
        self.user.save()
        Parser.label = "Test"
        fake_archive = SimpleUploadedFile(
            "file.doc", "file_content", content_type="application/msword")
        url = reverse('common:import_dataset')
        response_fake = self.client.post(
            url, {
                'upload-file': 'Upload',
                'with-file-parser': '7',
                'with-file-zipfile': fake_archive
            }
        )
        self.assertEqual(response_fake.status_code, 200)
        self.assertContains(response_fake, "File must be of ZIP type.", 1)
        Parser.label = None
|
[
"django.core.files.uploadedfile.SimpleUploadedFile",
"django.core.urlresolvers.reverse",
"mapentity.factories.UserFactory.create"
] |
[((325, 384), 'mapentity.factories.UserFactory.create', 'UserFactory.create', ([], {'username': '"""homer"""', 'password': '"""<PASSWORD>"""'}), "(username='homer', password='<PASSWORD>')\n", (343, 384), False, 'from mapentity.factories import UserFactory\n'), ((568, 599), 'django.core.urlresolvers.reverse', 'reverse', (['"""common:settings_json"""'], {}), "('common:settings_json')\n", (575, 599), False, 'from django.core.urlresolvers import reverse\n'), ((747, 778), 'django.core.urlresolvers.reverse', 'reverse', (['"""common:check_extents"""'], {}), "('common:check_extents')\n", (754, 778), False, 'from django.core.urlresolvers import reverse\n'), ((1103, 1162), 'mapentity.factories.UserFactory.create', 'UserFactory.create', ([], {'username': '"""homer"""', 'password': '"""<PASSWORD>"""'}), "(username='homer', password='<PASSWORD>')\n", (1121, 1162), False, 'from mapentity.factories import UserFactory\n'), ((1338, 1370), 'django.core.urlresolvers.reverse', 'reverse', (['"""common:import_dataset"""'], {}), "('common:import_dataset')\n", (1345, 1370), False, 'from django.core.urlresolvers import reverse\n'), ((1519, 1555), 'django.core.urlresolvers.reverse', 'reverse', (['"""common:import_update_json"""'], {}), "('common:import_update_json')\n", (1526, 1555), False, 'from django.core.urlresolvers import reverse\n'), ((1849, 1881), 'django.core.urlresolvers.reverse', 'reverse', (['"""common:import_dataset"""'], {}), "('common:import_dataset')\n", (1856, 1881), False, 'from django.core.urlresolvers import reverse\n'), ((2322, 2408), 'django.core.files.uploadedfile.SimpleUploadedFile', 'SimpleUploadedFile', (['"""file.doc"""', '"""file_content"""'], {'content_type': '"""application/msword"""'}), "('file.doc', 'file_content', content_type=\n 'application/msword')\n", (2340, 2408), False, 'from django.core.files.uploadedfile import SimpleUploadedFile\n'), ((2431, 2463), 'django.core.urlresolvers.reverse', 'reverse', (['"""common:import_dataset"""'], {}), 
"('common:import_dataset')\n", (2438, 2463), False, 'from django.core.urlresolvers import reverse\n')]
|
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
from src.system import System
def plot():
    """Plot the approach temperature against zone load for three flow rates."""
    config = {
        "hp": {"cop": 3.0},
        "swhe": {
            "pipe": {
                "outer-dia": 0.02667,
                "inner-dia": 0.0215392,
                "length": 100,
                "density": 950,
                "conductivity": 0.4,
            },
            "diameter": 1.2,
            "horizontal-spacing": 0.05,
            "vertical-spacing": 0.05,
        },
        "fluid": {"fluid-name": "PG", "concentration": 20},
    }
    system = System(config)
    loads = np.arange(-3000, 3000, 200)
    # One approach-temperature curve per mass flow rate, in plotting order.
    flow_rates = [0.1, 0.25, 1.0]
    curves = [[system.simulate(load, m_dot, 15) for load in loads] for m_dot in flow_rates]
    fig, ax = plt.subplots()
    ax.plot(loads, curves[0], label=r"$T_{appr}$ $\dot{m}=0.10$ [kg/s]")
    ax.plot(loads, curves[1], label=r"$T_{appr}$ $\dot{m}=0.25$ [kg/s]", linestyle="--")
    ax.plot(loads, curves[2], label=r"$T_{appr}$ $\dot{m}=1.00$ [kg/s]", linestyle=":")
    ax.set_xlabel(r"$\dot{q}_{zone}$ [W]")
    ax.set_ylabel(r"$T_{appr}$ [C]")
    ax.legend()
    ax.grid()
    out_file = Path(__file__).parent / "_system_approach_temp.png"
    plt.savefig(out_file, bbox_inches="tight")
if __name__ == "__main__":
    # Generate the approach-temperature figure when run as a script.
    plot()
|
[
"src.system.System",
"pathlib.Path",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] |
[((648, 660), 'src.system.System', 'System', (['data'], {}), '(data)\n', (654, 660), False, 'from src.system import System\n'), ((670, 697), 'numpy.arange', 'np.arange', (['(-3000)', '(3000)', '(200)'], {}), '(-3000, 3000, 200)\n', (679, 697), True, 'import numpy as np\n'), ((866, 880), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (878, 880), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1321), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f_name'], {'bbox_inches': '"""tight"""'}), "(f_name, bbox_inches='tight')\n", (1292, 1321), True, 'import matplotlib.pyplot as plt\n'), ((1225, 1239), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1229, 1239), False, 'from pathlib import Path\n')]
|
"""
Prim's (also known as Jarník's) algorithm is a greedy algorithm that finds a minimum
spanning tree for a weighted undirected graph. This means it finds a subset of the
edges that forms a tree that includes every vertex, where the total weight of all the
edges in the tree is minimized. The algorithm operates by building this tree one vertex
at a time, from an arbitrary starting vertex, at each step adding the cheapest possible
connection from the tree to another vertex.
"""
from sys import maxsize
from typing import Generic, Optional, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    """
    heap helper function get the position of the parent of the current node
    >>> get_parent_position(1)
    0
    >>> get_parent_position(2)
    0
    """
    # Floor-divide via divmod; the remainder tells left (1) vs right (0) child.
    parent_index, _ = divmod(position - 1, 2)
    return parent_index
def get_child_left_position(position: int) -> int:
    """
    heap helper function get the position of the left child of the current node
    >>> get_child_left_position(0)
    1
    """
    # Left child of node i lives at index 2*i + 1 in an array-backed heap.
    return position * 2 + 1
def get_child_right_position(position: int) -> int:
    """Heap helper: index of the right child of the node at ``position``.

    >>> get_child_right_position(0)
    2
    """
    return 2 * position + 2
class MinPriorityQueue(Generic[T]):
    """
    Minimum Priority Queue Class

    Functions:
    is_empty: function to check if the priority queue is empty
    push: function to add an element with given priority to the queue
    extract_min: function to remove and return the element with lowest weight (highest
    priority)
    update_key: function to update the weight of the given key
    _bubble_up: helper function to place a node at the proper position (upward
    movement)
    _bubble_down: helper function to place a node at the proper position (downward
    movement)
    _swap_nodes: helper function to swap the nodes at the given positions

    >>> queue = MinPriorityQueue()
    >>> queue.push(1, 1000)
    >>> queue.push(2, 100)
    >>> queue.push(3, 4000)
    >>> queue.push(4, 3000)
    >>> print(queue.extract_min())
    2
    >>> queue.update_key(4, 50)
    >>> print(queue.extract_min())
    4
    >>> print(queue.extract_min())
    1
    >>> print(queue.extract_min())
    3
    """

    def __init__(self) -> None:
        # heap is the array-form binary heap of (element, weight) pairs;
        # position_map tracks each element's current index in `heap`, which is
        # what makes update_key's O(1) node lookup possible.
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        # Swap the root with the last leaf, pop the old root, then restore the
        # heap property from the new root downward.
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        # The new weight may violate the heap property in either direction, so
        # compare against the parent to decide whether to sift up or down.
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        # If both children exist and the right child is the strictly smaller
        # one, try swapping with it; otherwise fall through to the
        # single-child checks below.
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight:
                if child_right_weight < weight:
                    self._swap_nodes(child_right_position, curr_pos)
                    return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            # No left child implies no right child either (complete tree).
            return
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Adjacency-map representation of an undirected, edge-weighted graph.

    Functions:
    add_node: function to add a node in the graph
    add_edge: function to add an edge between 2 nodes in the graph
    """

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Register `node` unless it is already present.
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Ensure both endpoints exist, then record the weight symmetrically.
        self.add_node(node1)
        self.add_node(node2)
        for start, end in ((node1, node2), (node2, node1)):
            self.connections[start][end] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, Optional[T]]]:
    """Grow a spanning tree of ``graph`` from an arbitrary start vertex.

    Returns ``(dist, parent)`` where ``parent`` maps every vertex to its
    predecessor in the tree (``None`` for the start vertex).  Note the
    relaxation used is ``dist[v] = dist[u] + w(u, v)``, i.e. Dijkstra-style
    accumulated distances rather than classic Prim edge keys.

    >>> graph = GraphUndirectedWeighted()
    >>> graph.add_edge("a", "b", 3)
    >>> graph.add_edge("b", "c", 10)
    >>> graph.add_edge("c", "d", 5)
    >>> graph.add_edge("a", "c", 15)
    >>> graph.add_edge("b", "d", 100)
    >>> dist, parent = prims_algo(graph)
    >>> abs(dist["a"] - dist["b"])
    3
    >>> abs(dist["d"] - dist["b"])
    15
    >>> abs(dist["a"] - dist["c"])
    13
    """
    dist: dict[T, int] = dict.fromkeys(graph.connections, maxsize)
    parent: dict[T, Optional[T]] = dict.fromkeys(graph.connections)
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for vertex, key in dist.items():
        priority_queue.push(vertex, key)
    if priority_queue.is_empty():
        return dist, parent
    # Seed the tree with the first extracted vertex, then repeatedly relax
    # the neighbours of each newly extracted vertex.
    node = priority_queue.extract_min()
    dist[node] = 0
    while True:
        for neighbour, edge_weight in graph.connections[node].items():
            candidate = dist[node] + edge_weight
            if candidate < dist[neighbour]:
                dist[neighbour] = candidate
                priority_queue.update_key(neighbour, candidate)
                parent[neighbour] = node
        if priority_queue.is_empty():
            return dist, parent
        node = priority_queue.extract_min()
|
[
"typing.TypeVar"
] |
[((558, 570), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (565, 570), False, 'from typing import Generic, Optional, TypeVar\n')]
|
import os.path as mod_path
import sys as mod_sys
import subprocess
from typing import *
def assert_in_git_repository() -> None:
    """Terminate the process unless the working directory is a git repo."""
    ok, _ = execute_git('status', output=False)
    if not ok:
        print('Not a git repository!!!')
        mod_sys.exit(1)
def execute_command(cmd: Union[str, List[str]], output: bool=True, prefix: str='', grep: Optional[str]=None) -> Tuple[bool, str]:
    """Run *cmd* and return ``(success, captured_output)``.

    :param cmd: the command, either as an argv list or a space-separated string
    :param output: when True, echo every kept line to stdout as it is collected
    :param prefix: string prepended to every collected line
    :param grep: when given, keep only lines containing this substring
    :return: (True on exit status 0, the collected output text)
    """
    result = ''
    # Fixed: use isinstance instead of `type(cmd) is list`.
    command = cmd if isinstance(cmd, list) else cmd.split(' ')
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
    (cmdout, cmderr) = p.communicate()
    if cmdout is None:
        # Defensive only: with stdout=PIPE, communicate() returns bytes.
        # Fixed: return bool False (the annotated type), not the int 0.
        return (False, "")
    for line in cmdout.decode('utf-8').split('\n'):
        output_line = prefix + ('%s' % line).rstrip() + '\n'
        if not grep or grep in output_line:
            if output and output_line:
                print(output_line.rstrip())
                mod_sys.stdout.flush()
            result += output_line
    return (not p.returncode, result)
def execute_git(command: str, output: bool=True, prefix: str='', grep: str="") -> Tuple[bool, str]:
    """Run ``git <command>`` through :func:`execute_command`."""
    full_command = 'git %s' % command
    return execute_command(full_command, output, prefix, grep)
def get_branches(remote: bool=False, all: bool=False, merged: bool=False, no_merged: bool=False) -> List[str]:
    """Return the cleaned branch names reported by ``git branch``.

    (The ``all`` parameter shadows the builtin; kept for caller compatibility.)
    """
    git_command = 'branch'
    flags = (
        (remote, ' -r'),
        (all, ' -a'),
        (merged is True, ' --merged'),
        (no_merged is True, ' --no-merged'),
    )
    for enabled, flag in flags:
        if enabled:
            git_command += flag
    success, result = execute_git(git_command, output=False)
    assert success
    assert result

    def _filter_branch(branch: str) -> str:
        if '*' in branch:
            # Current branch:
            return branch.replace('*', '').strip()
        elif '->' in branch:
            # Branch is an alias
            return branch.split('->')[0].strip()
        elif 'HEAD detached at' in branch:
            return 'HEAD'
        return branch.strip()

    cleaned = [_filter_branch(raw) for raw in result.strip().split('\n')]
    return [name for name in cleaned if name]
def delete_branch(branch: str, force: bool=False) -> None:
    """Delete *branch*: on its remote for ``remotes/...`` names, else locally.

    :param branch: branch name, optionally prefixed with ``remotes/``
    :param force: use ``-D`` instead of ``-d`` for a local delete
    """
    if branch.startswith('remotes/'):
        # Fixed: the original repeated the identical startswith('remotes/')
        # check in a nested if; the duplicate has been removed.
        branch = branch.replace('remotes/', '')
        parts = branch.split('/')
        if len(parts) >= 2:
            origin_name, branch_name = parts[0], "/".join(parts[1:])
            execute_git('push %s :%s' % (origin_name, branch_name))
        else:
            print('Don\'t know how to delete %s' % branch)
    else:
        execute_git('branch %s %s' % ('-D' if force else '-d', branch))
def get_config_properties() -> Dict[str, str]:
    """Return ``git config -l`` as a ``{key: value}`` mapping, keys lowercased."""
    executed, output = execute_git('config -l', output=False)
    if not executed:
        print('Error retrieving git config properties')
        mod_sys.exit(1)
    properties: Dict[str, str] = {}
    for line in output.split('\n'):
        # Split on the first '=' only; values may themselves contain '='.
        key, sep, value = line.partition('=')
        if sep:
            properties[key.strip().lower()] = value.strip()
    return properties
def is_changed() -> bool:
    """ Checks if current project has any noncommited changes. """
    _, status_lines = execute_git('status --porcelain', output=False)
    merge_in_progress = mod_path.exists('.git/MERGE_HEAD')
    return cast(bool, status_lines.strip() or merge_in_progress)
def get_git_sha1(branch_name: str) -> str:
    """Return the commit sha1 that *branch_name* resolves to.

    :raises Exception: when the branch cannot be resolved
    """
    ok, sha1 = execute_git('log -1 %s --format=%%H --' % branch_name, output=False)
    if not ok:
        raise Exception(f'Invalid branch {branch_name}')
    return sha1.strip()
def distance_to_commit(commit_1: str, commit_2: str) -> int:
    """Return the number of commits in the range ``commit_1..commit_2``."""
    ok, log = execute_git(f'rev-list {commit_1}..{commit_2} --count', output=False)
    if not ok:
        raise Exception(f'Error calculating distance between {commit_1}..{commit_2}')
    return int(log)
|
[
"subprocess.Popen",
"os.path.exists",
"sys.stdout.flush",
"sys.exit"
] |
[((504, 595), 'subprocess.Popen', 'subprocess.Popen', (['command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT', 'bufsize': '(-1)'}), '(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n bufsize=-1)\n', (520, 595), False, 'import subprocess\n'), ((3386, 3420), 'os.path.exists', 'mod_path.exists', (['""".git/MERGE_HEAD"""'], {}), "('.git/MERGE_HEAD')\n", (3401, 3420), True, 'import os.path as mod_path\n'), ((256, 271), 'sys.exit', 'mod_sys.exit', (['(1)'], {}), '(1)\n', (268, 271), True, 'import sys as mod_sys\n'), ((2902, 2917), 'sys.exit', 'mod_sys.exit', (['(1)'], {}), '(1)\n', (2914, 2917), True, 'import sys as mod_sys\n'), ((934, 956), 'sys.stdout.flush', 'mod_sys.stdout.flush', ([], {}), '()\n', (954, 956), True, 'import sys as mod_sys\n')]
|
import os
from uuid import uuid4
from django.utils.deconstruct import deconstructible
# rename file with uuid
@deconstructible
class PathAndRename(object):
    """Callable upload-path builder that renames files to a random uuid4 hex."""

    def __init__(self, sub_path):
        self.path = sub_path

    def __call__(self, instance, filename):
        # Keep the original extension, replace the basename with random hex.
        extension = filename.split('.')[-1]
        new_name = '{}.{}'.format(uuid4().hex, extension)
        return os.path.join(self.path, new_name)
|
[
"uuid.uuid4",
"os.path.join"
] |
[((471, 504), 'os.path.join', 'os.path.join', (['self.path', 'filename'], {}), '(self.path, filename)\n', (483, 504), False, 'import os\n'), ((392, 399), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (397, 399), False, 'from uuid import uuid4\n')]
|
# apply a binomial test (binom_test) to each pair
# for further ANOVA analysis
# gt/-our gt/-nerf -our/gt -our/nerf -nerf/gt nerf/-our
# 40 1 38 77 1 75
# 78 78 78 78 78 78
from scipy import stats
import numpy as np
# choose us, or nerf is no us
choose = [64, 110, 111]
stimuli = ["our-gt", "our-nerf", "nerf-gt"]
total = 60 * 2
binresult = []
for n_chosen in choose:
    # One-sided test: was this option picked more often than chance (p=0.5)?
    p_value = stats.binom_test(n_chosen, n=total, p=0.5, alternative="greater")
    print(p_value)
    binresult.append(p_value)
np.savetxt("bintest.csv", binresult)
|
[
"numpy.savetxt",
"scipy.stats.binom_test"
] |
[((525, 561), 'numpy.savetxt', 'np.savetxt', (['"""bintest.csv"""', 'binresult'], {}), "('bintest.csv', binresult)\n", (535, 561), True, 'import numpy as np\n'), ((413, 480), 'scipy.stats.binom_test', 'stats.binom_test', (['eachChoose'], {'n': 'total', 'p': '(0.5)', 'alternative': '"""greater"""'}), "(eachChoose, n=total, p=0.5, alternative='greater')\n", (429, 480), False, 'from scipy import stats\n')]
|
import re
import string
import stanza
import nltk.data
import unidecode
import copy
import tensorflow_hub as hub
from pythonrouge.pythonrouge import Pythonrouge
import os, os.path
embed = hub.load("/home/dani/Desktop/licenta/use")
def read_file_line_by_line(filename):
    """Read *filename* and return a list with one stripped entry per line."""
    with open(filename) as f:
        return [line.strip() for line in f]
def concatenate_text_as_array(text):
    """Join the given sequence of strings into one space-separated string."""
    return " ".join(text)
def parse_text_to_sentences(text, sentences_in_batch=1):
    """Split *text* into chunks of ``sentences_in_batch`` sentences each.

    Sentence boundaries come from NLTK's tokenizer; each chunk is the
    space-joined, edge-stripped run of consecutive sentences.
    """
    sentences = nltk.sent_tokenize(text)
    return [
        " ".join(sentences[start:start + sentences_in_batch]).strip()
        for start in range(0, len(sentences), sentences_in_batch)
    ]
def remove_punctuation(text):
    """Return *text* with all ASCII punctuation and the em dash removed.

    Fixed: the original started with ``text.replace("'", "")`` whose result
    was discarded (strings are immutable, so it was a no-op); the translate
    table below already deletes apostrophes, so the dead statement is gone.
    """
    return text.translate(str.maketrans('', '', string.punctuation + r"—"))
def split_in_tokens(text):
    """Split *text* on whitespace and return the resulting token list."""
    return text.split()
def remove_stop_words(tokens):
    """Drop every token that appears in the NLTK-derived stop-word list.

    Improvement: the stop-word list is converted to a set so membership
    testing is O(1) per token instead of a linear scan of the list.
    """
    stop_words = set(read_file_line_by_line(
        "resources/util/stop-words.txt"))
    return [word for word in tokens if word not in stop_words]
def tokens_to_lower_case(tokens):
    """Casefold every token (aggressive lowercasing for caseless matching)."""
    return list(map(str.casefold, tokens))
def is_english(s):
    """Return True when every character of *s* is plain ASCII."""
    return s.isascii()
# Lazily built, module-level Stanza pipeline; building one is expensive and
# the original recreated it on every lemmatize() call.
_LEMMA_PIPELINE = None


def lemmatize(words):
    """Lemmatize a pre-tokenized list of words with Stanza's English pipeline.

    :param words: list of token strings (pre-tokenized input)
    :return: list of lemma strings, flattened across sentences
    """
    global _LEMMA_PIPELINE
    if _LEMMA_PIPELINE is None:
        _LEMMA_PIPELINE = stanza.Pipeline(
            lang='en', processors='tokenize,mwt,pos,lemma',
            tokenize_pretokenized=True, verbose=False)
    doc = _LEMMA_PIPELINE([words])
    return [word.lemma for sent in doc.sentences for word in sent.words]
def transliterate_non_english_words(relevant_tokens):
    """Replace non-ASCII tokens in place with their ASCII transliteration.

    Mutates and returns ``relevant_tokens``.  (Idiom fix: enumerate instead
    of ``range(len(...))`` indexing.)
    """
    for index, token in enumerate(relevant_tokens):
        if not is_english(token):
            relevant_tokens[index] = unidecode.unidecode(token)
    return relevant_tokens
# compute the USE embedding of a given sentence
def sentence_to_embedding(sentence):
    # Uses the module-level Universal Sentence Encoder `embed`; returns the
    # first (only) row of the batch result as a numpy array.
    return embed([sentence]).numpy()[0]
# Numeric footnote markers directly following a letter or sentence
# punctuation; compiled once instead of on every remove_footnotes() call.
_FOOTNOTE_RE = re.compile(r"([a-zA-Z?!;,.\")\]])[0-9]*")


def remove_footnotes(text):
    """Remove numeric footnote markers that trail letters or punctuation."""
    return _FOOTNOTE_RE.sub(r"\1", text)
def read_file_to_text(filename):
    """Read *filename* and return its content as one casefolded string."""
    content = concatenate_text_as_array(read_file_line_by_line(filename))
    return content.casefold()
def read_rough_file_to_text(filename):
    """Read *filename* and return its content joined by spaces, case kept."""
    return concatenate_text_as_array(read_file_line_by_line(filename))
def number_of_sentences_in_text(text):
    """Return how many sentence groups the tokenizer finds in *text*."""
    return len(parse_text_to_sentences(text))
def prepare_data(document_number=1):
    """Load article *document_number* and build USE embeddings per sentence.

    Returns ``(sentence_embeddings, raw_sentences, abstract, title,
    title_embedding, rough_abstract)``.

    Fixed: the bare ``except:`` around lemmatization is narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    base = "/home/dani/Desktop/licenta/bachelor-thesis/thesis-project/resources/articles/"
    title = read_file_to_text(base + str(document_number) + "-c.txt")
    abstract = read_file_to_text(base + str(document_number) + "-b.txt")
    rough_abstract = read_rough_file_to_text(base + str(document_number) + "-b.txt")
    text_lines = read_file_line_by_line(base + str(document_number) + "-a.txt")
    text = remove_footnotes(concatenate_text_as_array(text_lines))
    text_as_sentences = parse_text_to_sentences(text)
    text_as_sentences_without_footnotes = list(text_as_sentences)
    sentences_as_embeddings = []
    for sentence in text_as_sentences:
        sentence = remove_punctuation(sentence)
        sentence = split_in_tokens(sentence)
        sentence = tokens_to_lower_case(sentence)
        sentence = remove_stop_words(sentence)
        sentence = transliterate_non_english_words(sentence)
        backup = copy.copy(sentence)
        try:
            sentence = lemmatize(sentence)
        except Exception:
            # Lemmatization occasionally fails on odd tokens; fall back to
            # the un-lemmatized tokens instead of aborting the document.
            print("didn't do lemma")
            sentence = backup
        sentence = concatenate_text_as_array(sentence)
        sentence = sentence_to_embedding(sentence)
        sentences_as_embeddings.append(sentence)
    return sentences_as_embeddings, text_as_sentences_without_footnotes, abstract, title, sentence_to_embedding(title), rough_abstract
def final_results(scores):
    """Average each ROUGE metric across all per-document score dicts."""
    totals = {}
    for score in scores:
        for metric, value in score.items():
            totals[metric] = totals.get(metric, 0) + value
    count = len(scores)
    for metric in totals:
        totals[metric] = totals[metric] / count
    return totals
def rouge_score(generated_summary, human_summary):
    """Score a generated summary against a reference with ROUGE (L and W).

    Pythonrouge expects nested lists: [[summary]] and [[[reference]]].
    """
    scorer = Pythonrouge(
        summary_file_exist=False,
        ROUGE_L=True,
        ROUGE_W=True,
        summary=[[generated_summary]],
        reference=[[[human_summary]]],
    )
    return scorer.calc_score()
def get_titles(bound):
    """Read the raw titles of articles ``1..bound`` from the resources folder."""
    prefix = "/home/dani/Desktop/licenta/bachelor-thesis/thesis-project/resources/articles/"
    return [
        read_rough_file_to_text(prefix + str(index) + "-c.txt")
        for index in range(1, bound + 1)
    ]
def get_duc_embeddings(text):
    """Build a USE embedding for every sentence of a DUC document body.

    Fixed: the bare ``except:`` around lemmatization is narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit are not swallowed.
    """
    sentences_as_embeddings = []
    for sentence in parse_text_to_sentences(text):
        sentence = remove_punctuation(sentence)
        sentence = split_in_tokens(sentence)
        sentence = tokens_to_lower_case(sentence)
        sentence = remove_stop_words(sentence)
        sentence = transliterate_non_english_words(sentence)
        backup = copy.copy(sentence)
        try:
            sentence = lemmatize(sentence)
        except Exception:
            # Fall back to the un-lemmatized tokens rather than failing.
            print("didn't do lemma")
            sentence = backup
        sentence = concatenate_text_as_array(sentence)
        sentences_as_embeddings.append(sentence_to_embedding(sentence))
    return sentences_as_embeddings
def get_duc_sentences(text):
    """Return the DUC document body split into individual sentences."""
    return parse_text_to_sentences(text)
def preprocess_duc(doc, summary):
    """Prepare a DUC document/summary pair for the summarization pipeline.

    Returns the same 6-tuple shape as ``prepare_data``; the plain abstract is
    reused in the rough-abstract slot.  Both inputs appear to be dicts with a
    "body" key (and "title" for *doc*) -- TODO confirm the exact schema.
    Returns six ``None`` values when either input is missing.
    """
    if doc is None or summary is None:
        return None, None, None, None, None, None
    sentences_as_embeddings = get_duc_embeddings(doc["body"])
    duc_as_sentences = get_duc_sentences(doc["body"])
    abstract = summary["body"]
    title = doc["title"]
    title_embedding = sentence_to_embedding(doc["title"].casefold())
    return sentences_as_embeddings, duc_as_sentences, abstract, title, title_embedding, abstract  # last param is not rough
def get_number_of_texts_in_folder(directory_path):
    """Count articles in *directory_path*, assuming three files per article."""
    _, _, files = next(os.walk(directory_path))
    return len(files) // 3
|
[
"pythonrouge.pythonrouge.Pythonrouge",
"unidecode.unidecode",
"tensorflow_hub.load",
"os.walk",
"copy.copy",
"stanza.Pipeline",
"re.sub"
] |
[((190, 232), 'tensorflow_hub.load', 'hub.load', (['"""/home/dani/Desktop/licenta/use"""'], {}), "('/home/dani/Desktop/licenta/use')\n", (198, 232), True, 'import tensorflow_hub as hub\n'), ((2010, 2120), 'stanza.Pipeline', 'stanza.Pipeline', ([], {'lang': '"""en"""', 'processors': '"""tokenize,mwt,pos,lemma"""', 'tokenize_pretokenized': '(True)', 'verbose': '(False)'}), "(lang='en', processors='tokenize,mwt,pos,lemma',\n tokenize_pretokenized=True, verbose=False)\n", (2025, 2120), False, 'import stanza\n'), ((2735, 2786), 're.sub', 're.sub', (['"""([a-zA-Z?!;,.\\\\")\\\\]])[0-9]*"""', '"""\\\\1"""', 'text'], {}), '(\'([a-zA-Z?!;,.\\\\")\\\\]])[0-9]*\', \'\\\\1\', text)\n', (2741, 2786), False, 'import re\n'), ((5471, 5603), 'pythonrouge.pythonrouge.Pythonrouge', 'Pythonrouge', ([], {'summary_file_exist': '(False)', 'ROUGE_L': '(True)', 'ROUGE_W': '(True)', 'summary': '[[generated_summary]]', 'reference': '[[[human_summary]]]'}), '(summary_file_exist=False, ROUGE_L=True, ROUGE_W=True, summary=[\n [generated_summary]], reference=[[[human_summary]]])\n', (5482, 5603), False, 'from pythonrouge.pythonrouge import Pythonrouge\n'), ((4506, 4525), 'copy.copy', 'copy.copy', (['sentence'], {}), '(sentence)\n', (4515, 4525), False, 'import copy\n'), ((6372, 6391), 'copy.copy', 'copy.copy', (['sentence'], {}), '(sentence)\n', (6381, 6391), False, 'import copy\n'), ((7365, 7388), 'os.walk', 'os.walk', (['directory_path'], {}), '(directory_path)\n', (7372, 7388), False, 'import os, os.path\n'), ((2461, 2500), 'unidecode.unidecode', 'unidecode.unidecode', (['relevant_tokens[i]'], {}), '(relevant_tokens[i])\n', (2480, 2500), False, 'import unidecode\n')]
|
# -*- encoding: utf-8 -*-
from django.test import TestCase
from unit_field.units import Unit, UnitValue, get_choices
class UnitTest(TestCase):
    """Tests for the writable attributes of ``Unit``."""

    def test_attribute_factor(self):
        """The "factor" attribute can be set."""
        unit = Unit(0.01, 'cm', 'centimetre')
        self.assertEqual(unit.factor, 0.01)
        unit.factor = 0.32
        self.assertEqual(unit.factor, 0.32)

    def test_attribute_abbrev(self):
        """The "abbrev" attribute can be set."""
        unit = Unit(0.01, u'cm', u'centimetre')
        self.assertEqual(unit.abbrev, u'cm')
        unit.abbrev = u'cm²'
        self.assertEqual(unit.abbrev, u'cm²')

    def test_attribute_label(self):
        """The "label" attribute can be set."""
        unit = Unit(0.01, u'cm', u'centimetre')
        self.assertEqual(unit.label, u'centimetre')
        unit.label = u'metre'
        self.assertEqual(unit.label, u'metre')
class UnitValueTest(TestCase):
    """Tests for ``UnitValue`` and the ``get_choices`` helper."""

    def test_attribute_input(self):
        """The "input" attribute can be set."""
        value = UnitValue(7.1, 0.01)
        self.assertEqual(value.input, 7.1)
        value.input = 4
        self.assertEqual(value.input, 4)

    def test_attribute_unit(self):
        """The "unit" attribute can be set."""
        value = UnitValue(7.1, 0.01)
        self.assertEqual(value.unit, 0.01)
        value.unit = 0.1
        self.assertEqual(value.unit, 0.1)

    def test_property_value(self):
        """``value`` is the user input multiplied by the unit factor."""
        cases = [
            ((7.1, 0.1), 0.71),
            ((0.1, 0.1), 0.01),
            ((5, 0.01), 0.05),
            ((2.4, 1000), 2400),
        ]
        for (raw, factor), expected in cases:
            uv = UnitValue(raw, factor)
            self.assertAlmostEqual(uv.value, expected, places=12)

    def test_get_choices(self):
        """``get_choices`` maps units to (factor, abbrev) pairs."""
        units = [
            Unit(0.001, 'mm', 'milimetre'),
            Unit(0.01, 'cm', 'centimetre'),
            Unit(0.1, 'dm', 'decimetre'),
        ]
        expected = [(0.001, 'mm'), (0.01, 'cm'), (0.1, 'dm')]
        self.assertEqual(get_choices(units), expected)
|
[
"unit_field.units.UnitValue",
"unit_field.units.get_choices",
"unit_field.units.Unit"
] |
[((259, 289), 'unit_field.units.Unit', 'Unit', (['(0.01)', '"""cm"""', '"""centimetre"""'], {}), "(0.01, 'cm', 'centimetre')\n", (263, 289), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((512, 544), 'unit_field.units.Unit', 'Unit', (['(0.01)', 'u"""cm"""', 'u"""centimetre"""'], {}), "(0.01, u'cm', u'centimetre')\n", (516, 544), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((770, 802), 'unit_field.units.Unit', 'Unit', (['(0.01)', 'u"""cm"""', 'u"""centimetre"""'], {}), "(0.01, u'cm', u'centimetre')\n", (774, 802), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((1068, 1088), 'unit_field.units.UnitValue', 'UnitValue', (['(7.1)', '(0.01)'], {}), '(7.1, 0.01)\n', (1077, 1088), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((1298, 1318), 'unit_field.units.UnitValue', 'UnitValue', (['(7.1)', '(0.01)'], {}), '(7.1, 0.01)\n', (1307, 1318), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((1893, 1948), 'unit_field.units.UnitValue', 'UnitValue', (["testCase['params'][0]", "testCase['params'][1]"], {}), "(testCase['params'][0], testCase['params'][1])\n", (1902, 1948), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((2231, 2261), 'unit_field.units.Unit', 'Unit', (['(0.001)', '"""mm"""', '"""milimetre"""'], {}), "(0.001, 'mm', 'milimetre')\n", (2235, 2261), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((2276, 2306), 'unit_field.units.Unit', 'Unit', (['(0.01)', '"""cm"""', '"""centimetre"""'], {}), "(0.01, 'cm', 'centimetre')\n", (2280, 2306), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((2321, 2349), 'unit_field.units.Unit', 'Unit', (['(0.1)', '"""dm"""', '"""decimetre"""'], {}), "(0.1, 'dm', 'decimetre')\n", (2325, 2349), False, 'from unit_field.units import Unit, UnitValue, get_choices\n'), ((2445, 2459), 'unit_field.units.get_choices', 'get_choices', (['a'], {}), '(a)\n', 
(2456, 2459), False, 'from unit_field.units import Unit, UnitValue, get_choices\n')]
|
#!/usr/bin/python
import os
from depp import Model_pl
from depp import default_config
import pkg_resources
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from omegaconf import OmegaConf
# Pin BLAS/OpenMP thread pools to a single thread -- presumably so worker
# processes spawned during training do not oversubscribe the CPU (TODO confirm).
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
def main():
    """Train a depp model configured by CLI overrides merged onto defaults."""
    args_base = OmegaConf.create(default_config.default_config)
    args_cli = OmegaConf.from_cli()
    args = OmegaConf.merge(args_base, args_cli)

    # Make sure checkpoint and summary directories exist.
    model_dir = args.model_dir
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    summary_dir = args.summary_dir
    if not os.path.isdir(summary_dir):
        os.makedirs(summary_dir)

    model = Model_pl.model(args=args)
    logger = TensorBoardLogger(
        save_dir=args.summary_dir,
        name=args.exp_name
    )
    early_stop_callback = EarlyStopping(
        monitor='val_loss',
        min_delta=0.00,
        patience=args.patience,
        verbose=False,
        mode='min'
    )
    checkpoint_callback = ModelCheckpoint(
        filepath=model_dir,
        save_top_k=1,
        verbose=True,
        monitor='val_loss',
        mode='min',
        prefix=''
    )
    print(model_dir)

    # Fixed: the two Trainer invocations duplicated every argument and only
    # differed in distributed_backend; build shared kwargs once and add
    # 'ddp' when training on GPUs.
    trainer_kwargs = dict(
        logger=logger,
        gpus=args.gpus,
        progress_bar_refresh_rate=args.bar_update_freq,
        check_val_every_n_epoch=args.val_freq,
        max_epochs=args.epoch,
        gradient_clip_val=args.cp,
        benchmark=True,
        callbacks=[early_stop_callback],
        checkpoint_callback=checkpoint_callback,
    )
    if args.gpus != 0:
        trainer_kwargs['distributed_backend'] = 'ddp'
    trainer = pl.Trainer(**trainer_kwargs)
    trainer.fit(model)
if __name__ == '__main__':
main()
|
[
"pytorch_lightning.callbacks.ModelCheckpoint",
"pytorch_lightning.Trainer",
"os.makedirs",
"os.path.isdir",
"depp.Model_pl.model",
"omegaconf.OmegaConf.merge",
"omegaconf.OmegaConf.from_cli",
"omegaconf.OmegaConf.create",
"pytorch_lightning.loggers.TensorBoardLogger",
"pytorch_lightning.callbacks.early_stopping.EarlyStopping"
] |
[((457, 504), 'omegaconf.OmegaConf.create', 'OmegaConf.create', (['default_config.default_config'], {}), '(default_config.default_config)\n', (473, 504), False, 'from omegaconf import OmegaConf\n'), ((521, 541), 'omegaconf.OmegaConf.from_cli', 'OmegaConf.from_cli', ([], {}), '()\n', (539, 541), False, 'from omegaconf import OmegaConf\n'), ((965, 1001), 'omegaconf.OmegaConf.merge', 'OmegaConf.merge', (['args_base', 'args_cli'], {}), '(args_base, args_cli)\n', (980, 1001), False, 'from omegaconf import OmegaConf\n'), ((1224, 1249), 'depp.Model_pl.model', 'Model_pl.model', ([], {'args': 'args'}), '(args=args)\n', (1238, 1249), False, 'from depp import Model_pl\n'), ((1264, 1328), 'pytorch_lightning.loggers.TensorBoardLogger', 'TensorBoardLogger', ([], {'save_dir': 'args.summary_dir', 'name': 'args.exp_name'}), '(save_dir=args.summary_dir, name=args.exp_name)\n', (1281, 1328), False, 'from pytorch_lightning.loggers import TensorBoardLogger\n'), ((1378, 1481), 'pytorch_lightning.callbacks.early_stopping.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0.0)', 'patience': 'args.patience', 'verbose': '(False)', 'mode': '"""min"""'}), "(monitor='val_loss', min_delta=0.0, patience=args.patience,\n verbose=False, mode='min')\n", (1391, 1481), False, 'from pytorch_lightning.callbacks.early_stopping import EarlyStopping\n'), ((1552, 1663), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'model_dir', 'save_top_k': '(1)', 'verbose': '(True)', 'monitor': '"""val_loss"""', 'mode': '"""min"""', 'prefix': '""""""'}), "(filepath=model_dir, save_top_k=1, verbose=True, monitor=\n 'val_loss', mode='min', prefix='')\n", (1567, 1663), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((1045, 1069), 'os.path.isdir', 'os.path.isdir', (['model_dir'], {}), '(model_dir)\n', (1058, 1069), False, 'import os\n'), ((1079, 1101), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (1090, 1101), 
False, 'import os\n'), ((1150, 1176), 'os.path.isdir', 'os.path.isdir', (['summary_dir'], {}), '(summary_dir)\n', (1163, 1176), False, 'import os\n'), ((1186, 1210), 'os.makedirs', 'os.makedirs', (['summary_dir'], {}), '(summary_dir)\n', (1197, 1210), False, 'import os\n'), ((1775, 2058), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'logger': 'logger', 'gpus': 'args.gpus', 'progress_bar_refresh_rate': 'args.bar_update_freq', 'check_val_every_n_epoch': 'args.val_freq', 'max_epochs': 'args.epoch', 'gradient_clip_val': 'args.cp', 'benchmark': '(True)', 'callbacks': '[early_stop_callback]', 'checkpoint_callback': 'checkpoint_callback'}), '(logger=logger, gpus=args.gpus, progress_bar_refresh_rate=args.\n bar_update_freq, check_val_every_n_epoch=args.val_freq, max_epochs=args\n .epoch, gradient_clip_val=args.cp, benchmark=True, callbacks=[\n early_stop_callback], checkpoint_callback=checkpoint_callback)\n', (1785, 2058), True, 'import pytorch_lightning as pl\n'), ((2190, 2504), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'logger': 'logger', 'gpus': 'args.gpus', 'progress_bar_refresh_rate': 'args.bar_update_freq', 'distributed_backend': '"""ddp"""', 'check_val_every_n_epoch': 'args.val_freq', 'max_epochs': 'args.epoch', 'gradient_clip_val': 'args.cp', 'benchmark': '(True)', 'callbacks': '[early_stop_callback]', 'checkpoint_callback': 'checkpoint_callback'}), "(logger=logger, gpus=args.gpus, progress_bar_refresh_rate=args.\n bar_update_freq, distributed_backend='ddp', check_val_every_n_epoch=\n args.val_freq, max_epochs=args.epoch, gradient_clip_val=args.cp,\n benchmark=True, callbacks=[early_stop_callback], checkpoint_callback=\n checkpoint_callback)\n", (2200, 2504), True, 'import pytorch_lightning as pl\n')]
|
from mltoolkit.mldp.steps.transformers import BaseTransformer
from copy import deepcopy
class FieldDuplicator(BaseTransformer):
    """Copies the values of existing fields into newly named fields."""

    def __init__(self, old_to_new_fnames, **kwargs):
        super(FieldDuplicator, self).__init__(**kwargs)
        self.old_to_new_fnames = old_to_new_fnames

    def _transform(self, data_chunk):
        # Deep-copy so mutating one field later does not leak into the other.
        for source_name, target_name in self.old_to_new_fnames.items():
            data_chunk[target_name] = deepcopy(data_chunk[source_name])
        return data_chunk
|
[
"copy.deepcopy"
] |
[((479, 507), 'copy.deepcopy', 'deepcopy', (['data_chunk[old_fn]'], {}), '(data_chunk[old_fn])\n', (487, 507), False, 'from copy import deepcopy\n')]
|
import imp
import sys
import os
from ..common import *
class LoaderError(ImportError):
"""
This error is thrown when the module loader encounters an exception or
an unrecoverable state while attempting to load a dynamically located
module.
"""
def __init__(self, msg):
"""
Creates a loader error.
:param msg: the loader error message to relay upwards
"""
self.msg = msg
class ModuleLoader(object):
"""
As per PEP302, this module loader provides all of the necessary context
for loading a python file and executing its contents.
"""
def __init__(self, module_path, module_name, load_target, is_pkg):
"""
Creates a new module loader.
:param module_path: the path where the module resides
:param module_name: the full name of the module to load
:param load_target: the absolute path of the module file to load
:param is_pkg: true if this module is a package, false otherwise
"""
self.module_path = module_path
self.module_name = module_name
self.load_target = load_target
self.is_pkg = is_pkg
def load_module_py_path(self, module_name, path):
file_ext = os.path.splitext(path)[1]
module = None
if file_ext.lower() == '.py':
module = imp.load_source(module_name, path)
elif file_ext.lower() == '.pyc':
module = imp.load_compiled(module_name, path)
return module
def load_module(self, module_name):
"""
Loads a module's code and sets the module's expected hidden
variables. For more information on these variables and what they
are for, please see PEP302.
:param module_name: the full name of the module to load
"""
if module_name != self.module_name:
raise LoaderError(
'Requesting a module that the loader is unaware of.')
if module_name in sys.modules:
return sys.modules[module_name]
module = self.load_module_py_path(module_name, self.load_target)
if self.is_pkg:
module.__path__ = [self.module_path]
module.__package__ = module_name
else:
module.__package__ = module_name.rpartition('.')[0]
sys.modules[module_name] = module
return module
class ModuleFinder(object):
"""
As per PEP302, this module loader provides all of the necessary context
for dynamically locating python modules based. This finder searches
directories based on paths added to its internal list of available
search directories.
"""
def __init__(self, paths=None):
"""
Creates a module finder.
:param paths: the paths to include in the search list by default
"""
self.paths = paths if paths else list()
def add_path(self, path):
"""
Adds a path to search through when attempting to look up a module.
:param path: the path the add to the list of searchable paths
"""
if path not in self.paths:
self.paths.append(path)
def find_module(self, module_name, path=None):
"""
Searches the paths for the required module.
:param module_name: the full name of the module to find
:param path: set to None when the module in being searched for is a
top-level module - otherwise this is set to
package.__path__ for submodules and subpackages (unused)
"""
module_path = os.path.join(*module_name.split(MODULE_PATH_SEP))
for search_root in self.paths:
target_path = os.path.join(search_root, module_path)
is_pkg = False
# If the target references a directory, try to load it as
# a module by referencing the __init__.py file, otherwise
# append .py and attempt to resolve it.
if os.path.isdir(target_path):
target_file = os.path.join(target_path, '__init__.py')
is_pkg = True
else:
target_file = '{}.py'.format(target_path)
if os.path.exists(target_file):
return ModuleLoader(
target_path, module_name, target_file, is_pkg)
return None
|
[
"os.path.isdir",
"imp.load_compiled",
"os.path.exists",
"imp.load_source",
"os.path.splitext",
"os.path.join"
] |
[((1243, 1265), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1259, 1265), False, 'import os\n'), ((1350, 1384), 'imp.load_source', 'imp.load_source', (['module_name', 'path'], {}), '(module_name, path)\n', (1365, 1384), False, 'import imp\n'), ((3704, 3742), 'os.path.join', 'os.path.join', (['search_root', 'module_path'], {}), '(search_root, module_path)\n', (3716, 3742), False, 'import os\n'), ((3978, 4004), 'os.path.isdir', 'os.path.isdir', (['target_path'], {}), '(target_path)\n', (3991, 4004), False, 'import os\n'), ((4199, 4226), 'os.path.exists', 'os.path.exists', (['target_file'], {}), '(target_file)\n', (4213, 4226), False, 'import os\n'), ((1447, 1483), 'imp.load_compiled', 'imp.load_compiled', (['module_name', 'path'], {}), '(module_name, path)\n', (1464, 1483), False, 'import imp\n'), ((4036, 4076), 'os.path.join', 'os.path.join', (['target_path', '"""__init__.py"""'], {}), "(target_path, '__init__.py')\n", (4048, 4076), False, 'import os\n')]
|
"""
Run users examples to check authentication.
"""
from pprint import pprint
from eventstore_grpc.options import base_options
from eventstore_grpc import EventStoreDBClient, JSONEventData
conn_str = "esdb://localhost:2111,localhost:2112,localhost:2113?tls&rootCertificate=./tests/certs/ca/ca.crt"
default_user = {"username": "admin", "password": "<PASSWORD>"}
credentials = base_options.as_credentials(**default_user)
client = EventStoreDBClient(conn_str)
# Create new admin user.
new_user = {
"login_name": "john-doe",
"password": "<PASSWORD>",
"full_name": "<NAME>",
"groups": ["$admins", "event-store-guys"],
}
print("Creating new user:")
pprint(new_user)
client.create_user(**new_user, credentials=credentials)
# Check user details.
user_details = client.get_user_details(new_user["login_name"], credentials=credentials)
print("\nUSER DETAILS:")
for elm in user_details:
print(f"FULL NAME: {elm.user_details.full_name}")
print(f"LOGIN NAME: {elm.user_details.login_name}")
print(f"GROUPS : {elm.user_details.groups}")
# Delete user.
print("\nDeleting user...")
result = client.delete_user(new_user["login_name"], credentials=credentials)
print(f"Poor {new_user['full_name'].split()[0]}'s gone 😢")
|
[
"pprint.pprint",
"eventstore_grpc.EventStoreDBClient",
"eventstore_grpc.options.base_options.as_credentials"
] |
[((378, 421), 'eventstore_grpc.options.base_options.as_credentials', 'base_options.as_credentials', ([], {}), '(**default_user)\n', (405, 421), False, 'from eventstore_grpc.options import base_options\n'), ((432, 460), 'eventstore_grpc.EventStoreDBClient', 'EventStoreDBClient', (['conn_str'], {}), '(conn_str)\n', (450, 460), False, 'from eventstore_grpc import EventStoreDBClient, JSONEventData\n'), ((665, 681), 'pprint.pprint', 'pprint', (['new_user'], {}), '(new_user)\n', (671, 681), False, 'from pprint import pprint\n')]
|
from __future__ import print_function
import html5lib
from unittest import TestCase
from fluent_contents.utils.html import clean_html
class TextPluginTests(TestCase):
"""
Test whether the sanitation works as expected.
"""
HTML1_ORIGINAL = u'<p><img src="/media/image.jpg" alt="" width="460" height="300" /> <img style="float: left;" src="/media/image2.jpg" alt="" width="460" height="130" /></p><p> </p>'
def test_broken_html5lib(self):
"""
Test againt https://github.com/html5lib/html5lib-python/issues/189
"""
msg = "This version of html5lib is known to break relative URLs!\nUse version 0.999 instead."
self.assertTrue(html5lib.__version__ not in ['1.0b5', '1.0b6', '0.9999', '0.99999'], msg)
def test_clean_html5lib(self):
"""
Test how clean performs.
"""
cleaned = clean_html(self.HTML1_ORIGINAL)
self.assertTrue('460' in cleaned, u"Missing elements in {0}".format(cleaned))
self.assertTrue('float: left' in cleaned, u"Missing elements in {0}".format(cleaned))
self.assertTrue('/media/image.jpg' in cleaned, u"Missing elements in {0}".format(cleaned))
self.assertTrue('/media/image2.jpg' in cleaned, u"Missing elements in {0}".format(cleaned))
def test_sanitize_html5lib(self):
"""
Test whether the sanitize feature doesn't completely break pages.
"""
sanitized = clean_html(self.HTML1_ORIGINAL, sanitize=True)
self.assertTrue('460' in sanitized, u"Missing elements in {0}".format(sanitized))
self.assertTrue('float: left' in sanitized, u"Missing elements in {0}".format(sanitized))
self.assertTrue('/media/image.jpg' in sanitized, u"Missing elements in {0}".format(sanitized))
self.assertTrue('/media/image2.jpg' in sanitized, u"Missing elements in {0}".format(sanitized))
|
[
"fluent_contents.utils.html.clean_html"
] |
[((887, 918), 'fluent_contents.utils.html.clean_html', 'clean_html', (['self.HTML1_ORIGINAL'], {}), '(self.HTML1_ORIGINAL)\n', (897, 918), False, 'from fluent_contents.utils.html import clean_html\n'), ((1455, 1501), 'fluent_contents.utils.html.clean_html', 'clean_html', (['self.HTML1_ORIGINAL'], {'sanitize': '(True)'}), '(self.HTML1_ORIGINAL, sanitize=True)\n', (1465, 1501), False, 'from fluent_contents.utils.html import clean_html\n')]
|
import requests
from fidesops.task.filter_results import filter_data_categories
import pytest
import random
from fidesops.graph.graph import DatasetGraph
from fidesops.models.privacy_request import PrivacyRequest
from fidesops.schemas.redis_cache import PrivacyRequestIdentity
from fidesops.task import graph_task
from fidesops.task.graph_task import get_cached_data_for_erasures
from tests.graph.graph_test_util import assert_rows_match
@pytest.mark.integration_saas
@pytest.mark.integration_sentry
def test_sentry_access_request_task(
db,
policy,
sentry_connection_config,
sentry_dataset_config,
sentry_identity_email,
) -> None:
"""Full access request based on the Sentry SaaS config"""
privacy_request = PrivacyRequest(
id=f"test_saas_access_request_task_{random.randint(0, 1000)}"
)
identity = PrivacyRequestIdentity(**{"email": sentry_identity_email})
privacy_request.cache_identity(identity)
dataset_name = sentry_connection_config.get_saas_config().fides_key
merged_graph = sentry_dataset_config.get_graph()
graph = DatasetGraph(merged_graph)
v = graph_task.run_access_request(
privacy_request,
policy,
graph,
[sentry_connection_config],
{"email": sentry_identity_email},
)
assert_rows_match(
v[f"{dataset_name}:organizations"],
min_size=1,
keys=[
"id",
"slug",
"status",
"name",
"dateCreated",
"isEarlyAdopter",
"require2FA",
"requireEmailVerification",
"avatar",
"features",
],
)
assert_rows_match(
v[f"{dataset_name}:employees"],
min_size=1,
keys=[
"id",
"email",
"name",
"user",
"role",
"roleName",
"pending",
"expired",
"flags",
"dateCreated",
"inviteStatus",
"inviterName",
"projects",
],
)
assert_rows_match(
v[f"{dataset_name}:projects"],
min_size=3,
keys=[
"id",
"slug",
"name",
"isPublic",
"isBookmarked",
"color",
"dateCreated",
"firstEvent",
"firstTransactionEvent",
"hasSessions",
"features",
"status",
"platform",
"isInternal",
"isMember",
"hasAccess",
"avatar",
"organization",
],
)
assert_rows_match(
v[f"{dataset_name}:user_feedback"],
min_size=1,
keys=[
"id",
"eventID",
"name",
"email",
"comments",
"dateCreated",
"user",
"event",
"issue",
],
)
# Person returns empty dicts
assert_rows_match(
v[f"{dataset_name}:person"],
min_size=1,
keys=[
"id",
"hash",
"tagValue",
"identifier",
"username",
"email",
"name",
"ipAddress",
"dateCreated",
"avatarUrl",
],
)
target_categories = {"user.provided"}
filtered_results = filter_data_categories(
v,
target_categories,
graph.data_category_field_mapping,
)
assert set(filtered_results.keys()) == {
f"{dataset_name}:person",
f"{dataset_name}:employees",
f"{dataset_name}:user_feedback",
}
assert set(filtered_results[f"{dataset_name}:person"][0].keys()) == {
"email",
"name",
"username",
}
assert (
filtered_results[f"{dataset_name}:person"][0]["email"] == sentry_identity_email
)
assert set(filtered_results[f"{dataset_name}:employees"][0].keys()) == {
"email",
"user",
"name",
}
assert (
filtered_results[f"{dataset_name}:employees"][0]["email"]
== sentry_identity_email
)
assert set(filtered_results[f"{dataset_name}:employees"][0]["user"].keys()) == {
"email",
"name",
"avatarUrl",
"username",
"emails",
}
assert (
filtered_results[f"{dataset_name}:employees"][0]["user"]["email"]
== sentry_identity_email
)
assert filtered_results[f"{dataset_name}:employees"][0]["user"]["emails"] == [
{"email": sentry_identity_email}
]
assert set(filtered_results[f"{dataset_name}:user_feedback"][0].keys()) == {
"email",
"user",
"comments",
"name",
}
assert (
filtered_results[f"{dataset_name}:user_feedback"][0]["email"]
== sentry_identity_email
)
assert set(filtered_results[f"{dataset_name}:user_feedback"][0]["user"].keys()) == {
"email",
"name",
"username",
}
assert (
filtered_results[f"{dataset_name}:user_feedback"][0]["user"]["email"]
== sentry_identity_email
)
def sentry_erasure_test_prep(sentry_connection_config, db):
sentry_secrets = sentry_connection_config.secrets
# Set the assignedTo field on a sentry issue to a given employee
token = sentry_secrets.get("erasure_access_token")
issue_url = sentry_secrets.get("issue_url")
sentry_user_id = sentry_secrets.get("user_id_erasure")
if not token or not issue_url or not sentry_user_id:
# Exit early if these haven't been set locally
return None, None, None
headers = {"Authorization": f"Bearer {token}"}
data = {"assignedTo": f"user:{sentry_user_id}"}
resp = requests.put(issue_url, json=data, headers=headers)
assert resp.status_code == 200
assert resp.json()["assignedTo"]["id"] == sentry_user_id
# Temporarily sets the access token to one that works for erasures
sentry_connection_config.secrets["access_token"] = sentry_secrets[
"erasure_access_token"
]
sentry_connection_config.save(db)
# Grab a separate email for erasures
erasure_email = sentry_secrets["erasure_identity_email"]
return erasure_email, issue_url, headers
@pytest.mark.integration_saas
@pytest.mark.integration_sentry
def test_sentry_erasure_request_task(
db, policy, sentry_connection_config, sentry_dataset_config
) -> None:
"""Full erasure request based on the Sentry SaaS config. Also verifies issue data in access request"""
erasure_email, issue_url, headers = sentry_erasure_test_prep(
sentry_connection_config, db
)
privacy_request = PrivacyRequest(
id=f"test_saas_access_request_task_{random.randint(0, 1000)}"
)
identity = PrivacyRequestIdentity(**{"email": erasure_email})
privacy_request.cache_identity(identity)
dataset_name = sentry_connection_config.get_saas_config().fides_key
merged_graph = sentry_dataset_config.get_graph()
graph = DatasetGraph(merged_graph)
v = graph_task.run_access_request(
privacy_request,
policy,
graph,
[sentry_connection_config],
{"email": erasure_email},
)
assert_rows_match(
v[f"{dataset_name}:organizations"],
min_size=1,
keys=[
"id",
"slug",
"status",
"name",
"dateCreated",
"isEarlyAdopter",
"require2FA",
"requireEmailVerification",
"avatar",
"features",
],
)
assert_rows_match(
v[f"{dataset_name}:employees"],
min_size=1,
keys=[
"id",
"email",
"name",
"user",
"role",
"roleName",
"pending",
"expired",
"flags",
"dateCreated",
"inviteStatus",
"inviterName",
"projects",
],
)
assert_rows_match(
v[f"{dataset_name}:issues"],
min_size=1,
keys=[
"id",
"shareId",
"shortId",
"title",
"culprit",
"permalink",
"logger",
"level",
"status",
"statusDetails",
"isPublic",
"platform",
"project",
"type",
"metadata",
"numComments",
"assignedTo",
"isBookmarked",
"isSubscribed",
"subscriptionDetails",
"hasSeen",
"annotations",
"isUnhandled",
"count",
"userCount",
"firstSeen",
"lastSeen",
"stats",
],
)
assert v[f"{dataset_name}:issues"][0]["assignedTo"]["email"] == erasure_email
x = graph_task.run_erasure(
privacy_request,
policy,
graph,
[sentry_connection_config],
{"email": erasure_email},
get_cached_data_for_erasures(privacy_request.id),
)
# Masking request only issued to "issues" endpoint
assert x == {
"sentry_connector:projects": 0,
"sentry_connector:person": 0,
"sentry_connector:issues": 1,
"sentry_connector:organizations": 0,
"sentry_connector:user_feedback": 0,
"sentry_connector:employees": 0,
}
# Verify the user has been assigned to None
resp = requests.get(issue_url, headers=headers).json()
assert resp["assignedTo"] is None
|
[
"fidesops.graph.graph.DatasetGraph",
"fidesops.task.graph_task.run_access_request",
"tests.graph.graph_test_util.assert_rows_match",
"fidesops.schemas.redis_cache.PrivacyRequestIdentity",
"fidesops.task.graph_task.get_cached_data_for_erasures",
"random.randint",
"fidesops.task.filter_results.filter_data_categories",
"requests.get",
"requests.put"
] |
[((849, 907), 'fidesops.schemas.redis_cache.PrivacyRequestIdentity', 'PrivacyRequestIdentity', ([], {}), "(**{'email': sentry_identity_email})\n", (871, 907), False, 'from fidesops.schemas.redis_cache import PrivacyRequestIdentity\n'), ((1091, 1117), 'fidesops.graph.graph.DatasetGraph', 'DatasetGraph', (['merged_graph'], {}), '(merged_graph)\n', (1103, 1117), False, 'from fidesops.graph.graph import DatasetGraph\n'), ((1127, 1255), 'fidesops.task.graph_task.run_access_request', 'graph_task.run_access_request', (['privacy_request', 'policy', 'graph', '[sentry_connection_config]', "{'email': sentry_identity_email}"], {}), "(privacy_request, policy, graph, [\n sentry_connection_config], {'email': sentry_identity_email})\n", (1156, 1255), False, 'from fidesops.task import graph_task\n'), ((1303, 1513), 'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:organizations']"], {'min_size': '(1)', 'keys': "['id', 'slug', 'status', 'name', 'dateCreated', 'isEarlyAdopter',\n 'require2FA', 'requireEmailVerification', 'avatar', 'features']"}), "(v[f'{dataset_name}:organizations'], min_size=1, keys=[\n 'id', 'slug', 'status', 'name', 'dateCreated', 'isEarlyAdopter',\n 'require2FA', 'requireEmailVerification', 'avatar', 'features'])\n", (1320, 1513), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((1671, 1887), 'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:employees']"], {'min_size': '(1)', 'keys': "['id', 'email', 'name', 'user', 'role', 'roleName', 'pending', 'expired',\n 'flags', 'dateCreated', 'inviteStatus', 'inviterName', 'projects']"}), "(v[f'{dataset_name}:employees'], min_size=1, keys=['id',\n 'email', 'name', 'user', 'role', 'roleName', 'pending', 'expired',\n 'flags', 'dateCreated', 'inviteStatus', 'inviterName', 'projects'])\n", (1688, 1887), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((2082, 2392), 
'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:projects']"], {'min_size': '(3)', 'keys': "['id', 'slug', 'name', 'isPublic', 'isBookmarked', 'color', 'dateCreated',\n 'firstEvent', 'firstTransactionEvent', 'hasSessions', 'features',\n 'status', 'platform', 'isInternal', 'isMember', 'hasAccess', 'avatar',\n 'organization']"}), "(v[f'{dataset_name}:projects'], min_size=3, keys=['id',\n 'slug', 'name', 'isPublic', 'isBookmarked', 'color', 'dateCreated',\n 'firstEvent', 'firstTransactionEvent', 'hasSessions', 'features',\n 'status', 'platform', 'isInternal', 'isMember', 'hasAccess', 'avatar',\n 'organization'])\n", (2099, 2392), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((2640, 2808), 'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:user_feedback']"], {'min_size': '(1)', 'keys': "['id', 'eventID', 'name', 'email', 'comments', 'dateCreated', 'user',\n 'event', 'issue']"}), "(v[f'{dataset_name}:user_feedback'], min_size=1, keys=[\n 'id', 'eventID', 'name', 'email', 'comments', 'dateCreated', 'user',\n 'event', 'issue'])\n", (2657, 2808), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((2988, 3171), 'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:person']"], {'min_size': '(1)', 'keys': "['id', 'hash', 'tagValue', 'identifier', 'username', 'email', 'name',\n 'ipAddress', 'dateCreated', 'avatarUrl']"}), "(v[f'{dataset_name}:person'], min_size=1, keys=['id',\n 'hash', 'tagValue', 'identifier', 'username', 'email', 'name',\n 'ipAddress', 'dateCreated', 'avatarUrl'])\n", (3005, 3171), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((3392, 3471), 'fidesops.task.filter_results.filter_data_categories', 'filter_data_categories', (['v', 'target_categories', 'graph.data_category_field_mapping'], {}), '(v, target_categories, graph.data_category_field_mapping)\n', (3414, 3471), 
False, 'from fidesops.task.filter_results import filter_data_categories\n'), ((5764, 5815), 'requests.put', 'requests.put', (['issue_url'], {'json': 'data', 'headers': 'headers'}), '(issue_url, json=data, headers=headers)\n', (5776, 5815), False, 'import requests\n'), ((6801, 6851), 'fidesops.schemas.redis_cache.PrivacyRequestIdentity', 'PrivacyRequestIdentity', ([], {}), "(**{'email': erasure_email})\n", (6823, 6851), False, 'from fidesops.schemas.redis_cache import PrivacyRequestIdentity\n'), ((7035, 7061), 'fidesops.graph.graph.DatasetGraph', 'DatasetGraph', (['merged_graph'], {}), '(merged_graph)\n', (7047, 7061), False, 'from fidesops.graph.graph import DatasetGraph\n'), ((7071, 7191), 'fidesops.task.graph_task.run_access_request', 'graph_task.run_access_request', (['privacy_request', 'policy', 'graph', '[sentry_connection_config]', "{'email': erasure_email}"], {}), "(privacy_request, policy, graph, [\n sentry_connection_config], {'email': erasure_email})\n", (7100, 7191), False, 'from fidesops.task import graph_task\n'), ((7239, 7449), 'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:organizations']"], {'min_size': '(1)', 'keys': "['id', 'slug', 'status', 'name', 'dateCreated', 'isEarlyAdopter',\n 'require2FA', 'requireEmailVerification', 'avatar', 'features']"}), "(v[f'{dataset_name}:organizations'], min_size=1, keys=[\n 'id', 'slug', 'status', 'name', 'dateCreated', 'isEarlyAdopter',\n 'require2FA', 'requireEmailVerification', 'avatar', 'features'])\n", (7256, 7449), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((7607, 7823), 'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:employees']"], {'min_size': '(1)', 'keys': "['id', 'email', 'name', 'user', 'role', 'roleName', 'pending', 'expired',\n 'flags', 'dateCreated', 'inviteStatus', 'inviterName', 'projects']"}), "(v[f'{dataset_name}:employees'], min_size=1, keys=['id',\n 'email', 'name', 'user', 
'role', 'roleName', 'pending', 'expired',\n 'flags', 'dateCreated', 'inviteStatus', 'inviterName', 'projects'])\n", (7624, 7823), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((8018, 8446), 'tests.graph.graph_test_util.assert_rows_match', 'assert_rows_match', (["v[f'{dataset_name}:issues']"], {'min_size': '(1)', 'keys': "['id', 'shareId', 'shortId', 'title', 'culprit', 'permalink', 'logger',\n 'level', 'status', 'statusDetails', 'isPublic', 'platform', 'project',\n 'type', 'metadata', 'numComments', 'assignedTo', 'isBookmarked',\n 'isSubscribed', 'subscriptionDetails', 'hasSeen', 'annotations',\n 'isUnhandled', 'count', 'userCount', 'firstSeen', 'lastSeen', 'stats']"}), "(v[f'{dataset_name}:issues'], min_size=1, keys=['id',\n 'shareId', 'shortId', 'title', 'culprit', 'permalink', 'logger',\n 'level', 'status', 'statusDetails', 'isPublic', 'platform', 'project',\n 'type', 'metadata', 'numComments', 'assignedTo', 'isBookmarked',\n 'isSubscribed', 'subscriptionDetails', 'hasSeen', 'annotations',\n 'isUnhandled', 'count', 'userCount', 'firstSeen', 'lastSeen', 'stats'])\n", (8035, 8446), False, 'from tests.graph.graph_test_util import assert_rows_match\n'), ((9055, 9103), 'fidesops.task.graph_task.get_cached_data_for_erasures', 'get_cached_data_for_erasures', (['privacy_request.id'], {}), '(privacy_request.id)\n', (9083, 9103), False, 'from fidesops.task.graph_task import get_cached_data_for_erasures\n'), ((9498, 9538), 'requests.get', 'requests.get', (['issue_url'], {'headers': 'headers'}), '(issue_url, headers=headers)\n', (9510, 9538), False, 'import requests\n'), ((802, 825), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (816, 825), False, 'import random\n'), ((6754, 6777), 'random.randint', 'random.randint', (['(0)', '(1000)'], {}), '(0, 1000)\n', (6768, 6777), False, 'import random\n')]
|
import nltk
cor = nltk.corpus.brown.tagged_sents(categories='adventure')[:500]
print(len(cor))
from nltk.util import unique_list
tag_set = unique_list(tag for sent in cor for (word,tag) in sent)
print(len(tag_set))
symbols = unique_list(word for sent in cor for (word,tag) in sent)
print(len(symbols))
print(len(tag_set))
symbols = unique_list(word for sent in cor for (word,tag) in sent)
print(len(symbols))
trainer = nltk.tag.HiddenMarkovModelTrainer(tag_set, symbols)
train_corpus = []
test_corpus = []
for i in range(len(cor)):
if i % 10:
train_corpus+=[cor[i]]
else:
test_corpus+=[cor[i]]
print(len(train_corpus))
print(len(test_corpus))
|
[
"nltk.util.unique_list",
"nltk.corpus.brown.tagged_sents",
"nltk.tag.HiddenMarkovModelTrainer"
] |
[((139, 193), 'nltk.util.unique_list', 'unique_list', (['(tag for sent in cor for word, tag in sent)'], {}), '(tag for sent in cor for word, tag in sent)\n', (150, 193), False, 'from nltk.util import unique_list\n'), ((225, 280), 'nltk.util.unique_list', 'unique_list', (['(word for sent in cor for word, tag in sent)'], {}), '(word for sent in cor for word, tag in sent)\n', (236, 280), False, 'from nltk.util import unique_list\n'), ((332, 387), 'nltk.util.unique_list', 'unique_list', (['(word for sent in cor for word, tag in sent)'], {}), '(word for sent in cor for word, tag in sent)\n', (343, 387), False, 'from nltk.util import unique_list\n'), ((419, 470), 'nltk.tag.HiddenMarkovModelTrainer', 'nltk.tag.HiddenMarkovModelTrainer', (['tag_set', 'symbols'], {}), '(tag_set, symbols)\n', (452, 470), False, 'import nltk\n'), ((18, 72), 'nltk.corpus.brown.tagged_sents', 'nltk.corpus.brown.tagged_sents', ([], {'categories': '"""adventure"""'}), "(categories='adventure')\n", (48, 72), False, 'import nltk\n')]
|