code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
__author__ = 'kq4hy'
import csv
import sqlite3
def load_course_database(db_name, csv_filename):
conn = sqlite3.connect(db_name)
with conn:
curs = conn.cursor()
with open(csv_filename, 'rU') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
sql_cmd = "insert into coursedata values(?, ?, ?, ?, ?, ?, ?)"
curs.execute(sql_cmd, row)
load_course_database('course1.db', 'seas-courses-5years.csv') | [
"csv.reader",
"sqlite3.connect"
] | [((110, 134), 'sqlite3.connect', 'sqlite3.connect', (['db_name'], {}), '(db_name)\n', (125, 134), False, 'import sqlite3\n'), ((250, 269), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (260, 269), False, 'import csv\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
from page_objects import PageObject, PageElement
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
delay_min = 3 # sec
delay_medium = 5 # sec
delay_max = 9 # sec
class RegistrationPage(PageObject):
text_box_first_name = PageElement(id_='customer.firstName')
button_submit_form = PageElement(css="input[value='Register']")
form_submit_result_message = PageElement(id_='customer.lastName.errors')
def method_registration_page_clean_database(self, current_web_driver,):
self.text_box_first_name = 'name_first'
self.button_submit_form.click()
WebDriverWait(current_web_driver,delay_medium).until(expected_conditions.visibility_of(self.form_submit_result_message))
return
| [
"selenium.webdriver.support.expected_conditions.visibility_of",
"selenium.webdriver.support.ui.WebDriverWait",
"page_objects.PageElement"
] | [((439, 476), 'page_objects.PageElement', 'PageElement', ([], {'id_': '"""customer.firstName"""'}), "(id_='customer.firstName')\n", (450, 476), False, 'from page_objects import PageObject, PageElement\n'), ((502, 544), 'page_objects.PageElement', 'PageElement', ([], {'css': '"""input[value=\'Register\']"""'}), '(css="input[value=\'Register\']")\n', (513, 544), False, 'from page_objects import PageObject, PageElement\n'), ((578, 621), 'page_objects.PageElement', 'PageElement', ([], {'id_': '"""customer.lastName.errors"""'}), "(id_='customer.lastName.errors')\n", (589, 621), False, 'from page_objects import PageObject, PageElement\n'), ((848, 914), 'selenium.webdriver.support.expected_conditions.visibility_of', 'expected_conditions.visibility_of', (['self.form_submit_result_message'], {}), '(self.form_submit_result_message)\n', (881, 914), False, 'from selenium.webdriver.support import expected_conditions\n'), ((795, 842), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['current_web_driver', 'delay_medium'], {}), '(current_web_driver, delay_medium)\n', (808, 842), False, 'from selenium.webdriver.support.ui import WebDriverWait\n')] |
import torch
import torch.nn as nn
import numpy as np
from enum import Enum
from typing import List, Callable, Any
from tqdm import tqdm
from .model import Model
from .dataset import Dataset
from .experiment import Experiment
from .callback import Callback
class TrainingEvents(Enum):
START = 'on_start'
FINISH = 'on_finish'
TRAINING_EPOCH_START = 'on_training_epoch_start'
TRAINING_EPOCH_FINISH = 'on_training_epoch_finish'
TRAINING_BATCH_START = 'on_training_batch_start'
TRAINING_BATCH_FINISH = 'on_training_batch_finish'
VALIDATION_EPOCH_START = 'on_validation_epoch_start'
VALIDATION_EPOCH_FINISH = 'on_validation_epoch_finish'
VALIDATION_BATCH_START = 'on_validation_batch_start'
VALIDATION_BATCH_FINISH = 'on_validation_batch_finish'
TESTING_START = 'on_testing_start'
TESTING_FINISH = 'on_testing_finish'
TESTING_BATCH_START = 'on_testing_batch_start'
TESTING_BATCH_FINISH = 'on_testing_batch_finish'
class TrainingStrategy:
def __init__(self, model: Model, dataset: Dataset, optimizer: torch.optim.Optimizer, experiment: Experiment = None,
callbacks: List[Callback] = None):
# properties
self.model: Model = model
self.dataset: Dataset = dataset
self.optimizer: torch.optim.Optimizer = optimizer
#self.experiment: Experiment = experiment
self.callbacks: List[Callback] = callbacks
# parallelize network depending on experiment settings
# if len(self.experiment.devices) > 1:
# self.network = nn.DataParallel(self.model.network, device_ids=self.experiment.devices)
# else:
self.network = self.model.network
# event handler
self.handlers = {k: [] for k in TrainingEvents}
# register events
for event in TrainingEvents:
# model events
self.on_event(event, getattr(self.model, event.value))
# callback events
for c in self.callbacks:
self.on_event(event, getattr(c, event.value))
def on_event(self, event: TrainingEvents, handler: Callable):
self.handlers[event].append(handler)
def emit(self, event: TrainingEvents, *args, **kwargs):
for handler in self.handlers[event]:
handler(*args, **kwargs)
def training_epoch(self, epoch: int) -> None:
raise NotImplementedError
def validation_epoch(self, epoch: int) -> None:
raise NotImplementedError
def test(self) -> None:
raise NotImplementedError
def __call__(self, n_epochs: int = 1, validation: bool = True, verbose: bool = True):
if verbose:
print(f'Training{" and validating" if validation else ""}'
f' for {n_epochs} {"epochs" if n_epochs > 1 else "epoch"}')
self.model.print_summary()
self.dataset.print_summary()
print(f'Optimizer: {self.optimizer.__class__.__name__}\n'
f'\tLearning rate: {self.optimizer.param_groups[0]["lr"]}')
print(f'Callbacks: {", ".join(c.__class__.__name__ for c in self.callbacks)}')
for epoch in range(n_epochs):
print(f'\nEpoch {epoch}:')
self.training_epoch(epoch)
if validation:
self.validation_epoch(epoch)
class SupervisedTraining(TrainingStrategy):
def training_epoch(self, epoch: int) -> None:
self.model.network.train()
self.emit(TrainingEvents.TRAINING_EPOCH_START, epoch, self.model)
with tqdm(self.dataset.training_dataloader(), desc='Training', unit='batch') as t:
for batch_id, batch in enumerate(t):
self.emit(TrainingEvents.TRAINING_BATCH_START, batch, batch_id, epoch)
loss, y_pred, y_true = self.model.training_fn(batch, batch_id, epoch)
self.model.backprop_fn(loss, self.optimizer)
self.emit(TrainingEvents.TRAINING_BATCH_FINISH, batch, batch_id, epoch,
loss.detach(), y_pred.detach(), y_true)
# update progress bar
t.set_postfix(self.model.progressbar_metrics())
self.emit(TrainingEvents.TRAINING_EPOCH_FINISH, epoch, self.model)
def validation_epoch(self, epoch: int) -> None:
self.model.network.eval()
self.model.network.train(False)
with torch.no_grad():
self.emit(TrainingEvents.VALIDATION_EPOCH_START, epoch, self.model)
with tqdm(self.dataset.validation_dataloader(), desc='Validation', unit='batch') as t:
for batch_id, batch in enumerate(t):
self.emit(TrainingEvents.VALIDATION_BATCH_START, batch, batch_id, epoch)
loss, y_pred, y_true = self.model.validation_fn(batch, batch_id, epoch)
self.emit(TrainingEvents.VALIDATION_BATCH_FINISH, batch, batch_id, epoch,
loss.detach(), y_pred.detach(), y_true)
# update progress bar
t.set_postfix(self.model.progressbar_metrics())
self.emit(TrainingEvents.VALIDATION_EPOCH_FINISH, epoch, self.model)
def test(self) -> None:
self.model.network.eval()
self.model.network.train(False)
with torch.no_grad():
self.emit(TrainingEvents.TESTING_START)
with tqdm(self.dataset.validation_dataloader(), desc='Testing', unit='batch') as t:
for batch_id, batch in enumerate(t):
self.emit(TrainingEvents.TESTING_BATCH_START, batch, batch_id)
loss, y_pred, y_true = self.model.validation_fn(batch, batch_id, -1)
self.emit(TrainingEvents.TESTING_BATCH_FINISH, batch, batch_id,
loss.detach(), y_pred.detach(), y_true)
# update progress bar
t.set_postfix(self.model.progressbar_metrics())
self.emit(TrainingEvents.TESTING_FINISH)
| [
"torch.no_grad"
] | [((4372, 4387), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4385, 4387), False, 'import torch\n'), ((5280, 5295), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5293, 5295), False, 'import torch\n')] |
import unittest
from pystac.utils import (make_relative_href, make_absolute_href,
is_absolute_href)
class UtilsTest(unittest.TestCase):
def test_make_relative_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [
('/a/b/c/d/catalog.json', '/a/b/c/catalog.json',
'./d/catalog.json'),
('/a/b/catalog.json', '/a/b/c/catalog.json', '../catalog.json'),
('/a/catalog.json', '/a/b/c/catalog.json', '../../catalog.json'),
('http://stacspec.org/a/b/c/d/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', './d/catalog.json'),
('http://stacspec.org/a/b/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', '../catalog.json'),
('http://stacspec.org/a/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', '../../catalog.json'),
('http://stacspec.org/a/catalog.json',
'http://cogeo.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json'),
('http://stacspec.org/a/catalog.json',
'https://stacspec.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json')
]
for source_href, start_href, expected in test_cases:
actual = make_relative_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_make_absolute_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [
('item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./z/item.json', '/a/b/c/catalog.json', '/a/b/c/z/item.json'),
('../item.json', '/a/b/c/catalog.json', '/a/b/item.json'),
('item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/item.json'),
('./item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/item.json'),
('./z/item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/c/z/item.json'),
('../item.json', 'https://stacgeo.org/a/b/c/catalog.json',
'https://stacgeo.org/a/b/item.json')
]
for source_href, start_href, expected in test_cases:
actual = make_absolute_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_is_absolute_href(self):
# Test cases of (href, expected)
test_cases = [('item.json', False), ('./item.json', False),
('../item.json', False), ('/item.json', True),
('http://stacgeo.org/item.json', True)]
for href, expected in test_cases:
actual = is_absolute_href(href)
self.assertEqual(actual, expected)
| [
"pystac.utils.make_absolute_href",
"pystac.utils.is_absolute_href",
"pystac.utils.make_relative_href"
] | [((1333, 1376), 'pystac.utils.make_relative_href', 'make_relative_href', (['source_href', 'start_href'], {}), '(source_href, start_href)\n', (1351, 1376), False, 'from pystac.utils import make_relative_href, make_absolute_href, is_absolute_href\n'), ((2421, 2464), 'pystac.utils.make_absolute_href', 'make_absolute_href', (['source_href', 'start_href'], {}), '(source_href, start_href)\n', (2439, 2464), False, 'from pystac.utils import make_relative_href, make_absolute_href, is_absolute_href\n'), ((2854, 2876), 'pystac.utils.is_absolute_href', 'is_absolute_href', (['href'], {}), '(href)\n', (2870, 2876), False, 'from pystac.utils import make_relative_href, make_absolute_href, is_absolute_href\n')] |
from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton
from GUI.CustomWidgets.PathFileLineEdit import PathFileLineEdit
from GUI.CustomWidgets.InputField import InputField
class ExportPopUp(QDialog):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.selected_shape_ids = []
self.selected_shape_names = []
self.selected_axis_system_id = None
self.dx_division = True
self.filepath = ""
self.setWindowTitle("Export Data")
self.axis_group = QGroupBox(self)
self.axis_group.setTitle("Axis System")
axis_systems = [axis_system["Name"] for axis_system in self.parent.workspace.shapes_tree.get_axis_systems()]
self.axis_system_ids = [axis_system["id"] for axis_system in self.parent.workspace.shapes_tree.get_axis_systems()]
self.axis_systems = QComboBox(self.axis_group)
self.axis_systems.addItem("(Choose Axis System)")
for axis_system in axis_systems:
self.axis_systems.addItem(axis_system)
self.axis_layout = QVBoxLayout(self.axis_group)
self.axis_layout.addWidget(self.axis_systems)
self.axis_group.setLayout(self.axis_layout)
self.shapes_group = QGroupBox(self)
self.shapes_group.setTitle("Shapes")
shapes = [shape["Name"] for shape in self.parent.workspace.shapes_tree.get_shapes()]
self.shape_ids = [shape["id"] for shape in self.parent.workspace.shapes_tree.get_shapes()]
self.shapes = []
for shape in shapes:
self.add_shape(shape)
self.shapes_layout = QGridLayout(self.shapes_group)
self.arrange_shapes_layout()
self.shapes_group.setLayout(self.shapes_layout)
self.options_group = QGroupBox(self)
self.options_group.setTitle("Spline Options")
self.radio_buttons = [QRadioButton(self.options_group) for i in range(2)]
self.radio_buttons[0].dx_division = True
self.radio_buttons[0].setText("dx Division")
self.radio_buttons[0].setChecked(True)
self.radio_buttons[1].dx_division = False
self.radio_buttons[1].setText("dt Division")
for radio in self.radio_buttons:
radio.toggled.connect(self.handle_radio_toggled)
self.num_of_divisions_value = 200
self.num_of_divisions = InputField(self.options_group, "Number of Points", str(self.num_of_divisions_value),
10, [2, 1], 170)
self.options_layout = QGridLayout(self.options_group)
for i in range(2):
self.options_layout.addWidget(self.radio_buttons[i], 0, i)
self.options_layout.addWidget(self.num_of_divisions, 1, 0, 1, 2)
self.options_group.setLayout(self.options_layout)
self.export_group = QGroupBox(self)
self.export_group.setTitle("File Export")
self.filepath_line_edit = PathFileLineEdit(self.export_group, "Export File", filename="plot_data",
filters="Excel Workbook (*.xlsx);; CSV (Comma Delimited) (*.csv)")
self.export_layout = QVBoxLayout(self.export_group)
self.export_layout.addWidget(self.filepath_line_edit)
self.export_group.setLayout(self.export_layout)
self.export_button = QPushButton(self)
self.export_button.setText("Export")
self.export_button.pressed.connect(self.handle_export)
self.layout = QVBoxLayout(self)
self.layout.addWidget(self.axis_group)
self.layout.addWidget(self.shapes_group)
self.layout.addWidget(self.options_group)
self.layout.addWidget(self.export_group)
self.layout.addWidget(self.export_button)
self.setLayout(self.layout)
self.setFixedSize(380, 300 + 10 * (len(self.shapes) + len(self.shapes) % 2))
self.show()
def handle_radio_toggled(self):
radio_button = self.sender()
if radio_button.isChecked():
self.dx_division = radio_button.dx_division
def arrange_shapes_layout(self):
i = 0
n = len(self.shapes)
rows = divmod(n + 1, 2)[0]
for shape in self.shapes:
col, row = divmod(i, rows)
self.shapes_layout.addWidget(shape, row, col)
i += 1
def add_shape(self, shape_name):
self.shapes.append(QCheckBox(shape_name))
def handle_export(self):
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
self.selected_shape_ids = [self.shape_ids[i] for i in range(len(self.shapes)) if self.shapes[i].isChecked()]
self.selected_shape_names = [shape.text() for shape in self.shapes if shape.isChecked()]
if self.axis_systems.currentIndex() == 0:
QMessageBox.warning(self, "Error", "Please select an Axis System!")
elif len(self.selected_shape_ids) == 0:
QMessageBox.warning(self, "Error", "Please select an least one graph for export!")
elif self.filepath_line_edit.text() == "":
QMessageBox.warning(self, "Error", "Please define file path!")
elif not is_int(self.num_of_divisions.value):
QMessageBox.warning(self, "Error", "Please define file path!")
else:
self.filepath = self.filepath_line_edit.text()
self.num_of_divisions_value = int(self.num_of_divisions.value)
self.selected_axis_system_id = self.axis_system_ids[self.axis_systems.currentIndex() - 1]
self.accept()
def closeEvent(self, a0):
self.reject()
| [
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtWidgets.QRadioButton",
"PyQt5.QtWidgets.QGridLayout",
"GUI.CustomWidgets.PathFileLineEdit.PathFileLineEdit",
"PyQt5.QtWidgets.QGroupBox",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QCheckBox",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QMessageBox.warn... | [((627, 642), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['self'], {}), '(self)\n', (636, 642), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((961, 987), 'PyQt5.QtWidgets.QComboBox', 'QComboBox', (['self.axis_group'], {}), '(self.axis_group)\n', (970, 987), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((1166, 1194), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self.axis_group'], {}), '(self.axis_group)\n', (1177, 1194), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((1330, 1345), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['self'], {}), '(self)\n', (1339, 1345), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((1704, 1734), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self.shapes_group'], {}), '(self.shapes_group)\n', (1715, 1734), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((1858, 1873), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['self'], {}), '(self)\n', (1867, 1873), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((2616, 2647), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', (['self.options_group'], {}), '(self.options_group)\n', (2627, 2647), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((2907, 2922), 'PyQt5.QtWidgets.QGroupBox', 'QGroupBox', (['self'], {}), '(self)\n', (2916, 2922), False, 'from PyQt5.QtWidgets 
import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((3008, 3151), 'GUI.CustomWidgets.PathFileLineEdit.PathFileLineEdit', 'PathFileLineEdit', (['self.export_group', '"""Export File"""'], {'filename': '"""plot_data"""', 'filters': '"""Excel Workbook (*.xlsx);; CSV (Comma Delimited) (*.csv)"""'}), "(self.export_group, 'Export File', filename='plot_data',\n filters='Excel Workbook (*.xlsx);; CSV (Comma Delimited) (*.csv)')\n", (3024, 3151), False, 'from GUI.CustomWidgets.PathFileLineEdit import PathFileLineEdit\n'), ((3229, 3259), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self.export_group'], {}), '(self.export_group)\n', (3240, 3259), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((3408, 3425), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', (['self'], {}), '(self)\n', (3419, 3425), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((3557, 3574), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self'], {}), '(self)\n', (3568, 3574), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((1959, 1991), 'PyQt5.QtWidgets.QRadioButton', 'QRadioButton', (['self.options_group'], {}), '(self.options_group)\n', (1971, 1991), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((4460, 4481), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', (['shape_name'], {}), '(shape_name)\n', (4469, 4481), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((4942, 5009), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', 
'"""Error"""', '"""Please select an Axis System!"""'], {}), "(self, 'Error', 'Please select an Axis System!')\n", (4961, 5009), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((5070, 5156), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', '"""Error"""', '"""Please select an least one graph for export!"""'], {}), "(self, 'Error',\n 'Please select an least one graph for export!')\n", (5089, 5156), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((5216, 5278), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', '"""Error"""', '"""Please define file path!"""'], {}), "(self, 'Error', 'Please define file path!')\n", (5235, 5278), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n'), ((5345, 5407), 'PyQt5.QtWidgets.QMessageBox.warning', 'QMessageBox.warning', (['self', '"""Error"""', '"""Please define file path!"""'], {}), "(self, 'Error', 'Please define file path!')\n", (5364, 5407), False, 'from PyQt5.QtWidgets import QDialog, QPushButton, QVBoxLayout, QComboBox, QGroupBox, QCheckBox, QGridLayout, QMessageBox, QRadioButton\n')] |
import matplotlib.pyplot as plt
from matplotlib import collections
from matplotlib.lines import Line2D
def autosize(fig=None, figsize=None):
## Take current figure if no figure provided
if fig is None:
fig = plt.gcf()
if figsize is None:
## Get size of figure
figsize = fig.get_size_inches()
else:
## Set size of figure
fig.set_size_inches(figsize)
## Make font sizes proportional to figure size
fontsize_labels = figsize[0] * 5
fontsize_ticks = fontsize_labels / 2
scatter_size = (figsize[0] * 1.5) ** 2
linewidth = figsize[0]
axes = fig.get_axes()
for ax in axes:
## Set label font sizes
for item in [ax.title, ax.xaxis.label, ax.yaxis.label]:
item.set_fontsize(fontsize_labels)
## Set tick font sizes
for item in ax.get_xticklabels() + ax.get_yticklabels():
item.set_fontsize(fontsize_ticks)
## Set line widths
plot_objs = [child for child in ax.get_children() if isinstance(child, Line2D)]
for plot_obj in plot_objs:
plot_obj.set_linewidth(linewidth)
## Set scatter point sizes
plot_objs = [
child
for child in ax.get_children()
if isinstance(child, collections.PathCollection)
]
for plot_obj in plot_objs:
plot_obj.set_sizes([scatter_size])
## Set tight layout
plt.tight_layout()
if __name__ == "__main__":
import numpy as np
from plottify import autosize
import matplotlib.pyplot as plt
n = 100
x = np.random.uniform(low=-5, high=5, size=n)
y = x + np.random.normal(scale=0.5, size=n)
for size in [3, 10, 20]:
plt.figure(figsize=(size, size))
plt.scatter(x, y)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Default")
plt.show()
plt.figure(figsize=(size, size))
plt.scatter(x, y)
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Autosized")
autosize()
plt.show()
| [
"numpy.random.normal",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.random.uniform",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"plottify.autosize",
"matplotlib.pyplot.show"
] | [((1442, 1460), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1458, 1460), True, 'import matplotlib.pyplot as plt\n'), ((1603, 1644), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5)', 'high': '(5)', 'size': 'n'}), '(low=-5, high=5, size=n)\n', (1620, 1644), True, 'import numpy as np\n'), ((227, 236), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (234, 236), True, 'import matplotlib.pyplot as plt\n'), ((1657, 1692), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.5)', 'size': 'n'}), '(scale=0.5, size=n)\n', (1673, 1692), True, 'import numpy as np\n'), ((1732, 1764), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (1742, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1790), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (1784, 1790), True, 'import matplotlib.pyplot as plt\n'), ((1799, 1814), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1809, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1823, 1838), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1833, 1838), True, 'import matplotlib.pyplot as plt\n'), ((1847, 1867), 'matplotlib.pyplot.title', 'plt.title', (['"""Default"""'], {}), "('Default')\n", (1856, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1876, 1886), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1884, 1886), True, 'import matplotlib.pyplot as plt\n'), ((1896, 1928), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(size, size)'}), '(figsize=(size, size))\n', (1906, 1928), True, 'import matplotlib.pyplot as plt\n'), ((1937, 1954), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (1948, 1954), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1978), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (1973, 1978), True, 'import matplotlib.pyplot as plt\n'), 
((1987, 2002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (1997, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2033), 'matplotlib.pyplot.title', 'plt.title', (['"""Autosized"""'], {}), "('Autosized')\n", (2020, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2052), 'plottify.autosize', 'autosize', ([], {}), '()\n', (2050, 2052), False, 'from plottify import autosize\n'), ((2061, 2071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2069, 2071), True, 'import matplotlib.pyplot as plt\n')] |
from re import S
from manimlib import *
import sys
import os
from tqdm.std import tqdm
sys.path.append(os.getcwd())
from utils.imports import *
class Opening(Scene):
def construct(self):
title = Text("基础递推递归", font='msyh')
self.play(Write(title), run_time=2)
self.wait()
self.play(FadeOut(title))
self.wait()
return super().construct()
class BeginningIntroduction(Scene):
def construct(self):
RecurrenceFormula = Tex(
r"a_1=1 ,\quad a_n=a_{n-1}+1"
)
GeneralFormula = Tex(
r"a_n=n"
)
VGroup(RecurrenceFormula, GeneralFormula).arrange(DOWN, buff=LARGE_BUFF)
self.play(Write(RecurrenceFormula))
self.wait()
self.play(Write(GeneralFormula))
self.wait()
RecurrenceFormula.target = Tex(
r"a_n=\begin{cases}1&{n=1,2,}\\a_{n-1}+a_{n-2}&n\geq3.\end{cases}"
).replace(RecurrenceFormula).scale(1.25).shift(UP*.5)
GeneralFormula.target = Tex(
r"a_n=\frac{1}{\sqrt{5}}\left[\left(\frac{1+\sqrt{5}}{2}\right)^n-\left(\frac{1-\sqrt{5}}{2}\right)^n\right]"
).next_to(RecurrenceFormula.target, DOWN, buff=LARGE_BUFF)
self.play(MoveToTarget(RecurrenceFormula), MoveToTarget(GeneralFormula))
self.wait()
self.play(
FadeOut(GeneralFormula),
RecurrenceFormula.animate.move_to(ORIGIN)
)
self.wait()
Fib = [1, 1]
for i in range(2, 2022):
Fib.append(Fib[i-1]+Fib[i-2])
Fib_eq = []
for i in tqdm(range(2021)):
Fib_eq.append(Text("a["+str(i+1)+"]"))
VGroup(*Fib_eq).arrange(DOWN).next_to(RecurrenceFormula, DOWN)
self.play(*[Write(_Fib_eq) for _Fib_eq in Fib_eq], run_time=2)
self.wait()
self.play(self.camera.frame.animate.move_to(Fib_eq[-1].get_center()), run_time=10)
self.wait()
self.play(*[FadeOut(_mobjects) for _mobjects in self.mobjects])
self.wait()
return super().construct()
class RecurrenceFibIntroduction(Scene):
def construct(self):
title = Text("斐波那契数列", font='DengXian')
self.play(Write(title))
self.wait()
subtitle = Text("Fibonacci", font='DengXian')
subtitle.next_to(title, DOWN)
self.play(Write(subtitle))
subtitle.target = Text("Fib", font='DengXian').next_to(title, DOWN)
self.play(MoveToTarget(subtitle))
self.wait()
subtitle.target = Text("fib", font='DengXian').next_to(title, DOWN)
self.play(MoveToTarget(subtitle))
self.wait()
self.play(FadeOut(subtitle))
self.wait()
self.play(title.animate.to_edge(UP).scale(0.75))
RecurrenceFormula = Tex(
r"a_n=\begin{cases}1&{n=1,2,}\\a_{n-1}+a_{n-2}&n\geq3.\end{cases}"
).scale(1.25).shift(UP*.5)
GeneralFormula = Tex(
r"a_n=\frac{1}{\sqrt{5}}\left[\left(\frac{1+\sqrt{5}}{2}\right)^n-\left(\frac{1-\sqrt{5}}{2}\right)^n\right]"
).next_to(RecurrenceFormula, DOWN, buff=LARGE_BUFF)
self.play(Write(RecurrenceFormula), Write(GeneralFormula))
self.wait()
self.play(FadeOut(RecurrenceFormula), FadeOut(GeneralFormula))
self.wait()
seq = Sequence([0 for i in range(10)]).move_to(ORIGIN)
seq.on_show(self)
seq.write(1, 1, self)
seq.write(2, 1, self)
for pos in range(3, 11):
seq.activate(pos, self)
seq.write(pos, seq.get_val(pos-1)+seq.get_val(pos-2), self)
self.play(*[FadeOut(_mobject) for _mobject in self.mobjects])
return super().construct()
class RecursionFibIntroduction(Scene):
def construct(self):
title = Text("斐波那契数列", font='DengXian')
subtitle = Text("(递归解法)", font='DengXian')
subtitle.scale(0.75).next_to(title, DOWN, buff=MED_SMALL_BUFF)
self.play(
Write(title),
Write(subtitle)
)
self.wait()
self.play(
FadeOut(title),
FadeOut(subtitle)
)
seq = Sequence([1, 1, 0, 0, 0])
main_call = seq.cells[rid(5)].copy().next_to(seq, DOWN, buff=MED_LARGE_BUFF)
self.play(ShowCreation(seq))
self.wait()
self.play(ShowCreation(main_call))
return super().construct()
class trying1(Scene):
def construct(self):
tex = Tex("a=1")
self.play(Write(tex))
return super().construct()
class trying2(Scene):
def construct(self):
hello = Tex("1")
rec = Rectangle()
f_always(rec.move_to, hello.get_center)
self.play(Write(hello))
self.play(ShowCreation(rec))
self.play(hello.animate.shift(2*RIGHT+UP))
class trying3(Scene):
def construct(self):
cell = Cell(1234567890, 7)
self.play(ShowCreation(cell))
self.play(*cell.write(1))
return super().construct()
class trying4(Scene):
def construct(self):
seq = Sequence([1, 3, 5, 2, 4, 6])
self.play(ShowCreation(seq), GrowArrow(seq.arrow))
seq.activate(4, self)
seq.activate(6, self)
seq.write(3, 123456, self)
seq.write(6, 123456, self)
seq.write(2, 1345, self)
seq.write(3, 1, self)
return super().construct()
class trying5(Scene):
def construct(self):
depth_bar = DepthBar()
self.play(ShowCreation(depth_bar))
self.play(depth_bar.deepen())
return super().construct()
class trying6(Scene):
def construct(self):
self.camera.frame.shift(DOWN)
seq = Sequence([1, 2, 3]).shift(UP)
main_caller = seq.get_cell(3).copy()
tree = CallTree(main_caller)
self.play(ShowCreation(seq))
self.wait()
self.play(ShowCreation(tree.depth_bar))
self.play(ShowCreation(tree))
to_caller = seq.get_cell(2).copy()
to_caller.next_to(main_caller, DOWN)
self.play(*tree.extent(main_caller, seq.get_cell(2).copy(), 2))
self.play(*tree.compose())
self.play(*tree.extent(main_caller, seq.get_cell(1).copy(), 2))
self.play(*tree.compose())
return super().construct()
class trying7(Scene):
def construct(self):
rec = Rectangle()
cir = Circle()
rec.to_edge(LEFT)
cir.move_to(UP*2, RIGHT*3)
self.play(ShowCreation(rec), ShowCreation(cir))
self.play(cir.animate.align_to(rec, UP))
return super().construct()
class trying8(Scene):
def construct(self):
rec = Rectangle().shift(DOWN)
cir = Circle().shift(DOWN).to_edge(RIGHT)
self.play(ShowCreation(cir))
self.wait()
self.play(cir.animate.shift(x_shift(cir)))
return super().construct()
class trying9(Scene):
def construct(self):
rec = Rectangle().shift(LEFT*2)
cir = Circle().shift(RIGHT*2)
arrow = always_redraw(lambda :Arrow(rec.get_right(), cir.get_left()))
self.play(ShowCreation(rec), ShowCreation(cir))
self.play(GrowArrow(arrow))
self.play(rec.animate.shift(UP))
self.play(cir.animate.shift(DOWN+RIGHT*2))
return super().construct()
class trying10(Scene):
def construct(self):
seq = Sequence([1, 2, 3, 4, 5]).to_edge(UP)
main_caller = seq.get_cell(3).copy()
tree = CallTree(main_caller).next_to(seq, DOWN)
tree.depth_bar.align_to(seq, UP)
self.play(ShowCreation(seq))
self.wait()
self.play(ShowCreation(tree.depth_bar))
self.play(ShowCreation(tree))
to_caller = seq.get_cell(2).copy()
to_caller.next_to(main_caller, DOWN)
self.play(*tree.extent(main_caller, seq.get_cell(2).copy(), 2))
self.play(*tree.compose())
self.play(*tree.extent(main_caller, seq.get_cell(1).copy(), 2))
self.play(*tree.compose())
self.play(self.camera.frame.animate.shift(DOWN))
self.play(*tree.extent(tree.get_cell(2, 1), seq.get_cell(4).copy(), 3))
self.play(*tree.compose())
self.play(*tree.extent(tree.get_cell(2, 1), seq.get_cell(5).copy(), 3))
self.play(*tree.compose())
self.play(*tree.extent(tree.get_cell(2, 2), seq.get_cell(4).copy(), 3))
self.play(*tree.compose())
self.play(*tree.extent(tree.get_cell(2, 2), seq.get_cell(5).copy(), 3))
self.play(*tree.compose())
return super().construct()
| [
"os.getcwd"
] | [((106, 117), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (115, 117), False, 'import os\n')] |
import numpy as np
from time import time
import matplotlib.pyplot as plt
measure2index={"y-coordinate":0,"x-coordinate":1,"timestamp":2, "button_status":3,"tilt":4, "elevation":5,"pressure":6}
index2measure=list(measure2index.keys())
task2index={"spiral":0,"l":1,"le":2 ,"les":3,"lektorka" :4,"porovnat":5,"nepopadnout":6, "tram":7}
index2task=list(task2index.keys())
max_lengths=[16071, 4226, 6615, 6827, 7993, 5783, 4423, 7676]#max length per task
token_lengths=[16071,1242,1649,1956]#max length per token
stroke_lengths=[16071,752,1104,1476,3568,2057,2267,1231]#max length per stroke (either on paper or in air)
stroke_avg_plus_std=[2904,277,363,411,484,346,324,218]#stroke avg length + stroke avg length std
max_strokes=[25,15,15,21,29,43,35, 67]#max n° of strokes per task (in air + on paper)
plot2index={"loss":0,"accuracy":1}
index2plot= list(plot2index.keys())
on_paper_value=1.0#on_paper_stroke iff button_status==1.0
one_hot=np.identity(8)
def downsample(task,factor=2):
downsampled=[point for i,point in enumerate(task) if i%factor==0]
downsampled=np.array(downsampled)
return downsampled
def upsample(task):
upsampled=[]
for i,point in enumerate(task[:-1]):
upsampled.append(point)
upsampled.append(np.mean(task[i:i+2],axis=0))
upsampled=np.array(upsampled)
#/!\ np.aronud button_status after resampling !!
upsampled[:,measure2index["button_status"]]=np.around(upsampled[:,measure2index["button_status"]])
return upsampled
def get_significance(p):
"""used to print significance of a statistic test given p-value)"""
if p<0.01:
significance="***"
elif p<0.05:
significance="**"
elif p<0.1:
significance="*"
else:
significance="_"
return significance
def CorrectPool(out_size,current_pool):
"""makes convolved size divisible by pooling kernel"""
ratio=out_size/current_pool
if (ratio)%1==0:#whole number
return int(current_pool)
else:
whole_ratio=round(ratio)
if whole_ratio==0:
whole_ratio+=1
return int(out_size/whole_ratio)
def CorrectHyperparameters(input_size,seq_len,hidden_size,conv_kernel,pool_kernel ,padding=0,
stride=1,dilation=1, dropout=0.0,output_size=1,n_seq=1):
"""makes convolved size divisible by pooling kernel and computes size of sequence after convolutions"""
out_size=seq_len
print("seq_len :",out_size)
for i, (h,c,p,pad,d) in enumerate(list(zip(hidden_size,conv_kernel,pool_kernel,padding,dilation))):
print("layer",i+1)
in_size=out_size
out_size=get_out_size(out_size,pad,d,c,stride=1)
print("\tafter conv{} :{}".format(i+1,out_size))
if out_size<1:
c=(in_size-1)//d+1
out_size=get_out_size(in_size,pad,d,c,stride=1)
print("\t\tupdate c. after conv{} :{}".format(i+1,out_size))
conv_kernel[i]=c
pool_kernel[i]=CorrectPool(out_size,p)
out_size=get_out_size(out_size,padding=0,dilation=1,kernel_size=pool_kernel[i],stride=pool_kernel[i])
print("\tafter pool{} :{}".format(i+1,out_size))
out_size*=hidden_size[-1]
print("after flatting",out_size)
return input_size,out_size,hidden_size,conv_kernel,pool_kernel ,padding,stride,dilation, dropout,output_size
def wrong_len_gen(data,good_len):
"""used for splitting tasks into tokens"""
for i,s in enumerate(data):
if len(s) != good_len:
yield i
def get_out_size(in_size,padding,dilation,kernel_size,stride):
"""computes output size after a conv or a pool layer"""
return (in_size+2*padding-dilation*(kernel_size-1)-1)//stride +1
def min_max_scale(data,min_=0,max_=1):
return (max_-min_)*(data-np.min(data)/(np.max(data)-np.min(data)))+min_
def count_params(model):
"""returns (total n° of parameters, n° of trainable parameters)"""
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return total_params, trainable_params
def plot_task(task,measure2index=measure2index):
plt.plot(task[:,measure2index["x-coordinate"]],task[:,measure2index["y-coordinate"]])
plt.xlabel("x-coordinate")
plt.ylabel("y-coordinate")
def plot_measures(task,subplot=False,figsize=(6,4),index2measure=index2measure):
plt.figure(figsize=figsize)
for i,measure in enumerate(index2measure):
if subplot:
plt.subplot(3,3,i+1)
plt.plot(task[:,i],label=measure)
plt.xlabel("timesteps")
plt.ylabel(measure)
plt.legend()
def return_metrics(tp,tn,fp,fn):
accuracy= (tp+tn)/(tp+tn+fp+fn)
sensitivity = tp/(tp+fn) if (tp+fn) != 0 else 0.0 #without condition positives the sensitivity should be 0
specificity = tn/(tn+fp) if (tn+fp)!= 0 else 0.0 #idem
ppv = tp/(tp+fp) if tp+fp != 0 else 0.0 #without predicted positives the ppv should be 0
npv = tn/(tn+fn) if tn+fn !=0 else 0.0 #idem
return accuracy,sensitivity,specificity,ppv,npv
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ValueError('Boolean value expected.')
def flat_list(list):
return [item for sublist in list for item in sublist]
def timeSince(since):
now = time()
s = now - since
m = np.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def ReshapeAndVote(model_train_predictions,round_before_voting=True):
"""used to fuse the predictions of n_models models after n_CV CV"""
n_CV=len(model_train_predictions[0])
n_models=len(model_train_predictions)
if round_before_voting:
reshaped_train_predictions=[[np.around(model_train_predictions[i][j]) for i in range(n_models)] for j in range(n_CV)]
else:
reshaped_train_predictions=[[model_train_predictions[i][j] for i in range(n_models)] for j in range(n_CV)]
voted_train_predictions=[np.around(np.mean(reshaped_train_predictions[i],axis=0)) for i in range(n_CV)]
return voted_train_predictions
def confusion_matrix(y_true,y_pred):
if len(y_true)!=len(y_pred):
raise ValueError("y_true and y_pred should have the same shape, got {} and {}, respectively".format(len(y_true),len(y_pred)))
tn, fp, fn, tp=0,0,0,0
false_i=[]
for i, (target, pred) in enumerate(list(zip(y_true,y_pred))):
if target==0:#condition negative
if pred==0:
tn+=1
elif pred==1:
fp+=1
false_i.append(i)
else:
raise ValueError("model prediction should either be 0 or 1, got {}".format(pred))
elif target==1:#condition positive
if pred==0:
fn+=1
false_i.append(i)
elif pred ==1:
tp+=1
else:
raise ValueError("model prediction should either be 0 or 1, got {}".format(pred))
else:
raise ValueError("target should either be 0 or 1, got {}".format(target))
return tn, fp, fn, tp, false_i
| [
"numpy.identity",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.floor",
"numpy.max",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.around",
"numpy.min",
"time.time",
"matplotlib.pyplot.legend"
] | [((938, 952), 'numpy.identity', 'np.identity', (['(8)'], {}), '(8)\n', (949, 952), True, 'import numpy as np\n'), ((1071, 1092), 'numpy.array', 'np.array', (['downsampled'], {}), '(downsampled)\n', (1079, 1092), True, 'import numpy as np\n'), ((1295, 1314), 'numpy.array', 'np.array', (['upsampled'], {}), '(upsampled)\n', (1303, 1314), True, 'import numpy as np\n'), ((1416, 1471), 'numpy.around', 'np.around', (["upsampled[:, measure2index['button_status']]"], {}), "(upsampled[:, measure2index['button_status']])\n", (1425, 1471), True, 'import numpy as np\n'), ((4129, 4222), 'matplotlib.pyplot.plot', 'plt.plot', (["task[:, measure2index['x-coordinate']]", "task[:, measure2index['y-coordinate']]"], {}), "(task[:, measure2index['x-coordinate']], task[:, measure2index[\n 'y-coordinate']])\n", (4137, 4222), True, 'import matplotlib.pyplot as plt\n'), ((4219, 4245), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x-coordinate"""'], {}), "('x-coordinate')\n", (4229, 4245), True, 'import matplotlib.pyplot as plt\n'), ((4250, 4276), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y-coordinate"""'], {}), "('y-coordinate')\n", (4260, 4276), True, 'import matplotlib.pyplot as plt\n'), ((4362, 4389), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4372, 4389), True, 'import matplotlib.pyplot as plt\n'), ((4596, 4608), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4606, 4608), True, 'import matplotlib.pyplot as plt\n'), ((5381, 5387), 'time.time', 'time', ([], {}), '()\n', (5385, 5387), False, 'from time import time\n'), ((5416, 5432), 'numpy.floor', 'np.floor', (['(s / 60)'], {}), '(s / 60)\n', (5424, 5432), True, 'import numpy as np\n'), ((4498, 4533), 'matplotlib.pyplot.plot', 'plt.plot', (['task[:, i]'], {'label': 'measure'}), '(task[:, i], label=measure)\n', (4506, 4533), True, 'import matplotlib.pyplot as plt\n'), ((4540, 4563), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""timesteps"""'], {}), 
"('timesteps')\n", (4550, 4563), True, 'import matplotlib.pyplot as plt\n'), ((4572, 4591), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['measure'], {}), '(measure)\n', (4582, 4591), True, 'import matplotlib.pyplot as plt\n'), ((1252, 1282), 'numpy.mean', 'np.mean', (['task[i:i + 2]'], {'axis': '(0)'}), '(task[i:i + 2], axis=0)\n', (1259, 1282), True, 'import numpy as np\n'), ((4469, 4493), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(i + 1)'], {}), '(3, 3, i + 1)\n', (4480, 4493), True, 'import matplotlib.pyplot as plt\n'), ((6023, 6069), 'numpy.mean', 'np.mean', (['reshaped_train_predictions[i]'], {'axis': '(0)'}), '(reshaped_train_predictions[i], axis=0)\n', (6030, 6069), True, 'import numpy as np\n'), ((5769, 5809), 'numpy.around', 'np.around', (['model_train_predictions[i][j]'], {}), '(model_train_predictions[i][j])\n', (5778, 5809), True, 'import numpy as np\n'), ((3744, 3756), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (3750, 3756), True, 'import numpy as np\n'), ((3758, 3770), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (3764, 3770), True, 'import numpy as np\n'), ((3771, 3783), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (3777, 3783), True, 'import numpy as np\n')] |
import datetime
datenasc = int(input(f'insert you date of bit '))
atualdate = str(datetime.date.today())[0:4]
datestr = int(atualdate)
datefinal = datestr - datenasc
print(datefinal)
if datefinal < 18:
print(f'voce esta com {datefinal}Faltam {18-datefinal} pra você se alistar ao exercito hahahah' )
elif datefinal == 18:
print(f'Você completa 18 anos agora em {atualdate}'
f'Chegou a hora ser servir seu país como bucha de canhão otario.\nPegue seus documentos ')
else:
print(f'Você escapou sabichão, ja esta com {datefinal}, se livrou né safadenho')
| [
"datetime.date.today"
] | [((82, 103), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (101, 103), False, 'import datetime\n')] |
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the `create_cc_sysroot.py` script."""
import getpass
from pathlib import Path
from typing import Tuple
from cross_compile.sysroot_compiler import DockerConfig
from cross_compile.sysroot_compiler import Platform
from cross_compile.sysroot_compiler import QEMU_DIR_NAME
from cross_compile.sysroot_compiler import ROS_DOCKERFILE_NAME
from cross_compile.sysroot_compiler import SYSROOT_DIR_NAME
from cross_compile.sysroot_compiler import SysrootCompiler
import pytest
def _default_docker_kwargs() -> dict:
return {
'arch': 'aarch64',
'os': 'ubuntu',
'rosdistro': 'dashing',
'sysroot_base_image': '035662560449.dkr.ecr.us-east-2.amazonaws.com/cc-tool:'
'aarch64-bionic-dashing-fastrtps-prebuilt',
'docker_network_mode': 'host',
'sysroot_nocache': False,
}
@pytest.fixture
def platform_config() -> Platform:
return Platform(
arch='aarch64',
os='ubuntu',
rosdistro='dashing',
rmw='fastrtps')
@pytest.fixture
def docker_config() -> DockerConfig:
return DockerConfig(**_default_docker_kwargs())
def setup_mock_sysroot(path: Path) -> Tuple[Path, Path]:
"""Create mock directories to correctly construct the SysrootCreator."""
sysroot_dir = path / SYSROOT_DIR_NAME
sysroot_dir.mkdir()
ros_workspace_dir = sysroot_dir / 'ros_ws'
ros_workspace_dir.mkdir()
qemu_dir = sysroot_dir / QEMU_DIR_NAME
qemu_dir.mkdir()
qemu_binary_mock = qemu_dir / 'qemu'
qemu_binary_mock.ensure()
docker_ws_dir = sysroot_dir / ROS_DOCKERFILE_NAME
docker_ws_dir.ensure()
return sysroot_dir, ros_workspace_dir
def test_get_workspace_image_tag(platform_config):
"""Make sure the image tag is created correctly."""
image_tag = platform_config.get_workspace_image_tag()
test_tag = '{}/{}:latest'.format(getpass.getuser(), str(platform_config))
assert isinstance(image_tag, str)
assert image_tag == test_tag
def test_docker_config_args(docker_config):
"""Make sure the Docker configuration is setup correctly."""
args = _default_docker_kwargs()
test_config_string = (
'Base Image: {}\n'
'Network Mode: {}\n'
'Caching: {}'
).format(
args['sysroot_base_image'], args['docker_network_mode'], args['sysroot_nocache']
)
config_string = str(docker_config)
assert isinstance(config_string, str)
assert config_string == test_config_string
def test_sysroot_compiler_constructor(
platform_config, docker_config, tmpdir):
"""Test the SysrootCompiler constructor assuming valid path setup."""
# Create mock directories and files
sysroot_dir, ros_workspace_dir = setup_mock_sysroot(tmpdir)
sysroot_compiler = SysrootCompiler(
str(tmpdir), 'ros_ws', platform_config,
docker_config, None)
assert isinstance(sysroot_compiler.get_build_setup_script_path(), Path)
assert isinstance(sysroot_compiler.get_system_setup_script_path(), Path)
def test_sysroot_compiler_tree_validation(platform_config, docker_config, tmpdir):
"""
Ensure that the SysrootCompiler constructor validates the workspace.
Start with empty directory and add one piece at a time, expecting failures until
all parts are present.
"""
kwargs = {
'cc_root_dir': str(tmpdir),
'ros_workspace_dir': 'ros_ws',
'platform': platform_config,
'docker_config': docker_config,
'custom_setup_script_path': None,
}
# There's no 'sysroot' at all yet
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
sysroot_dir = tmpdir / SYSROOT_DIR_NAME
sysroot_dir.mkdir()
# ROS2 ws and qemu dirs are missing
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
ros_workspace_dir = sysroot_dir / 'ros_ws'
ros_workspace_dir.mkdir()
# qemu dirs are missing
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
qemu_dir = sysroot_dir / QEMU_DIR_NAME
qemu_dir.mkdir()
# the qemu binary is still missing
with pytest.raises(FileNotFoundError):
compiler = SysrootCompiler(**kwargs)
qemu_binary_mock = qemu_dir / 'qemu'
qemu_binary_mock.ensure()
# everything is present now
compiler = SysrootCompiler(**kwargs)
assert compiler
def verify_base_docker_images(arch, os, rosdistro, image_name):
"""Assert correct base image is generated."""
sysroot_base_image = None
docker_network_mode = 'host'
sysroot_nocache = 'False'
assert DockerConfig(
arch, os, rosdistro, sysroot_base_image,
docker_network_mode, sysroot_nocache).base_image == image_name
def test_get_docker_base_image():
"""Test that the correct base docker image is used for all arguments."""
verify_base_docker_images('aarch64', 'ubuntu', 'dashing', 'arm64v8/ubuntu:bionic')
verify_base_docker_images('aarch64', 'ubuntu', 'eloquent', 'arm64v8/ubuntu:bionic')
verify_base_docker_images('aarch64', 'ubuntu', 'kinetic', 'arm64v8/ubuntu:xenial')
verify_base_docker_images('aarch64', 'ubuntu', 'melodic', 'arm64v8/ubuntu:bionic')
verify_base_docker_images('aarch64', 'debian', 'dashing', 'arm64v8/debian:stretch')
verify_base_docker_images('aarch64', 'debian', 'eloquent', 'arm64v8/debian:buster')
verify_base_docker_images('aarch64', 'debian', 'kinetic', 'arm64v8/debian:jessie')
verify_base_docker_images('aarch64', 'debian', 'melodic', 'arm64v8/debian:stretch')
verify_base_docker_images('armhf', 'ubuntu', 'dashing', 'arm32v7/ubuntu:bionic')
verify_base_docker_images('armhf', 'ubuntu', 'eloquent', 'arm32v7/ubuntu:bionic')
verify_base_docker_images('armhf', 'ubuntu', 'kinetic', 'arm32v7/ubuntu:xenial')
verify_base_docker_images('armhf', 'ubuntu', 'melodic', 'arm32v7/ubuntu:bionic')
verify_base_docker_images('armhf', 'debian', 'dashing', 'arm32v7/debian:stretch')
verify_base_docker_images('armhf', 'debian', 'eloquent', 'arm32v7/debian:buster')
verify_base_docker_images('armhf', 'debian', 'kinetic', 'arm32v7/debian:jessie')
verify_base_docker_images('armhf', 'debian', 'melodic', 'arm32v7/debian:stretch')
| [
"cross_compile.sysroot_compiler.SysrootCompiler",
"cross_compile.sysroot_compiler.DockerConfig",
"pytest.raises",
"cross_compile.sysroot_compiler.Platform",
"getpass.getuser"
] | [((1545, 1619), 'cross_compile.sysroot_compiler.Platform', 'Platform', ([], {'arch': '"""aarch64"""', 'os': '"""ubuntu"""', 'rosdistro': '"""dashing"""', 'rmw': '"""fastrtps"""'}), "(arch='aarch64', os='ubuntu', rosdistro='dashing', rmw='fastrtps')\n", (1553, 1619), False, 'from cross_compile.sysroot_compiler import Platform\n'), ((4972, 4997), 'cross_compile.sysroot_compiler.SysrootCompiler', 'SysrootCompiler', ([], {}), '(**kwargs)\n', (4987, 4997), False, 'from cross_compile.sysroot_compiler import SysrootCompiler\n'), ((2501, 2518), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (2516, 2518), False, 'import getpass\n'), ((4191, 4223), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (4204, 4223), False, 'import pytest\n'), ((4244, 4269), 'cross_compile.sysroot_compiler.SysrootCompiler', 'SysrootCompiler', ([], {}), '(**kwargs)\n', (4259, 4269), False, 'from cross_compile.sysroot_compiler import SysrootCompiler\n'), ((4388, 4420), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (4401, 4420), False, 'import pytest\n'), ((4441, 4466), 'cross_compile.sysroot_compiler.SysrootCompiler', 'SysrootCompiler', ([], {}), '(**kwargs)\n', (4456, 4466), False, 'from cross_compile.sysroot_compiler import SysrootCompiler\n'), ((4582, 4614), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (4595, 4614), False, 'import pytest\n'), ((4635, 4660), 'cross_compile.sysroot_compiler.SysrootCompiler', 'SysrootCompiler', ([], {}), '(**kwargs)\n', (4650, 4660), False, 'from cross_compile.sysroot_compiler import SysrootCompiler\n'), ((4774, 4806), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (4787, 4806), False, 'import pytest\n'), ((4827, 4852), 'cross_compile.sysroot_compiler.SysrootCompiler', 'SysrootCompiler', ([], {}), '(**kwargs)\n', (4842, 4852), False, 'from cross_compile.sysroot_compiler import 
SysrootCompiler\n'), ((5238, 5333), 'cross_compile.sysroot_compiler.DockerConfig', 'DockerConfig', (['arch', 'os', 'rosdistro', 'sysroot_base_image', 'docker_network_mode', 'sysroot_nocache'], {}), '(arch, os, rosdistro, sysroot_base_image, docker_network_mode,\n sysroot_nocache)\n', (5250, 5333), False, 'from cross_compile.sysroot_compiler import DockerConfig\n')] |
import cx_Oracle
from oracledb import OracleJSONDatabaseConnection
import json
jsondb = OracleJSONDatabaseConnection()
connection = jsondb.get_connection()
connection.autocommit = True
soda = connection.getSodaDatabase()
x_collection = soda.createCollection('f1_2021_weather')
all_data = list()
for doc in x_collection.find().getCursor():
content = doc.getContent()
all_data.append(content)
print('Data length: {}'.format(len(all_data)))
with open("weather.json", 'w') as outfile:
outfile.write(json.dumps(all_data, indent=4))
outfile.close() | [
"json.dumps",
"oracledb.OracleJSONDatabaseConnection"
] | [((90, 120), 'oracledb.OracleJSONDatabaseConnection', 'OracleJSONDatabaseConnection', ([], {}), '()\n', (118, 120), False, 'from oracledb import OracleJSONDatabaseConnection\n'), ((514, 544), 'json.dumps', 'json.dumps', (['all_data'], {'indent': '(4)'}), '(all_data, indent=4)\n', (524, 544), False, 'import json\n')] |
from Tkinter import *
import ttk
import BuyBook
import BookInformationPage
import Message
class UserPage(object):
def __init__(self, root, color, font, dbConnection, userInfo):
for child in root.winfo_children():
child.destroy()
self.root = root
self.color = color
self.font = font
self.dbConnection = dbConnection
self.userInfo = userInfo
self.screen_width = self.root.winfo_screenwidth() * 3 / 4
self.screen_height = self.root.winfo_screenheight() * 3 / 4
self.gui_init()
def gui_init(self):
self.up_frame = Frame(
self.root,
cursor='hand1',
bg=self.color,
height=self.screen_height / 8,
width=self.screen_width)
self.up_frame.grid_propagate(0)
self.up_frame.pack(side=TOP, expand=True, fill=BOTH)
self.down_frame = Frame(
self.root,
cursor='hand1',
bg=self.color,
height=self.screen_height * 7 / 8,
width=self.screen_width)
self.down_frame.grid_propagate(0)
self.down_frame.pack(side=TOP, expand=True, fill=BOTH)
self.profileFrame = ProfileFrame(self.up_frame, self.screen_width / 2,
self.screen_height / 8, self.color,
self.font, self.userInfo)
self.logoutFrame = LogOutFrame(
self.root, self.up_frame, self.screen_width / 2,
self.screen_height / 8, self.color, self.font, self.dbConnection)
self.booksInfoFrame = BuyedBooks(
self.down_frame, self.screen_width, self.screen_height * 7 / 8,
self.color, self.font, self.dbConnection, self.userInfo)
class ProfileFrame(object):
def __init__(self, root, width, height, color, font, userInfo):
self.root = root
self.width = width
self.height = height
self.color = color
self.font = font
self.userInfo = userInfo
self.gui_init()
def gui_init(self):
self.frame = Frame(
self.root,
cursor='hand1',
bg=self.color,
bd=5,
relief=RAISED,
width=self.width,
height=self.height)
self.frame.pack(expand=True, side=LEFT, fill=BOTH)
self.frame.grid_propagate(0)
profile_info = self.extract_profile()
self.profileLabel = Label(
self.frame, text=profile_info, font=self.font, bg=self.color)
self.profileLabel.place(relx=0.5, rely=0.5, anchor='center')
def extract_profile(self):
userInfo = "\n".join(self.userInfo.values())
return userInfo
class LogOutFrame(object):
def __init__(self, parent, root, width, height, color, font, dbConnection):
self.root = root
self.width = width
self.height = height
self.color = color
self.font = font
self.parent = parent
self.dbConnection = dbConnection
self.gui_init()
def gui_init(self):
self.frame = Frame(
self.root,
cursor='hand1',
bd=5,
relief=RAISED,
bg=self.color,
width=self.width,
height=self.height)
self.frame.pack(side=LEFT, expand=True, fill=BOTH)
self.frame.grid_propagate(0)
self.logout_button = Button(
self.frame, text="LogOut", font=self.font, borderwidth=5)
self.logout_button.place(relx=0.5, rely=0.5, anchor='center')
self.logout_button.bind("<Button-1>", self.__logOutAction)
def __logOutAction(self, event):
self.dbConnection.close()
for child in self.parent.winfo_children():
child.destroy()
self.parent.destroy()
class BuyedBooks(object):
def __init__(self, root, width, height, color, font, dbConnection,
userInfo):
self.root = root
self.width = width
self.height = height
self.color = color
self.font = font
self.dbConnection = dbConnection
self.userInfo = userInfo
self.gui_init()
def gui_init(self):
frame_up = Frame(
self.root,
cursor='hand1',
bg=self.color,
width=self.width,
height=self.height * 1 / 12)
frame_up.grid_propagate(0)
frame_up.pack(side=TOP, expand=True, fill=BOTH)
frame_middle = Frame(
self.root,
cursor='hand1',
bg=self.color,
width=self.width,
height=self.height * 10 / 12)
frame_middle.grid_propagate(0)
frame_middle.pack(side=TOP, expand=True, fill=BOTH)
frame_down = Frame(
self.root,
cursor='hand1',
bg=self.color,
width=self.width,
height=self.height * 1 / 12)
frame_down.grid_propagate(0)
frame_down.pack(side=TOP, expand=True, fill=BOTH)
self.uploadedFilesLabel = Label(
frame_up, text="BuyedBooks", font=self.font, bg=self.color)
self.uploadedFilesLabel.place(relx=0.5, rely=0.5, anchor='center')
self.booksDisplay = ttk.Treeview(
frame_middle,
columns=('#1', '#2', '#3', '#4', '#5'),
height=20,
show='headings',
padding=(1, 1, 1, 1))
self.booksDisplay.heading('#1', text='Title')
self.booksDisplay.heading('#2', text='Author')
self.booksDisplay.heading('#3', text='Genre')
self.booksDisplay.heading('#4', text='Quantity')
self.booksDisplay.heading('#5', text='Review Score')
self.booksDisplay.column('#1', stretch=True, width=self.width / 5)
self.booksDisplay.column('#2', stretch=True, width=self.width / 5)
self.booksDisplay.column('#3', stretch=True, width=self.width / 5)
self.booksDisplay.column('#4', stretch=True, width=self.width / 5)
self.booksDisplay.column('#5', stretch=True, width=self.width / 5)
self.booksDisplay.pack(side=TOP, fill=BOTH, expand=True)
#self.booksDisplay.grid(row=5, columnspan=4, sticky='nw')
#self.booksDisplay.place(relx=0.5, rely=0.5, anchor='center')
self.booksDisplayStyle = ttk.Style()
self.booksDisplayStyle.configure(
"Treeview", font=self.font, rowheight=50)
self.booksDisplayStyle.configure("Treeview.Heading", font=self.font)
#bind treeview to mouse click
self.booksDisplay.bind("<ButtonRelease-1>", self.__bookInfo)
self.booksDisplay.tag_configure(
"tagBook", background="white", foreground="red", font=self.font)
self.addNewBookButton = Button(
frame_down, text="Buy new book", font=self.font)
self.addNewBookButton.place(relx=0.5, rely=0.5, anchor='center')
self.addNewBookButton.bind("<Button-1>", self.__buyNewBook)
self.__display_availableBooks()
def __buyNewBook(self, event):
new_window = Toplevel(self.root)
BuyBook.BuyBook(new_window, self.color, self.font, self.dbConnection,
self.userInfo)
new_window.wait_window()
self.__display_availableBooks()
def __bookInfo(self, event):
selectedItem = self.booksDisplay.focus()
valueItem = self.booksDisplay.item(selectedItem)['values']
bookName=valueItem[0]
new_window = Toplevel(self.root)
newBookInfo = BookInformationPage.BookInformation(
new_window, self.color, self.dbConnection, valueItem[0], self.userInfo)
new_window.wait_window()
self.__display_availableBooks()
def __display_availableBooks(self):
for child in self.booksDisplay.get_children():
self.booksDisplay.delete(child)
cursor = self.dbConnection.cursor()
args = (self.userInfo['userName'], )
cursor.callproc('getUsersBooks', args)
for result in cursor.stored_results():
books = result.fetchall()
for book in books:
self.booksDisplay.insert(
'', 'end', values=book, tags='tagBook')
cursor.close()
| [
"ttk.Treeview",
"BookInformationPage.BookInformation",
"BuyBook.BuyBook",
"ttk.Style"
] | [((5261, 5382), 'ttk.Treeview', 'ttk.Treeview', (['frame_middle'], {'columns': "('#1', '#2', '#3', '#4', '#5')", 'height': '(20)', 'show': '"""headings"""', 'padding': '(1, 1, 1, 1)'}), "(frame_middle, columns=('#1', '#2', '#3', '#4', '#5'), height=\n 20, show='headings', padding=(1, 1, 1, 1))\n", (5273, 5382), False, 'import ttk\n'), ((6332, 6343), 'ttk.Style', 'ttk.Style', ([], {}), '()\n', (6341, 6343), False, 'import ttk\n'), ((7116, 7205), 'BuyBook.BuyBook', 'BuyBook.BuyBook', (['new_window', 'self.color', 'self.font', 'self.dbConnection', 'self.userInfo'], {}), '(new_window, self.color, self.font, self.dbConnection, self.\n userInfo)\n', (7131, 7205), False, 'import BuyBook\n'), ((7542, 7654), 'BookInformationPage.BookInformation', 'BookInformationPage.BookInformation', (['new_window', 'self.color', 'self.dbConnection', 'valueItem[0]', 'self.userInfo'], {}), '(new_window, self.color, self.\n dbConnection, valueItem[0], self.userInfo)\n', (7577, 7654), False, 'import BookInformationPage\n')] |
import os
import simplejson as json
def readJson(fileName):
test_filename = os.path.join(os.path.dirname(__file__), fileName)
with open(test_filename, mode='rb') as json_file:
return json.load(json_file)
| [
"simplejson.load",
"os.path.dirname"
] | [((95, 120), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (110, 120), False, 'import os\n'), ((201, 221), 'simplejson.load', 'json.load', (['json_file'], {}), '(json_file)\n', (210, 221), True, 'import simplejson as json\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import daiquiri
from time import time
import warp10client
LOG = daiquiri.getLogger(__name__)
warp10_api_url = '' # Add here backend url where metrics are stored
read_token = '' # Add here your metrics read token
write_token = '' # Add here your metrics write token
# To get metrics:
metric_get = {
'name': 'cpu_util',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
},
'aggregate': {
'type': 'mean',
'span': 1000000 * 3600,
},
'timestamp': {
'start': "2017-01-01T00:00:00.000Z",
'end': "2018-01-01T00:00:00.000Z"
}
# 'timestamp': { 'end': "2018-01-01T00:00:00.000Z" }
# 'timestamp': { 'start': None, 'end': None }
}
# To write metrics:
metric_write = {
'name': 'cpu_util_mjozefcz',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
'unit': '%',
},
'position': {
'longitude': None,
'latitude': None,
'elevation': None,
'timestamp': time() * 1000 * 1000,
},
'value': 11,
}
# To check metrics
metric_check = {
'name': 'cpu_util',
'tags': {
'resource_id': '18d94676-077c-4c13-b000-27fd603f3056',
'project_id': '8069f876e7d444249ef04b9a74090711',
},
}
# arguments need to authorize in metrics backend
kwargs = {
'write_token': write_token,
'read_token': read_token,
'warp10_api_url': warp10_api_url,
}
client = warp10client.Warp10Client(**kwargs)
# Consider to create timeseries, new object with included metrics as each point
# Thats goooood idea.
metric_get_test = client.get(metric_get)
metric_exists = client.exists(metric_check)
metric_obj = warp10client.Metric(**metric_write)
metric_send = client.set(metric_write)
# delete method is not yet implemented
# metric_send = client.delete(metric_write)
| [
"warp10client.Metric",
"daiquiri.getLogger",
"time.time",
"warp10client.Warp10Client"
] | [((113, 141), 'daiquiri.getLogger', 'daiquiri.getLogger', (['__name__'], {}), '(__name__)\n', (131, 141), False, 'import daiquiri\n'), ((1600, 1635), 'warp10client.Warp10Client', 'warp10client.Warp10Client', ([], {}), '(**kwargs)\n', (1625, 1635), False, 'import warp10client\n'), ((1839, 1874), 'warp10client.Metric', 'warp10client.Metric', ([], {}), '(**metric_write)\n', (1858, 1874), False, 'import warp10client\n'), ((1174, 1180), 'time.time', 'time', ([], {}), '()\n', (1178, 1180), False, 'from time import time\n')] |
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.binding.datatypes as xs
import pyxb.binding.basis
import pyxb.utils.domutils
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:simpleType name="foo"/>
</xs:schema>'''
from pyxb.exceptions_ import *
import unittest
class TestTrac_200908181430 (unittest.TestCase):
def testParsing (self):
self.assertRaises(pyxb.SchemaValidationError, pyxb.binding.generate.GeneratePython, schema_text=xsd)
if __name__ == '__main__':
unittest.main()
| [
"logging.getLogger",
"unittest.main",
"logging.basicConfig"
] | [((99, 126), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (116, 126), False, 'import logging\n'), ((70, 91), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (89, 91), False, 'import logging\n'), ((677, 692), 'unittest.main', 'unittest.main', ([], {}), '()\n', (690, 692), False, 'import unittest\n')] |
import sys
ii = int(sys.argv[1])
env = sys.argv[2]
# python3 print_data_structure.py 22 MD10
import glob
import os
import numpy as n
import EmergeIterate
iterate = EmergeIterate.EmergeIterate(ii, env)
iterate.open_snapshots()
def print_attr(h5item):
for attr in h5item:
print(attr, h5item[attr])
def print_all_key(h5item):
for key in h5item.keys():
print('========================================')
print(key, h5item[key])
print('- - - - - - - - - - - - - - - - - - - - ')
print_attr(h5item[key])
def print_data_structure(h5item):
print('+ + + + + + + HEADER + + + + + + + + +')
print_attr(h5item.attrs)
print('\n')
print('+ + + + + + + DATA + + + + + + + + + +')
print_all_key(h5item)
print_data_structure(iterate.f0)
| [
"EmergeIterate.EmergeIterate"
] | [((165, 201), 'EmergeIterate.EmergeIterate', 'EmergeIterate.EmergeIterate', (['ii', 'env'], {}), '(ii, env)\n', (192, 201), False, 'import EmergeIterate\n')] |
import pytest
import six
import ujson
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline as _TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.coders import typecoders
from apache_beam.typehints import Dict, Union
from pipe_tools.coders import JSONDictCoder
from pipe_tools.coders import JSONDict
from pipe_tools.generator import MessageGenerator
class MyType():
pass
@pytest.mark.filterwarnings('ignore:Using fallback coder:UserWarning')
@pytest.mark.filterwarnings('ignore:The compiler package is deprecated and removed in Python 3.x.:DeprecationWarning')
class TestCoders():
def test_JSONDictCoder(self):
records = [
{},
{'a': 1, 'b': 2, 'c': None},
{"test":None},
]
coder = JSONDictCoder()
for r in records:
assert r == coder.decode(coder.encode(r))
def test_type_hints(self):
messages = MessageGenerator()
source = beam.Create(messages)
assert source.get_output_type() == Dict[six.binary_type, Union[float, int]]
with _TestPipeline() as p:
result = (
p | beam.Create(messages)
)
p.run() | [
"pytest.mark.filterwarnings",
"pipe_tools.generator.MessageGenerator",
"apache_beam.Create",
"apache_beam.testing.test_pipeline.TestPipeline",
"pipe_tools.coders.JSONDictCoder"
] | [((491, 560), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Using fallback coder:UserWarning"""'], {}), "('ignore:Using fallback coder:UserWarning')\n", (517, 560), False, 'import pytest\n'), ((562, 689), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:The compiler package is deprecated and removed in Python 3.x.:DeprecationWarning"""'], {}), "(\n 'ignore:The compiler package is deprecated and removed in Python 3.x.:DeprecationWarning'\n )\n", (588, 689), False, 'import pytest\n'), ((865, 880), 'pipe_tools.coders.JSONDictCoder', 'JSONDictCoder', ([], {}), '()\n', (878, 880), False, 'from pipe_tools.coders import JSONDictCoder\n'), ((1013, 1031), 'pipe_tools.generator.MessageGenerator', 'MessageGenerator', ([], {}), '()\n', (1029, 1031), False, 'from pipe_tools.generator import MessageGenerator\n'), ((1050, 1071), 'apache_beam.Create', 'beam.Create', (['messages'], {}), '(messages)\n', (1061, 1071), True, 'import apache_beam as beam\n'), ((1170, 1185), 'apache_beam.testing.test_pipeline.TestPipeline', '_TestPipeline', ([], {}), '()\n', (1183, 1185), True, 'from apache_beam.testing.test_pipeline import TestPipeline as _TestPipeline\n'), ((1235, 1256), 'apache_beam.Create', 'beam.Create', (['messages'], {}), '(messages)\n', (1246, 1256), True, 'import apache_beam as beam\n')] |
#!/usr/bin/env python
"""Creates the JADE configuration for stage 2 of the demo pipeline."""
import os
import sys
from jade.models import PipelineConfig
from jade.utils.subprocess_manager import run_command
from jade.utils.utils import load_data
PRED_GDP_COMMANDS_FILE = "pred_gdp_commands.txt"
def main():
config = PipelineConfig(**load_data(os.environ["JADE_PIPELINE_STATUS_FILE"]))
cur_stage = config.stages[-1]
cur_stage_output = cur_stage.path
previous_stage = config.stages[-2]
previous_stage_output = previous_stage.path
script = "jade/extensions/demo/merge_pred_gdp.py"
with open(PRED_GDP_COMMANDS_FILE, "w") as f_out:
cmd = f"python {script} run {previous_stage_output} {cur_stage_output}"
f_out.write(cmd + "\n")
cmd = "jade config create pred_gdp_commands.txt -c config-stage2.json"
sys.exit(run_command(cmd))
if __name__ == "__main__":
main()
| [
"jade.utils.subprocess_manager.run_command",
"jade.utils.utils.load_data"
] | [((863, 879), 'jade.utils.subprocess_manager.run_command', 'run_command', (['cmd'], {}), '(cmd)\n', (874, 879), False, 'from jade.utils.subprocess_manager import run_command\n'), ((343, 393), 'jade.utils.utils.load_data', 'load_data', (["os.environ['JADE_PIPELINE_STATUS_FILE']"], {}), "(os.environ['JADE_PIPELINE_STATUS_FILE'])\n", (352, 393), False, 'from jade.utils.utils import load_data\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import passlib.utils
from hamal.conf import utils
max_password_length = cfg.IntOpt(
'max_password_length',
default=4096,
max=passlib.utils.MAX_PASSWORD_SIZE,
help=utils.fmt("""
Maximum allowed length for user passwords. Decrease this value to improve
performance. Changing this value does not effect existing passwords.
"""))
password_hash_algorithm = cfg.StrOpt(
'password_hash_algorithm',
choices=['bcrypt', 'scrypt', 'pbkdf2_sha512'],
default='bcrypt',
help=utils.fmt("""
The password hashing algorithm to use for passwords stored within hamal.
"""))
password_hash_rounds = cfg.IntOpt(
'password_hash_rounds',
help=utils.fmt("""
This option represents a trade off between security and performance. Higher
values lead to slower performance, but higher security. Changing this option
will only affect newly created passwords as existing password hashes already
have a fixed number of rounds applied, so it is safe to tune this option in a
running cluster.
The default for bcrypt is 12, must be between 4 and 31, inclusive.
The default for scrypt is 16, must be within `range(1,32)`.
The default for pbkdf_sha512 is 60000, must be within `range(1,1<32)`
WARNING: If using scrypt, increasing this value increases BOTH time AND
memory requirements to hash a password.
"""))
salt_bytesize = cfg.IntOpt(
'salt_bytesize',
min=0,
max=96,
help=utils.fmt("""
Number of bytes to use in scrypt and pbkfd2_sha512 hashing salt.
Default for scrypt is 16 bytes.
Default for pbkfd2_sha512 is 16 bytes.
Limited to a maximum of 96 bytes due to the size of the column used to store
password hashes.
"""))
scrypt_block_size = cfg.IntOpt(
'scrypt_block_size',
help=utils.fmt("""
Optional block size to pass to scrypt hash function (the `r` parameter).
Useful for tuning scrypt to optimal performance for your CPU architecture.
This option is only used when the `password_hash_algorithm` option is set
to `scrypt`. Defaults to 8.
"""))
scrypt_paralellism = cfg.IntOpt(
'scrypt_parallelism',
help=utils.fmt("""
Optional parallelism to pass to scrypt hash function (the `p` parameter).
This option is only used when the `password_hash_algorithm` option is set
to `scrypt`. Defaults to 1.
"""))
GROUP_NAME = __name__.split('.')[-1]
ALL_OPTS = [
max_password_length,
password_hash_algorithm,
password_hash_rounds,
scrypt_block_size,
scrypt_paralellism,
salt_bytesize
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS} | [
"hamal.conf.utils.fmt"
] | [((756, 927), 'hamal.conf.utils.fmt', 'utils.fmt', (['"""\nMaximum allowed length for user passwords. Decrease this value to improve\nperformance. Changing this value does not effect existing passwords.\n"""'], {}), '(\n """\nMaximum allowed length for user passwords. Decrease this value to improve\nperformance. Changing this value does not effect existing passwords.\n"""\n )\n', (765, 927), False, 'from hamal.conf import utils\n'), ((1071, 1176), 'hamal.conf.utils.fmt', 'utils.fmt', (['"""\nThe password hashing algorithm to use for passwords stored within hamal. \n"""'], {}), '(\n """\nThe password hashing algorithm to use for passwords stored within hamal. \n"""\n )\n', (1080, 1176), False, 'from hamal.conf import utils\n'), ((1241, 1907), 'hamal.conf.utils.fmt', 'utils.fmt', (['"""\nThis option represents a trade off between security and performance. Higher\nvalues lead to slower performance, but higher security. Changing this option\nwill only affect newly created passwords as existing password hashes already\nhave a fixed number of rounds applied, so it is safe to tune this option in a\nrunning cluster.\n\nThe default for bcrypt is 12, must be between 4 and 31, inclusive.\n\nThe default for scrypt is 16, must be within `range(1,32)`.\n\nThe default for pbkdf_sha512 is 60000, must be within `range(1,1<32)`\n\nWARNING: If using scrypt, increasing this value increases BOTH time AND\nmemory requirements to hash a password.\n"""'], {}), '(\n """\nThis option represents a trade off between security and performance. Higher\nvalues lead to slower performance, but higher security. 
Changing this option\nwill only affect newly created passwords as existing password hashes already\nhave a fixed number of rounds applied, so it is safe to tune this option in a\nrunning cluster.\n\nThe default for bcrypt is 12, must be between 4 and 31, inclusive.\n\nThe default for scrypt is 16, must be within `range(1,32)`.\n\nThe default for pbkdf_sha512 is 60000, must be within `range(1,1<32)`\n\nWARNING: If using scrypt, increasing this value increases BOTH time AND\nmemory requirements to hash a password.\n"""\n )\n', (1250, 1907), False, 'from hamal.conf import utils\n'), ((1981, 2241), 'hamal.conf.utils.fmt', 'utils.fmt', (['"""\nNumber of bytes to use in scrypt and pbkfd2_sha512 hashing salt.\n\nDefault for scrypt is 16 bytes.\nDefault for pbkfd2_sha512 is 16 bytes.\n\nLimited to a maximum of 96 bytes due to the size of the column used to store\npassword hashes.\n"""'], {}), '(\n """\nNumber of bytes to use in scrypt and pbkfd2_sha512 hashing salt.\n\nDefault for scrypt is 16 bytes.\nDefault for pbkfd2_sha512 is 16 bytes.\n\nLimited to a maximum of 96 bytes due to the size of the column used to store\npassword hashes.\n"""\n )\n', (1990, 2241), False, 'from hamal.conf import utils\n'), ((2300, 2578), 'hamal.conf.utils.fmt', 'utils.fmt', (['"""\nOptional block size to pass to scrypt hash function (the `r` parameter).\nUseful for tuning scrypt to optimal performance for your CPU architecture.\nThis option is only used when the `password_hash_algorithm` option is set\nto `scrypt`. Defaults to 8.\n"""'], {}), '(\n """\nOptional block size to pass to scrypt hash function (the `r` parameter).\nUseful for tuning scrypt to optimal performance for your CPU architecture.\nThis option is only used when the `password_hash_algorithm` option is set\nto `scrypt`. 
Defaults to 8.\n"""\n )\n', (2309, 2578), False, 'from hamal.conf import utils\n'), ((2639, 2844), 'hamal.conf.utils.fmt', 'utils.fmt', (['"""\nOptional parallelism to pass to scrypt hash function (the `p` parameter).\nThis option is only used when the `password_hash_algorithm` option is set \nto `scrypt`. Defaults to 1.\n"""'], {}), '(\n """\nOptional parallelism to pass to scrypt hash function (the `p` parameter).\nThis option is only used when the `password_hash_algorithm` option is set \nto `scrypt`. Defaults to 1.\n"""\n )\n', (2648, 2844), False, 'from hamal.conf import utils\n')] |
# -*- coding: utf-8 -*-
# This code is licensed under the MIT License (see LICENSE file for details)
import platform
import datetime
import sys
import pathlib
import subprocess
import time
from .. import scope_job_runner
from ..config import scope_configuration
def main():
if len(sys.argv) == 2 and sys.argv[1] == '--install':
install_systemd_units()
else:
check_job_runner()
TIMER_UNIT = '''[Unit]
Description=Check that scope_job_runner is active if jobs are queued
[Timer]
OnBootSec=15min
OnUnitActiveSec=45min
[Install]
WantedBy=timers.target
'''
SERVICE_UNIT = '''[Unit]
Description=Check that scope_job_runner is active if jobs are queued
[Service]
ExecStart={executable}
'''
def install_systemd_units():
base_unit = pathlib.Path('/etc/systemd/system/job_runner_check')
timer_file = base_unit.with_suffix('.timer')
timer_file.write_text(TIMER_UNIT)
timer_file.chmod(0o644)
service_file = base_unit.with_suffix('.service')
service_file.write_text(SERVICE_UNIT.format(executable=sys.argv[0]))
service_file.chmod(0o644)
subprocess.run(['systemctl', 'enable', timer_file.name], check=True)
subprocess.run(['systemctl', 'start', timer_file.name], check=True)
print(f'systemd units installed. Run systemctl status {timer_file.name} or {base_unit.name} to check.')
ERROR_SUBJECT = '{host}: scope job pending but scope_job_runner is inactive.'
ERROR_MESSAGE = '''One or more of your jobs is overdue on {host}, but the scope job runner daemon is not running.
These jobs will not be run until the command `scope_job_runner start` is executed on that machine.
Time: {time}
Queued Jobs:
{jobs}
'''
ALL_CLEAR_SUBJECT = '{host}: scope_job_runner was reactivated.'
ALL_CLEAR_MESSAGE = '''One or more of your jobs on {host} was stalled due to an inactive job runner.
The job runner has now been restarted and your jobs will be run as planned.
Time: {time}
Queued Jobs:
{jobs}
'''
def check_job_runner():
runner = scope_job_runner.JobRunner()
problem_file = scope_configuration.CONFIG_DIR / '.jobs_queued_but_runner_inactive'
overdue_jobs, to_email = get_overdue_jobs(runner)
if len(overdue_jobs) == 0:
return
if runner.is_running():
if problem_file.exists():
# job runner was restarted; problem is cleared.
# Alert previous email recipients that things are good now
print('Previous error, but now job runner is running.')
send_email(to_email, runner, overdue_jobs, ALL_CLEAR_SUBJECT, ALL_CLEAR_MESSAGE, 'all-clear')
# Remove the problem-file flag
problem_file.unlink()
else: # job runner is not running.
print('Jobs queued but job runner is not running.')
previously_emailed = set()
if problem_file.exists():
# this error was previously detected
previously_emailed.update(problem_file.read_text().split('\n'))
to_email -= previously_emailed
if to_email:
# we have not alerted some people about the queued jobs
send_email(to_email, runner, overdue_jobs, ERROR_SUBJECT, ERROR_MESSAGE, 'alert')
problem_file.write_text('\n'.join(to_email | previously_emailed))
else:
print('No alert emailed: all relevant parties have already been emailed.')
def get_overdue_jobs(runner):
# Get overdue jobs that anyone cares about (e.g. that aren't system checks and have
# emails attached).
now = time.time()
exec_dir = pathlib.Path(sys.argv[0]).parent
overdue_jobs = []
to_email = set()
for job in runner.jobs.get_jobs():
if ( job.exec_file.parent != exec_dir and # job is user-provided, not like incubator_check
job.status == scope_job_runner.STATUS_QUEUED and # and is active
job.next_run_time is not None and # and is scheduled to run again
job.next_run_time < now and # and is overdue
job.alert_emails ): # and has a non-empty, non-None list of people to alert
overdue_jobs.append(job)
to_email.update(job.alert_emails)
return overdue_jobs, to_email
def send_email(to_email, runner, jobs, subject_template, body_template, email_type):
host = platform.node().split('.')[0]
now = datetime.datetime.now().isoformat(sep=' ', timespec='seconds')
subject = subject_template.format(host=host)
job_blurbs = '\n'.join(runner.format_job_blurb(job) for job in jobs)
message = body_template.format(host=host, time=now, jobs=job_blurbs)
print('Emailing {} about the following jobs:\n{}'.format(email_type, job_blurbs))
runner.send_error_email(sorted(to_email), subject, message)
| [
"platform.node",
"pathlib.Path",
"subprocess.run",
"datetime.datetime.now",
"time.time"
] | [((761, 813), 'pathlib.Path', 'pathlib.Path', (['"""/etc/systemd/system/job_runner_check"""'], {}), "('/etc/systemd/system/job_runner_check')\n", (773, 813), False, 'import pathlib\n'), ((1089, 1157), 'subprocess.run', 'subprocess.run', (["['systemctl', 'enable', timer_file.name]"], {'check': '(True)'}), "(['systemctl', 'enable', timer_file.name], check=True)\n", (1103, 1157), False, 'import subprocess\n'), ((1162, 1229), 'subprocess.run', 'subprocess.run', (["['systemctl', 'start', timer_file.name]"], {'check': '(True)'}), "(['systemctl', 'start', timer_file.name], check=True)\n", (1176, 1229), False, 'import subprocess\n'), ((3495, 3506), 'time.time', 'time.time', ([], {}), '()\n', (3504, 3506), False, 'import time\n'), ((3522, 3547), 'pathlib.Path', 'pathlib.Path', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (3534, 3547), False, 'import pathlib\n'), ((4294, 4317), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4315, 4317), False, 'import datetime\n'), ((4254, 4269), 'platform.node', 'platform.node', ([], {}), '()\n', (4267, 4269), False, 'import platform\n')] |
from systems.plugins.index import BaseProvider
import os
class Provider(BaseProvider('task', 'upload')):
def execute(self, results, params):
file_path = self.get_path(self.field_file)
if not os.path.exists(file_path):
self.command.error("Upload task provider file {} does not exist".format(file_path))
ssh = self._get_ssh()
ssh.upload(file_path, self.field_remote_path,
mode = self.field_mode,
owner = self.field_owner,
group = self.field_group
)
| [
"os.path.exists",
"systems.plugins.index.BaseProvider"
] | [((75, 105), 'systems.plugins.index.BaseProvider', 'BaseProvider', (['"""task"""', '"""upload"""'], {}), "('task', 'upload')\n", (87, 105), False, 'from systems.plugins.index import BaseProvider\n'), ((216, 241), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (230, 241), False, 'import os\n')] |
# ============================================================================
# FILE: outline.py
# AUTHOR: <NAME> (tamura.yasumasa _at_ gmail.com)
# License: MIT license
# ============================================================================
from .base import Base
from subprocess import check_output, CalledProcessError
from denite.util import parse_tagline
import re
import tempfile
OUTLINE_HIGHLIGHT_SYNTAX = [
{'name': 'Name', 'link': 'Identifier', 're': '\S\+\%(\s\+\[\)\@='},
{'name': 'Type', 'link': 'Type', 're': '\[.\{-}\]'},
{'name': 'Ref', 'link': 'Comment', 're': '\s\s.\+'}
]
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.name = 'outline'
self.kind = 'file'
self.vars = {
'command': ['ctags'],
'options': [],
'file_opt': '-o',
'ignore_types': [],
'encoding': 'utf-8'
}
def on_init(self, context):
context['__path'] = context['args'][0] if len(
context['args']) > 0 else self.vim.current.buffer.name
def highlight(self):
for syn in OUTLINE_HIGHLIGHT_SYNTAX:
self.vim.command(
'syntax match {0}_{1} /{2}/ contained containedin={0}'.format(
self.syntax_name, syn['name'], syn['re']))
self.vim.command(
'highlight default link {0}_{1} {2}'.format(
self.syntax_name, syn['name'], syn['link']))
def gather_candidates(self, context):
with tempfile.NamedTemporaryFile(
mode='w', encoding=self.vars['encoding']) as tf:
args = []
args += self.vars['command']
args += self.vars['options']
args += [self.vars['file_opt'], tf.name]
args += [context['__path']]
self.print_message(context, args)
tf.close()
try:
check_output(args).decode(self.vars['encoding'], 'replace')
except CalledProcessError:
return []
candidates = []
with open(tf.name, encoding=self.vars['encoding'],
errors='replace') as f:
for line in f:
if re.match('!', line) or not line:
continue
info = parse_tagline(line.rstrip(), tf.name)
candidate = {
'word': info['name'],
'action__path': info['file'],
}
fmt = '{name} [{type}] {file} {ref}'
candidate['abbr'] = fmt.format(**info)
if info['line']:
candidate['action__line'] = info['line']
else:
candidate['action__pattern'] = info['pattern']
candidates.append(candidate)
return candidates
| [
"subprocess.check_output",
"re.match",
"tempfile.NamedTemporaryFile"
] | [((1558, 1627), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w"""', 'encoding': "self.vars['encoding']"}), "(mode='w', encoding=self.vars['encoding'])\n", (1585, 1627), False, 'import tempfile\n'), ((1952, 1970), 'subprocess.check_output', 'check_output', (['args'], {}), '(args)\n', (1964, 1970), False, 'from subprocess import check_output, CalledProcessError\n'), ((2269, 2288), 're.match', 're.match', (['"""!"""', 'line'], {}), "('!', line)\n", (2277, 2288), False, 'import re\n')] |
from bs4 import BeautifulSoup
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
class Post(models.Model):
title = models.CharField(_('post_title'), max_length=100)
body = models.TextField(_('post_body'))
tags = TaggableManager(_('post_tags'), help_text=_('post_tags_help_text'))
create_time = models.DateTimeField(_('post_create_time'), auto_now_add=True)
update_time = models.DateTimeField(_('post_update_time'), auto_now=True)
class Meta:
verbose_name = _('post')
verbose_name_plural = _('posts')
@staticmethod
def autocomplete_search_fields():
return ('id__iexact', 'title__icontains',)
def __str__(self):
return self.title
@property
def cover_url(self):
soup = BeautifulSoup(self.body, 'html.parser')
tags = soup.findAll('img')
return tags[0]['src'] if tags else None
@property
def summary(self):
soup = BeautifulSoup(self.body, 'html.parser')
for br in soup.find_all("br"):
br.replace_with("\n")
ps = [t for t in soup.findAll('p') if t.text.strip()]
return ps[0].text if ps else None
| [
"bs4.BeautifulSoup",
"django.utils.translation.ugettext_lazy"
] | [((216, 231), 'django.utils.translation.ugettext_lazy', '_', (['"""post_title"""'], {}), "('post_title')\n", (217, 231), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((277, 291), 'django.utils.translation.ugettext_lazy', '_', (['"""post_body"""'], {}), "('post_body')\n", (278, 291), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((320, 334), 'django.utils.translation.ugettext_lazy', '_', (['"""post_tags"""'], {}), "('post_tags')\n", (321, 334), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((411, 432), 'django.utils.translation.ugettext_lazy', '_', (['"""post_create_time"""'], {}), "('post_create_time')\n", (412, 432), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((492, 513), 'django.utils.translation.ugettext_lazy', '_', (['"""post_update_time"""'], {}), "('post_update_time')\n", (493, 513), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((570, 579), 'django.utils.translation.ugettext_lazy', '_', (['"""post"""'], {}), "('post')\n", (571, 579), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((610, 620), 'django.utils.translation.ugettext_lazy', '_', (['"""posts"""'], {}), "('posts')\n", (611, 620), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((834, 873), 'bs4.BeautifulSoup', 'BeautifulSoup', (['self.body', '"""html.parser"""'], {}), "(self.body, 'html.parser')\n", (847, 873), False, 'from bs4 import BeautifulSoup\n'), ((1010, 1049), 'bs4.BeautifulSoup', 'BeautifulSoup', (['self.body', '"""html.parser"""'], {}), "(self.body, 'html.parser')\n", (1023, 1049), False, 'from bs4 import BeautifulSoup\n'), ((346, 370), 'django.utils.translation.ugettext_lazy', '_', (['"""post_tags_help_text"""'], {}), "('post_tags_help_text')\n", (347, 370), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import os
from ..utils import add_jobs_argument
from ..utils import add_no_ccache_argument
from ..utils import add_optimize_argument
from ..utils import add_verbose_argument
from ..utils import build_prepare
from ..utils import run
def do_test(_parser, args, _mys_config):
build_prepare(args.verbose, args.optimize, args.no_ccache)
command = [
'make', '-f', 'build/Makefile', 'test', 'TEST=yes'
]
if os.getenv('MAKEFLAGS') is None:
command += ['-j', str(args.jobs)]
if args.debug:
command += ['TRANSPILE_DEBUG=--debug']
run(command, 'Building tests', args.verbose)
run(['./build/test'], 'Running tests', args.verbose)
def add_subparser(subparsers):
subparser = subparsers.add_parser(
'test',
description='Build and run tests.')
add_verbose_argument(subparser)
add_jobs_argument(subparser)
add_optimize_argument(subparser, 'debug')
add_no_ccache_argument(subparser)
subparser.set_defaults(func=do_test)
| [
"os.getenv"
] | [((429, 451), 'os.getenv', 'os.getenv', (['"""MAKEFLAGS"""'], {}), "('MAKEFLAGS')\n", (438, 451), False, 'import os\n')] |
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils import misc
from torch_utils.ops import conv2d_gradfix
import torch.nn.functional as F
import torchvision.transforms as T
import clip
import dnnlib
import random
#----------------------------------------------------------------------------
class Loss:
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, real_features): # to be overridden by subclass
raise NotImplementedError()
class Model(torch.nn.Module):
def __init__(self, device):
super(Model, self).__init__()
self.linear1 = torch.nn.Linear(512, 1024)
self.linear2 = torch.nn.Linear(1024, 1024)
self.linear3 = torch.nn.Linear(1024, 1024)
self.linear4 = torch.nn.Linear(1024, 512)
self.linear5 = torch.nn.Linear(512, 1024)
self.linear6 = torch.nn.Linear(1024, 1024)
self.linear7 = torch.nn.Linear(1024, 1024)
self.linear8 = torch.nn.Linear(1024, 512)
self.device = device
def forward(self, x):
mu = F.leaky_relu(self.linear1(x))
mu = F.leaky_relu(self.linear2(mu))
mu = F.leaky_relu(self.linear3(mu))
mu = self.linear4(mu)
std = F.leaky_relu(self.linear5(x))
std = F.leaky_relu(self.linear6(std))
std = F.leaky_relu(self.linear7(std))
std = self.linear8(std)
return mu + std.exp()*(torch.randn(mu.shape).to(self.device))
def loss(self, real, fake, temp=0.1, lam=0.5):
sim = torch.cosine_similarity(real.unsqueeze(1), fake.unsqueeze(0), dim=-1)
if temp > 0.:
sim = torch.exp(sim/temp)
sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp
sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp
if 0.<lam < 1.:
return -(lam*torch.log(sim1) + (1.-lam)*torch.log(sim2))
elif lam == 0:
return -torch.log(sim2)
else:
return -torch.log(sim1)
else:
return -torch.diagonal(sim)
#----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
def __init__(self, device, G_mapping, G_synthesis, G_mani, D, augment_pipe=None, style_mixing_prob=0.9, r1_gamma=10, pl_batch_shrink=2, pl_decay=0.01, pl_weight=2):
super().__init__()
self.device = device
self.G_mapping = G_mapping
self.G_synthesis = G_synthesis
self.G_mani = G_mani
self.D = D
self.augment_pipe = augment_pipe
self.style_mixing_prob = style_mixing_prob
self.r1_gamma = r1_gamma
self.pl_batch_shrink = pl_batch_shrink
self.pl_decay = pl_decay
self.pl_weight = pl_weight
self.pl_mean = torch.zeros([], device=device)
clip_model, _ = clip.load("ViT-B/32", device=device) # Load CLIP model here
self.clip_model = clip_model.eval()
self.mapper = Model(device)
self.mapper.load_state_dict(torch.load('./implicit.0.001.64.True.0.0.pth', map_location='cpu')) # path to the noise mapping network
self.mapper.to(device)
def run_G(self, z, c, sync, txt_fts=None, ):
with misc.ddp_sync(self.G_mapping, sync):
ws = self.G_mapping(z, c)
if self.style_mixing_prob > 0:
new_ws = self.G_mapping(torch.randn_like(z), c, skip_w_avg_update=True)
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = new_ws[:, cutoff:]
with misc.ddp_sync(self.G_synthesis, sync):
img = self.G_synthesis(ws, fts=txt_fts)
return img, ws
def run_D(self, img, c, sync, fts=None):
if self.augment_pipe is not None:
img = self.augment_pipe(img)
with misc.ddp_sync(self.D, sync):
logits, d_fts = self.D(img, c, fts=fts)
return logits, d_fts
def normalize(self):
return T.Compose([
T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def full_preprocess(self, img, mode='bicubic', ratio=0.5):
full_size = img.shape[-2]
if full_size < 224:
pad_1 = torch.randint(0, 224-full_size, ())
pad_2 = torch.randint(0, 224-full_size, ())
m = torch.nn.ConstantPad2d((pad_1, 224-full_size-pad_1, pad_2, 224-full_size-pad_2), 1.)
reshaped_img = m(img)
else:
cut_size = torch.randint(int(ratio*full_size), full_size, ())
left = torch.randint(0, full_size-cut_size, ())
top = torch.randint(0, full_size-cut_size, ())
cropped_img = img[:, :, top:top+cut_size, left:left+cut_size]
reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False)
reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now
reshaped_img = self.normalize()(reshaped_img)
return reshaped_img
def custom_preprocess(self, img, ind, cut_num, mode='bicubic'): # more to be implemented here
full_size = img.shape[-2]
grid = np.sqrt(cut_num)
most_right = min(int((ind%grid + 1)*full_size/grid), full_size)
most_bottom = min(int((ind//grid + 1)*full_size/grid), full_size)
cut_size = torch.randint(int(full_size//(grid+1)), int(min(min(full_size//2, most_right), most_bottom)), ()) # TODO: tune this later
left = torch.randint(0, most_right-cut_size, ())
top = torch.randint(0, most_bottom-cut_size, ())
cropped_img = img[:, :, top:top+cut_size, left:left+cut_size]
reshaped_img = F.interpolate(cropped_img, (224, 224), mode=mode, align_corners=False)
reshaped_img = (reshaped_img + 1.)*0.5 # range in [0., 1.] now
reshaped_img = self.normalize()(reshaped_img)
return reshaped_img
def contra_loss(self, temp, mat1, mat2, lam):
sim = torch.cosine_similarity(mat1.unsqueeze(1), mat2.unsqueeze(0), dim=-1)
if temp > 0.:
sim = torch.exp(sim/temp) # This implementation is incorrect, it should be sim=sim/temp.
# However, this incorrect implementation can reproduce our results with provided hyper-parameters.
# If you want to use the correct implementation, please manually revise it.
# The correct implementation should lead to better results, but don't use our provided hyper-parameters, you need to carefully tune lam, temp, itd, itc and other hyper-parameters
sim1 = torch.diagonal(F.softmax(sim, dim=1))*temp
sim2 = torch.diagonal(F.softmax(sim, dim=0))*temp
if 0.<lam < 1.:
return lam*torch.log(sim1) + (1.-lam)*torch.log(sim2)
elif lam == 0:
return torch.log(sim2)
else:
return torch.log(sim1)
else:
return torch.diagonal(sim)
def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, sync, gain, img_fts, txt_fts, lam, temp, gather, d_use_fts, itd, itc, iid, iic, mixing_prob=0.):
    """Compute and backpropagate the G/D losses for one training phase.

    StyleGAN2-style phased loss (Gmain, Gpl path-length regularization,
    Dmain, Dr1 regularization) extended with CLIP-based contrastive terms
    between generated-image features, real-image features and text features.

    Args:
        phase: one of 'Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth'.
        real_img, real_c: real image batch and its conditioning.
        gen_z, gen_c: latents and conditioning used to generate images.
        sync: whether DDP gradient sync happens on this call.
        gain: multiplier applied to each loss before backward().
        img_fts, txt_fts: CLIP image/text features; assumed L2-normalized
            rows -- TODO confirm against the data pipeline.
        lam, temp: blend weight and temperature passed to contra_loss.
        gather: if True, all-gather features across distributed ranks so the
            contrastive losses see the global batch.
        d_use_fts: also feed the conditioning features to the path-length
            gradient computation (see review note below).
        itd, itc, iid, iic: loss weights -- image-text / image-image terms
            in the discriminator-feature (d) and CLIP-feature (c) spaces.
        mixing_prob: probability of substituting image features for text
            features as the conditioning signal (language-free training).
    """
    assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
    do_Gmain = (phase in ['Gmain', 'Gboth'])
    do_Dmain = (phase in ['Dmain', 'Dboth'])
    do_Gpl = (phase in ['Greg', 'Gboth']) and (self.pl_weight != 0)
    do_Dr1 = (phase in ['Dreg', 'Dboth']) and (self.r1_gamma != 0)
    # augmentation
    aug_level_1 = 0.1
    aug_level_2 = 0.75
    # print(torch.cosine_similarity(img_fts, txt_fts, dim=-1))
    mixing_prob = mixing_prob # probability to use img_fts instead of txt_fts
    # Random unit vector used to perturb both the text and image features.
    random_noise = torch.randn(txt_fts.shape).to(img_fts.device)# + torch.randn((1, 512)).to(img_fts.device)
    random_noise = random_noise/random_noise.norm(dim=-1, keepdim=True)
    # Lightly perturbed, re-normalized text features.
    txt_fts_ = txt_fts*(1-aug_level_1) + random_noise*aug_level_1
    txt_fts_ = txt_fts_/txt_fts_.norm(dim=-1, keepdim=True)
    if txt_fts.shape[-1] == img_fts.shape[-1]:
        # # Gaussian perturbation
        img_fts_ = img_fts*(1-aug_level_2) + random_noise*aug_level_2
        # learned generation
        # with torch.no_grad():
        #     normed_real_full_img = self.full_preprocess(real_img, ratio=0.99)
        #     img_fts_real_full_ = self.clip_model.encode_image(normed_real_full_img).float()
        #     img_fts_real_full_ = img_fts_real_full_/img_fts_real_full_.norm(dim=-1, keepdim=True)
        #     # img_fts_real_full_ = img_fts
        #     img_fts_ = self.mapper(img_fts_real_full_) + img_fts_real_full_
        img_fts_ = img_fts_/img_fts_.norm(dim=-1, keepdim=True)
        # With probability mixing_prob, replace the text conditioning by the
        # (perturbed) image features, decided row-wise via torch.where.
        if mixing_prob > 0.99:
            txt_fts_ = img_fts_
        elif mixing_prob < 0.01:
            txt_fts_ = txt_fts_
        else:
            txt_fts_ = torch.where(torch.rand([txt_fts_.shape[0], 1], device=txt_fts_.device) < mixing_prob, img_fts_, txt_fts_)
    # Loss weights: *_d act on discriminator features, *_c on CLIP features.
    img_img_d = iid # discriminator
    img_img_c = iic # clip
    img_txt_d = itd # discriminator
    img_txt_c = itc # clip
    temp = temp
    lam = lam
    def gather_tensor(input_tensor, gather_or_not):
        # All-gather `input_tensor` across ranks, keeping this rank's slot as
        # the original (differentiable) tensor so gradients still flow.
        if gather_or_not:
            world_size = torch.distributed.get_world_size()
            rank = torch.distributed.get_rank()
            output_tensor = [torch.zeros_like(input_tensor) for _ in range(world_size)]
            torch.distributed.all_gather(output_tensor, input_tensor)
            output_tensor[rank] = input_tensor
            # # print(torch.cat(output_tensor).size())
            return torch.cat(output_tensor)
        else:
            return input_tensor
    txt_fts_all = gather_tensor(txt_fts_, gather)
    # Gmain: Maximize logits for generated images.
    if do_Gmain:
        with torch.autograd.profiler.record_function('Gmain_forward'):
            gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=(sync and not do_Gpl)) # May get synced by Gpl.
            gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_)
            gen_d_fts_all = gather_tensor(gen_d_fts, gather)
            training_stats.report('Loss/scores/fake', gen_logits)
            training_stats.report('Loss/signs/fake', gen_logits.sign())
            loss_Gmain = torch.nn.functional.softplus(-gen_logits) # -log(sigmoid(gen_logits))
            # CLIP features of the full generated images.
            normed_gen_full_img = self.full_preprocess(gen_img)
            img_fts_gen_full = self.clip_model.encode_image(normed_gen_full_img)
            img_fts_gen_full = img_fts_gen_full/img_fts_gen_full.norm(dim=-1, keepdim=True)
            img_fts_gen_full_all = gather_tensor(img_fts_gen_full, gather)
            img_fts_all = gather_tensor(img_fts, gather)
            # Contrastive terms in CLIP space: generated image vs. text / real image.
            if img_txt_c > 0.:
                clip_loss_img_txt = self.contra_loss(temp, img_fts_gen_full_all, txt_fts_all, lam)
                loss_Gmain = loss_Gmain - img_txt_c*clip_loss_img_txt.mean()
            if img_img_c > 0.:
                clip_loss_img_img = self.contra_loss(temp, img_fts_gen_full_all, img_fts_all, lam)
                loss_Gmain = loss_Gmain - img_img_c*clip_loss_img_img.mean()
            # Contrastive terms in discriminator-feature space.
            if img_txt_d > 0.:
                loss_Gmain = loss_Gmain - img_txt_d*self.contra_loss(temp, gen_d_fts_all, txt_fts_all, lam).mean()
            if img_img_d > 0.:
                with torch.no_grad():
                    _, g_real_d_fts = self.run_D(real_img.detach(), real_c, sync=False, fts=txt_fts_)
                g_real_d_fts_all = gather_tensor(g_real_d_fts, gather)
                loss_Gmain = loss_Gmain - img_img_d*self.contra_loss(temp, g_real_d_fts_all, gen_d_fts_all, lam).mean()
            training_stats.report('Loss/G/loss', loss_Gmain)
        with torch.autograd.profiler.record_function('Gmain_backward'):
            loss_Gmain.mean().mul(gain).backward()
    # Gpl: Apply path length regularization.
    if do_Gpl:
        with torch.autograd.profiler.record_function('Gpl_forward'):
            batch_size = gen_z.shape[0] // self.pl_batch_shrink
            txt_fts_0 = txt_fts_[:batch_size]
            txt_fts_0.requires_grad_()
            gen_img, gen_ws = self.run_G(gen_z[:batch_size], gen_c[:batch_size], txt_fts=txt_fts_0, sync=sync)
            pl_noise = torch.randn_like(gen_img) / np.sqrt(gen_img.shape[2] * gen_img.shape[3])
            with torch.autograd.profiler.record_function('pl_grads'), conv2d_gradfix.no_weight_gradients():
                # NOTE(review): both branches keep only element [0] (the gen_ws
                # gradients); the txt_fts_0 gradients requested when d_use_fts
                # is set are computed but discarded -- verify intent.
                if d_use_fts:
                    pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws, txt_fts_0], create_graph=True, only_inputs=True)[0]
                else:
                    pl_grads = torch.autograd.grad(outputs=[(gen_img * pl_noise).sum()], inputs=[gen_ws], create_graph=True, only_inputs=True)[0]
            pl_lengths = pl_grads.square().sum(2).mean(1).sqrt()
            # Running average of the path length via lerp with pl_decay.
            pl_mean = self.pl_mean.lerp(pl_lengths.mean(), self.pl_decay)
            self.pl_mean.copy_(pl_mean.detach())
            pl_penalty = (pl_lengths - pl_mean).square()
            training_stats.report('Loss/pl_penalty', pl_penalty)
            loss_Gpl = pl_penalty * self.pl_weight
            training_stats.report('Loss/G/reg', loss_Gpl)
        with torch.autograd.profiler.record_function('Gpl_backward'):
            # The 0-multiplied term ties the loss to gen_img -- presumably
            # for DDP/graph bookkeeping; confirm before removing.
            (gen_img[:, 0, 0, 0] * 0 + loss_Gpl).mean().mul(gain).backward()
    # Dmain: Minimize logits for generated images.
    loss_Dgen = 0
    if do_Dmain:
        with torch.autograd.profiler.record_function('Dgen_forward'):
            gen_img, _gen_ws = self.run_G(gen_z, gen_c, txt_fts=txt_fts_, sync=False)
            gen_logits, gen_d_fts = self.run_D(gen_img, gen_c, sync=False, fts=txt_fts_) # Gets synced by loss_Dreal.
            training_stats.report('Loss/scores/fake', gen_logits)
            training_stats.report('Loss/signs/fake', gen_logits.sign())
            loss_Dgen = torch.nn.functional.softplus(gen_logits) # -log(1 - sigmoid(gen_logits))
        with torch.autograd.profiler.record_function('Dgen_backward'):
            loss_Dgen.mean().mul(gain).backward()
    # Dmain: Maximize logits for real images.
    # Dr1: Apply R1 regularization.
    if do_Dmain or do_Dr1:
        name = 'Dreal_Dr1' if do_Dmain and do_Dr1 else 'Dreal' if do_Dmain else 'Dr1'
        with torch.autograd.profiler.record_function(name + '_forward'):
            real_img_tmp = real_img.detach().requires_grad_(do_Dr1)
            real_logits, real_d_fts = self.run_D(real_img_tmp, real_c, sync=sync, fts=txt_fts_)
            training_stats.report('Loss/scores/real', real_logits)
            training_stats.report('Loss/signs/real', real_logits.sign())
            loss_Dreal = 0
            if do_Dmain:
                loss_Dreal = torch.nn.functional.softplus(-real_logits) # -log(sigmoid(real_logits))
                # Matching contrastive term on the real discriminator features.
                if img_txt_d > 0.:
                    real_d_fts_all = gather_tensor(real_d_fts, gather)
                    loss_Dreal = loss_Dreal - img_txt_d*self.contra_loss(temp, real_d_fts_all, txt_fts_all, lam).mean()
                training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)
            loss_Dr1 = 0
            if do_Dr1:
                with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                    r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[real_img_tmp], create_graph=True, only_inputs=True)[0]
                r1_penalty = r1_grads.square().sum([1,2,3])
                loss_Dr1 = r1_penalty * (self.r1_gamma / 2)
                training_stats.report('Loss/r1_penalty', r1_penalty)
                training_stats.report('Loss/D/reg', loss_Dr1)
        with torch.autograd.profiler.record_function(name + '_backward'):
            # real_logits * 0 keeps real_logits in the backward graph.
            (real_logits * 0 + loss_Dreal + loss_Dr1).mean().mul(gain).backward()
# ----------------------------------------------------------------------------
| [
"numpy.sqrt",
"torch.exp",
"torch.full_like",
"torch.autograd.profiler.record_function",
"torch.nn.functional.interpolate",
"torch.distributed.get_rank",
"torch_utils.training_stats.report",
"torch.nn.functional.softmax",
"torch.randint",
"torch.zeros_like",
"torch.randn",
"torch.distributed.a... | [((635, 661), 'torch.nn.Linear', 'torch.nn.Linear', (['(512)', '(1024)'], {}), '(512, 1024)\n', (650, 661), False, 'import torch\n'), ((685, 712), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (700, 712), False, 'import torch\n'), ((736, 763), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (751, 763), False, 'import torch\n'), ((787, 813), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(512)'], {}), '(1024, 512)\n', (802, 813), False, 'import torch\n'), ((837, 863), 'torch.nn.Linear', 'torch.nn.Linear', (['(512)', '(1024)'], {}), '(512, 1024)\n', (852, 863), False, 'import torch\n'), ((887, 914), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (902, 914), False, 'import torch\n'), ((938, 965), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(1024)'], {}), '(1024, 1024)\n', (953, 965), False, 'import torch\n'), ((989, 1015), 'torch.nn.Linear', 'torch.nn.Linear', (['(1024)', '(512)'], {}), '(1024, 512)\n', (1004, 1015), False, 'import torch\n'), ((2791, 2821), 'torch.zeros', 'torch.zeros', (['[]'], {'device': 'device'}), '([], device=device)\n', (2802, 2821), False, 'import torch\n'), ((2846, 2882), 'clip.load', 'clip.load', (['"""ViT-B/32"""'], {'device': 'device'}), "('ViT-B/32', device=device)\n", (2855, 2882), False, 'import clip\n'), ((5486, 5502), 'numpy.sqrt', 'np.sqrt', (['cut_num'], {}), '(cut_num)\n', (5493, 5502), True, 'import numpy as np\n'), ((5814, 5857), 'torch.randint', 'torch.randint', (['(0)', '(most_right - cut_size)', '()'], {}), '(0, most_right - cut_size, ())\n', (5827, 5857), False, 'import torch\n'), ((5870, 5914), 'torch.randint', 'torch.randint', (['(0)', '(most_bottom - cut_size)', '()'], {}), '(0, most_bottom - cut_size, ())\n', (5883, 5914), False, 'import torch\n'), ((6006, 6076), 'torch.nn.functional.interpolate', 'F.interpolate', (['cropped_img', '(224, 224)'], {'mode': 'mode', 
'align_corners': '(False)'}), '(cropped_img, (224, 224), mode=mode, align_corners=False)\n', (6019, 6076), True, 'import torch.nn.functional as F\n'), ((1651, 1672), 'torch.exp', 'torch.exp', (['(sim / temp)'], {}), '(sim / temp)\n', (1660, 1672), False, 'import torch\n'), ((3023, 3089), 'torch.load', 'torch.load', (['"""./implicit.0.001.64.True.0.0.pth"""'], {'map_location': '"""cpu"""'}), "('./implicit.0.001.64.True.0.0.pth', map_location='cpu')\n", (3033, 3089), False, 'import torch\n'), ((3238, 3273), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.G_mapping', 'sync'], {}), '(self.G_mapping, sync)\n', (3251, 3273), False, 'from torch_utils import misc\n'), ((3878, 3915), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.G_synthesis', 'sync'], {}), '(self.G_synthesis, sync)\n', (3891, 3915), False, 'from torch_utils import misc\n'), ((4134, 4161), 'torch_utils.misc.ddp_sync', 'misc.ddp_sync', (['self.D', 'sync'], {}), '(self.D, sync)\n', (4147, 4161), False, 'from torch_utils import misc\n'), ((4567, 4604), 'torch.randint', 'torch.randint', (['(0)', '(224 - full_size)', '()'], {}), '(0, 224 - full_size, ())\n', (4580, 4604), False, 'import torch\n'), ((4623, 4660), 'torch.randint', 'torch.randint', (['(0)', '(224 - full_size)', '()'], {}), '(0, 224 - full_size, ())\n', (4636, 4660), False, 'import torch\n'), ((4675, 4772), 'torch.nn.ConstantPad2d', 'torch.nn.ConstantPad2d', (['(pad_1, 224 - full_size - pad_1, pad_2, 224 - full_size - pad_2)', '(1.0)'], {}), '((pad_1, 224 - full_size - pad_1, pad_2, 224 -\n full_size - pad_2), 1.0)\n', (4697, 4772), False, 'import torch\n'), ((4901, 4943), 'torch.randint', 'torch.randint', (['(0)', '(full_size - cut_size)', '()'], {}), '(0, full_size - cut_size, ())\n', (4914, 4943), False, 'import torch\n'), ((4960, 5002), 'torch.randint', 'torch.randint', (['(0)', '(full_size - cut_size)', '()'], {}), '(0, full_size - cut_size, ())\n', (4973, 5002), False, 'import torch\n'), ((5102, 5172), 
'torch.nn.functional.interpolate', 'F.interpolate', (['cropped_img', '(224, 224)'], {'mode': 'mode', 'align_corners': '(False)'}), '(cropped_img, (224, 224), mode=mode, align_corners=False)\n', (5115, 5172), True, 'import torch.nn.functional as F\n'), ((6418, 6439), 'torch.exp', 'torch.exp', (['(sim / temp)'], {}), '(sim / temp)\n', (6427, 6439), False, 'import torch\n'), ((7269, 7288), 'torch.diagonal', 'torch.diagonal', (['sim'], {}), '(sim)\n', (7283, 7288), False, 'import torch\n'), ((2055, 2074), 'torch.diagonal', 'torch.diagonal', (['sim'], {}), '(sim)\n', (2069, 2074), False, 'import torch\n'), ((4313, 4404), 'torchvision.transforms.Normalize', 'T.Normalize', (['(0.48145466, 0.4578275, 0.40821073)', '(0.26862954, 0.26130258, 0.27577711)'], {}), '((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, \n 0.27577711))\n', (4324, 4404), True, 'import torchvision.transforms as T\n'), ((8042, 8068), 'torch.randn', 'torch.randn', (['txt_fts.shape'], {}), '(txt_fts.shape)\n', (8053, 8068), False, 'import torch\n'), ((9714, 9748), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (9746, 9748), False, 'import torch\n'), ((9772, 9800), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (9798, 9800), False, 'import torch\n'), ((9909, 9966), 'torch.distributed.all_gather', 'torch.distributed.all_gather', (['output_tensor', 'input_tensor'], {}), '(output_tensor, input_tensor)\n', (9937, 9966), False, 'import torch\n'), ((10098, 10122), 'torch.cat', 'torch.cat', (['output_tensor'], {}), '(output_tensor)\n', (10107, 10122), False, 'import torch\n'), ((10334, 10390), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmain_forward"""'], {}), "('Gmain_forward')\n", (10373, 10390), False, 'import torch\n'), ((10731, 10784), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake"""', 'gen_logits'], {}), "('Loss/scores/fake', 
gen_logits)\n", (10752, 10784), False, 'from torch_utils import training_stats\n'), ((10905, 10946), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['(-gen_logits)'], {}), '(-gen_logits)\n', (10933, 10946), False, 'import torch\n'), ((12451, 12499), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/G/loss"""', 'loss_Gmain'], {}), "('Loss/G/loss', loss_Gmain)\n", (12472, 12499), False, 'from torch_utils import training_stats\n'), ((12517, 12574), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gmain_backward"""'], {}), "('Gmain_backward')\n", (12556, 12574), False, 'import torch\n'), ((12717, 12771), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gpl_forward"""'], {}), "('Gpl_forward')\n", (12756, 12771), False, 'import torch\n'), ((13910, 13962), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/pl_penalty"""', 'pl_penalty'], {}), "('Loss/pl_penalty', pl_penalty)\n", (13931, 13962), False, 'from torch_utils import training_stats\n'), ((14034, 14079), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/G/reg"""', 'loss_Gpl'], {}), "('Loss/G/reg', loss_Gpl)\n", (14055, 14079), False, 'from torch_utils import training_stats\n'), ((14097, 14152), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Gpl_backward"""'], {}), "('Gpl_backward')\n", (14136, 14152), False, 'import torch\n'), ((14351, 14406), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Dgen_forward"""'], {}), "('Dgen_forward')\n", (14390, 14406), False, 'import torch\n'), ((14636, 14689), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/fake"""', 'gen_logits'], {}), "('Loss/scores/fake', gen_logits)\n", (14657, 14689), False, 'from torch_utils import training_stats\n'), ((14794, 14834), 'torch.nn.functional.softplus', 
'torch.nn.functional.softplus', (['gen_logits'], {}), '(gen_logits)\n', (14822, 14834), False, 'import torch\n'), ((14884, 14940), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""Dgen_backward"""'], {}), "('Dgen_backward')\n", (14923, 14940), False, 'import torch\n'), ((15225, 15283), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (["(name + '_forward')"], {}), "(name + '_forward')\n", (15264, 15283), False, 'import torch\n'), ((15473, 15527), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/scores/real"""', 'real_logits'], {}), "('Loss/scores/real', real_logits)\n", (15494, 15527), False, 'from torch_utils import training_stats\n'), ((16716, 16775), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (["(name + '_backward')"], {}), "(name + '_backward')\n", (16755, 16775), False, 'import torch\n'), ((1705, 1726), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(1)'}), '(sim, dim=1)\n', (1714, 1726), True, 'import torch.nn.functional as F\n'), ((1767, 1788), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(0)'}), '(sim, dim=0)\n', (1776, 1788), True, 'import torch.nn.functional as F\n'), ((3413, 3432), 'torch.randn_like', 'torch.randn_like', (['z'], {}), '(z)\n', (3429, 3432), False, 'import torch\n'), ((3483, 3538), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""style_mixing"""'], {}), "('style_mixing')\n", (3522, 3538), False, 'import torch\n'), ((6925, 6946), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(1)'}), '(sim, dim=1)\n', (6934, 6946), True, 'import torch.nn.functional as F\n'), ((6987, 7008), 'torch.nn.functional.softmax', 'F.softmax', (['sim'], {'dim': '(0)'}), '(sim, dim=0)\n', (6996, 7008), True, 'import torch.nn.functional as F\n'), ((7163, 7178), 'torch.log', 'torch.log', (['sim2'], {}), '(sim2)\n', (7172, 7178), False, 
'import torch\n'), ((7220, 7235), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (7229, 7235), False, 'import torch\n'), ((9834, 9864), 'torch.zeros_like', 'torch.zeros_like', (['input_tensor'], {}), '(input_tensor)\n', (9850, 9864), False, 'import torch\n'), ((13076, 13101), 'torch.randn_like', 'torch.randn_like', (['gen_img'], {}), '(gen_img)\n', (13092, 13101), False, 'import torch\n'), ((13104, 13148), 'numpy.sqrt', 'np.sqrt', (['(gen_img.shape[2] * gen_img.shape[3])'], {}), '(gen_img.shape[2] * gen_img.shape[3])\n', (13111, 13148), True, 'import numpy as np\n'), ((13170, 13221), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""pl_grads"""'], {}), "('pl_grads')\n", (13209, 13221), False, 'import torch\n'), ((13223, 13259), 'torch_utils.ops.conv2d_gradfix.no_weight_gradients', 'conv2d_gradfix.no_weight_gradients', ([], {}), '()\n', (13257, 13259), False, 'from torch_utils.ops import conv2d_gradfix\n'), ((15699, 15741), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['(-real_logits)'], {}), '(-real_logits)\n', (15727, 15741), False, 'import torch\n'), ((16050, 16110), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/loss"""', '(loss_Dgen + loss_Dreal)'], {}), "('Loss/D/loss', loss_Dgen + loss_Dreal)\n", (16071, 16110), False, 'from torch_utils import training_stats\n'), ((16579, 16631), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/r1_penalty"""', 'r1_penalty'], {}), "('Loss/r1_penalty', r1_penalty)\n", (16600, 16631), False, 'from torch_utils import training_stats\n'), ((16652, 16697), 'torch_utils.training_stats.report', 'training_stats.report', (['"""Loss/D/reg"""', 'loss_Dr1'], {}), "('Loss/D/reg', loss_Dr1)\n", (16673, 16697), False, 'from torch_utils import training_stats\n'), ((1432, 1453), 'torch.randn', 'torch.randn', (['mu.shape'], {}), '(mu.shape)\n', (1443, 1453), False, 'import torch\n'), ((1947, 1962), 'torch.log', 'torch.log', 
(['sim2'], {}), '(sim2)\n', (1956, 1962), False, 'import torch\n'), ((2005, 2020), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (2014, 2020), False, 'import torch\n'), ((3754, 3790), 'torch.full_like', 'torch.full_like', (['cutoff', 'ws.shape[1]'], {}), '(cutoff, ws.shape[1])\n', (3769, 3790), False, 'import torch\n'), ((7070, 7085), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (7079, 7085), False, 'import torch\n'), ((7097, 7112), 'torch.log', 'torch.log', (['sim2'], {}), '(sim2)\n', (7106, 7112), False, 'import torch\n'), ((12112, 12127), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12125, 12127), False, 'import torch\n'), ((16193, 16244), 'torch.autograd.profiler.record_function', 'torch.autograd.profiler.record_function', (['"""r1_grads"""'], {}), "('r1_grads')\n", (16232, 16244), False, 'import torch\n'), ((16246, 16282), 'torch_utils.ops.conv2d_gradfix.no_weight_gradients', 'conv2d_gradfix.no_weight_gradients', ([], {}), '()\n', (16280, 16282), False, 'from torch_utils.ops import conv2d_gradfix\n'), ((1852, 1867), 'torch.log', 'torch.log', (['sim1'], {}), '(sim1)\n', (1861, 1867), False, 'import torch\n'), ((1879, 1894), 'torch.log', 'torch.log', (['sim2'], {}), '(sim2)\n', (1888, 1894), False, 'import torch\n'), ((3569, 3621), 'torch.empty', 'torch.empty', (['[]'], {'dtype': 'torch.int64', 'device': 'ws.device'}), '([], dtype=torch.int64, device=ws.device)\n', (3580, 3621), False, 'import torch\n'), ((3687, 3719), 'torch.rand', 'torch.rand', (['[]'], {'device': 'ws.device'}), '([], device=ws.device)\n', (3697, 3719), False, 'import torch\n'), ((9300, 9358), 'torch.rand', 'torch.rand', (['[txt_fts_.shape[0], 1]'], {'device': 'txt_fts_.device'}), '([txt_fts_.shape[0], 1], device=txt_fts_.device)\n', (9310, 9358), False, 'import torch\n')] |
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def do_ml(merged_df, test_size, ml_model, **kwargs):
    """Train `ml_model` to predict the `lagged_poc` label and evaluate it.

    Splits `merged_df` into features (every column except the target and the
    non-feature columns dropped below) and the `lagged_poc` target, fits the
    model on a train split, and evaluates on the held-out split. Plotting
    feature importances is a side effect.

    Args:
        merged_df: DataFrame containing the feature columns plus
            'lagged_poc', 'price_date', 'label_id' and 'negative_poc'.
        test_size: fraction (or absolute count) of rows held out for testing,
            forwarded to sklearn's train_test_split.
        ml_model: estimator class exposing fit/predict/feature_importances_
            (e.g. RandomForestClassifier); instantiated as ml_model(**kwargs).
        **kwargs: keyword arguments forwarded to the model constructor.

    Returns:
        Tuple of (confusion_matrix, accuracy) on the held-out test split.
    """
    # Drop the target itself plus id/leakage columns that are not features.
    feature_df = merged_df.drop(
        columns=["lagged_poc", "price_date", "label_id", "negative_poc"]
    )
    target = merged_df[["lagged_poc"]]
    # Fixed random_state keeps the split reproducible between runs.
    X_train, X_test, y_train, y_test = train_test_split(
        np.array(feature_df), np.array(target), test_size=test_size, random_state=1
    )
    model = ml_model(**kwargs)
    # Fit on training data
    model.fit(X_train, np.ravel(y_train))
    # Actual class predictions
    predictions = model.predict(X_test)
    # Local names deliberately avoid shadowing the `accuracy_score` imported
    # from sklearn.metrics at module level.
    cm = metrics.confusion_matrix(y_test, predictions)
    accuracy = metrics.accuracy_score(y_test, predictions)
    # feature importance
    plot_feature_importance(model, feature_df)
    return cm, accuracy
def plot_feature_importance(model, train_data, top_n=50):
    """Plot the `top_n` most important features of a fitted model.

    Args:
        model: fitted estimator exposing `feature_importances_`
            (e.g. a random-forest / gradient-boosting model).
        train_data: DataFrame whose columns name the features, in the same
            order they were fed to the model.
        top_n: number of highest-importance features to display
            (defaults to 50, matching the previous hard-coded limit).
    """
    # Pair each column name with its importance and rank descending.
    importance_df = pd.DataFrame(
        {"fi": model.feature_importances_, "f": train_data.columns}
    ).sort_values("fi", ascending=False)
    plt.rcParams.update({"font.size": 8})
    plt.figure(figsize=(8, 4))
    plt.plot(importance_df.f.iloc[:top_n], importance_df.fi.iloc[:top_n])
    plt.xticks(rotation=90)
    plt.show()
| [
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"numpy.array",
"pandas.DataFrame",
"numpy.ravel",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusion_matrix"
] | [((992, 1037), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1016, 1037), False, 'from sklearn import metrics\n'), ((1059, 1102), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'predictions'], {}), '(y_test, predictions)\n', (1081, 1102), False, 'from sklearn import metrics\n'), ((1334, 1348), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1346, 1348), True, 'import pandas as pd\n'), ((1503, 1540), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 8}"], {}), "({'font.size': 8})\n", (1522, 1540), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1571), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (1555, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1623), 'matplotlib.pyplot.plot', 'plt.plot', (['fiDF.f.iloc[0:nf]', 'fiDF.fi.iloc[0:nf]'], {}), '(fiDF.f.iloc[0:nf], fiDF.fi.iloc[0:nf])\n', (1584, 1623), True, 'import matplotlib.pyplot as plt\n'), ((1628, 1651), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (1638, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1656, 1666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1664, 1666), True, 'import matplotlib.pyplot as plt\n'), ((712, 732), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (720, 732), True, 'import numpy as np\n'), ((734, 750), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (742, 750), True, 'import numpy as np\n'), ((877, 894), 'numpy.ravel', 'np.ravel', (['y_train'], {}), '(y_train)\n', (885, 894), True, 'import numpy as np\n')] |
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple cross-platform helper to create an RPM package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fileinput
import os
import re
import shutil
import subprocess
import sys
from tempfile import mkdtemp
# pylint: disable=g-direct-third-party-import
from third_party.py import gflags
# Command-line flags: package metadata plus the input spec and output paths.
gflags.DEFINE_string('name', '', 'The name of the software being packaged.')
gflags.DEFINE_string('version', '',
                     'The version of the software being packaged.')
gflags.DEFINE_string('release', '',
                     'The release of the software being packaged.')
gflags.DEFINE_string('arch', '',
                     'The CPU architecture of the software being packaged.')
gflags.DEFINE_string('spec_file', '',
                     'The file containing the RPM specification.')
gflags.DEFINE_string('out_file', '',
                     'The destination to save the resulting RPM file to.')
# Helpers for safely entering and leaving working directories during a build.
@contextlib.contextmanager
def Cd(newdir, cleanup=lambda: True):
  """Temporarily change the current working directory.

  On exit the previous working directory is restored and the optional
  `cleanup` callable is invoked.

  Args:
    newdir: directory to enter ('~' is expanded). Must already exist.
    cleanup: zero-argument callable executed after the directory is restored.

  Yields:
    Nothing.
  """
  saved_dir = os.getcwd()
  os.chdir(os.path.expanduser(newdir))
  try:
    yield
  finally:
    os.chdir(saved_dir)
    cleanup()
@contextlib.contextmanager
def Tempdir():
  """Create a fresh temporary directory and make it the working directory.

  The directory (and everything created inside it) is removed once the
  context exits, and the previous working directory is restored.

  Yields:
    The full path of the temporary directory.
  """
  dirpath = mkdtemp()
  with Cd(dirpath, cleanup=lambda: shutil.rmtree(dirpath)):
    yield dirpath
def GetFlagValue(flagvalue, strip=True):
  """Return a flag's value, dereferencing '@<path>' file indirection.

  A value beginning with '@' names a file whose contents replace the value.
  Falsy values (empty string, None) are returned unchanged.

  Args:
    flagvalue: the raw flag value.
    strip: if True, surrounding whitespace is stripped from the result.

  Returns:
    The resolved flag value.
  """
  if not flagvalue:
    return flagvalue
  if flagvalue[0] == '@':
    with open(flagvalue[1:], 'r') as indirection:
      flagvalue = indirection.read()
  return flagvalue.strip() if strip else flagvalue
# rpmbuild reports the artifact it produced with a "Wrote: <path>" line.
WROTE_FILE_RE = re.compile(r'Wrote: (?P<rpm_path>.+)', re.MULTILINE)


def FindOutputFile(log):
  """Scan rpmbuild output for the path of the written RPM, or None."""
  match = WROTE_FILE_RE.search(log)
  return match.group('rpm_path') if match else None
def CopyAndRewrite(input_file, output_file, replacements=None):
  """Copy input_file to output_file, optionally rewriting matched lines.

  Args:
    input_file: path of the file to copy.
    output_file: path to write the (possibly rewritten) copy to.
    replacements: optional dict mapping a line prefix to the text written
      after that prefix. At most one (the first matching) replacement is
      applied per line.
  """
  # Use plain file objects instead of the fileinput module: the original
  # fileinput.input() stream was never closed (resource leak / global state).
  with open(input_file, 'r') as source, open(output_file, 'w') as output:
    for line in source:
      if replacements:
        for prefix, text in replacements.items():
          if line.startswith(prefix):
            line = prefix + ' ' + text + '\n'
            break
      output.write(line)
def Which(program):
  """Return the full path of `program` found on $PATH, or None.

  Mirrors the Unix `which` utility: the first PATH entry containing an
  executable regular file with the given name wins.
  """
  for directory in os.environ['PATH'].split(os.pathsep):
    candidate = os.path.join(directory, program)
    if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
      return candidate
  return None
class NoRpmbuildFound(Exception):
  """Raised when no `rpmbuild` executable can be found on PATH."""
  pass
def FindRpmbuild():
  """Return the path of the rpmbuild binary.

  Raises:
    NoRpmbuildFound: if rpmbuild is not available on PATH.
  """
  rpmbuild = Which('rpmbuild')
  if not rpmbuild:
    raise NoRpmbuildFound()
  return rpmbuild
class RpmBuilder(object):
  """A helper class to manage building the RPM file.

  Typical use: construct, AddFiles(), then Build(); Build() stages files and
  the spec into a temporary rpmbuild tree, runs rpmbuild, and copies the
  resulting RPM back out.
  """

  # Subdirectory names of the rpmbuild working tree.
  SOURCE_DIR = 'SOURCES'
  BUILD_DIR = 'BUILD'
  TEMP_DIR = 'TMP'
  DIRS = [SOURCE_DIR, BUILD_DIR, TEMP_DIR]

  def __init__(self, name, version, release, arch):
    """Resolve flag indirection for version/release and locate rpmbuild.

    Raises:
      NoRpmbuildFound: if rpmbuild is not on PATH.
    """
    self.name = name
    # version/release may be '@file' references; resolve them now.
    self.version = GetFlagValue(version)
    self.release = GetFlagValue(release)
    self.arch = arch
    self.files = []
    self.rpmbuild_path = FindRpmbuild()
    self.rpm_path = None

  def AddFiles(self, files):
    """Add a set of files to the current RPM."""
    self.files += files

  def SetupWorkdir(self, spec_file, original_dir):
    """Create the needed structure in the workdir.

    Assumes the current working directory is the (temporary) workdir;
    `original_dir` is where the input files actually live.
    """
    # Create directory structure.
    for name in RpmBuilder.DIRS:
      if not os.path.exists(name):
        os.makedirs(name, 0o777)
    # Copy the files.
    for f in self.files:
      # Preserve each file's relative directory under BUILD/.
      dst_dir = os.path.join(RpmBuilder.BUILD_DIR, os.path.dirname(f))
      if not os.path.exists(dst_dir):
        os.makedirs(dst_dir, 0o777)
      shutil.copy(os.path.join(original_dir, f), dst_dir)
    # Copy the spec file, updating with the correct version.
    spec_origin = os.path.join(original_dir, spec_file)
    self.spec_file = os.path.basename(spec_file)
    replacements = {}
    if self.version:
      replacements['Version:'] = self.version
    if self.release:
      replacements['Release:'] = self.release
    CopyAndRewrite(spec_origin, self.spec_file, replacements)

  def CallRpmBuild(self, dirname):
    """Call rpmbuild with the correct arguments.

    Returns:
      The rpmbuild process exit status (0 on success).
    """
    args = [
        self.rpmbuild_path,
        '--define',
        '_topdir %s' % dirname,
        '--define',
        '_tmppath %s/TMP' % dirname,
        '--bb',
        self.spec_file,
    ]
    # stderr is folded into stdout so a single log is scanned/printed.
    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = p.communicate()[0]
    # NOTE(review): under Python 3, `output` is bytes while FindOutputFile's
    # regex and the prints below expect str -- verify the intended
    # interpreter, or decode the output first.
    if p.returncode == 0:
      # Find the created file.
      self.rpm_path = FindOutputFile(output)
    if p.returncode != 0 or not self.rpm_path:
      print('Error calling rpmbuild:')
      print(output)
    # Return the status.
    return p.returncode

  def SaveResult(self, out_file):
    """Save the result RPM out of the temporary working directory."""
    if self.rpm_path:
      shutil.copy(self.rpm_path, out_file)
      print('Saved RPM file to %s' % out_file)
    else:
      print('No RPM file created.')

  def Build(self, spec_file, out_file):
    """Build the RPM described by the spec_file.

    Returns:
      The rpmbuild exit status.
    """
    print('Building RPM for %s at %s' % (self.name, out_file))
    # Resolve both paths before chdir-ing into the temporary workdir.
    original_dir = os.getcwd()
    spec_file = os.path.join(original_dir, spec_file)
    out_file = os.path.join(original_dir, out_file)
    with Tempdir() as dirname:
      self.SetupWorkdir(spec_file, original_dir)
      status = self.CallRpmBuild(dirname)
    self.SaveResult(out_file)
    return status
def main(argv=()):
  """Build the RPM described by the parsed command-line flags.

  Args:
    argv: remaining command-line arguments; everything after the program
      name is treated as a file to package into the RPM.

  Returns:
    The rpmbuild exit status, or 1 if rpmbuild is not installed.
  """
  try:
    rpm_builder = RpmBuilder(FLAGS.name, FLAGS.version, FLAGS.release, FLAGS.arch)
    rpm_builder.AddFiles(argv[1:])
    return rpm_builder.Build(FLAGS.spec_file, FLAGS.out_file)
  except NoRpmbuildFound:
    print('ERROR: rpmbuild is required but is not present in PATH')
    return 1
if __name__ == '__main__':
  FLAGS = gflags.FLAGS
  # FLAGS(sys.argv) parses the flags and returns the remaining positional
  # arguments, which main() treats as the files to package.
  main(FLAGS(sys.argv))
| [
"os.path.exists",
"os.makedirs",
"re.compile",
"subprocess.Popen",
"os.access",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.isfile",
"os.path.dirname",
"tempfile.mkdtemp",
"os.path.basename",
"shutil.copy",
"shutil.rmtree",
"fileinput.input",
"os.path.expanduser",
"third_party.... | [((985, 1061), 'third_party.py.gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""name"""', '""""""', '"""The name of the software being packaged."""'], {}), "('name', '', 'The name of the software being packaged.')\n", (1005, 1061), False, 'from third_party.py import gflags\n'), ((1062, 1148), 'third_party.py.gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""version"""', '""""""', '"""The version of the software being packaged."""'], {}), "('version', '',\n 'The version of the software being packaged.')\n", (1082, 1148), False, 'from third_party.py import gflags\n'), ((1166, 1252), 'third_party.py.gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""release"""', '""""""', '"""The release of the software being packaged."""'], {}), "('release', '',\n 'The release of the software being packaged.')\n", (1186, 1252), False, 'from third_party.py import gflags\n'), ((1270, 1362), 'third_party.py.gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""arch"""', '""""""', '"""The CPU architecture of the software being packaged."""'], {}), "('arch', '',\n 'The CPU architecture of the software being packaged.')\n", (1290, 1362), False, 'from third_party.py import gflags\n'), ((1381, 1468), 'third_party.py.gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""spec_file"""', '""""""', '"""The file containing the RPM specification."""'], {}), "('spec_file', '',\n 'The file containing the RPM specification.')\n", (1401, 1468), False, 'from third_party.py import gflags\n'), ((1486, 1580), 'third_party.py.gflags.DEFINE_string', 'gflags.DEFINE_string', (['"""out_file"""', '""""""', '"""The destination to save the resulting RPM file to."""'], {}), "('out_file', '',\n 'The destination to save the resulting RPM file to.')\n", (1506, 1580), False, 'from third_party.py import gflags\n'), ((2801, 2852), 're.compile', 're.compile', (['"""Wrote: (?P<rpm_path>.+)"""', 're.MULTILINE'], {}), "('Wrote: (?P<rpm_path>.+)', re.MULTILINE)\n", (2811, 2852), False, 'import 
re\n'), ((2100, 2111), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2109, 2111), False, 'import os\n'), ((2458, 2467), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (2465, 2467), False, 'from tempfile import mkdtemp\n'), ((2123, 2149), 'os.path.expanduser', 'os.path.expanduser', (['newdir'], {}), '(newdir)\n', (2141, 2149), False, 'import os\n'), ((2183, 2200), 'os.chdir', 'os.chdir', (['prevdir'], {}), '(prevdir)\n', (2191, 2200), False, 'import os\n'), ((2490, 2512), 'shutil.rmtree', 'shutil.rmtree', (['dirpath'], {}), '(dirpath)\n', (2503, 2512), False, 'import shutil\n'), ((3447, 3474), 'fileinput.input', 'fileinput.input', (['input_file'], {}), '(input_file)\n', (3462, 3474), False, 'import fileinput\n'), ((3850, 3877), 'os.path.join', 'os.path.join', (['path', 'program'], {}), '(path, program)\n', (3862, 3877), False, 'import os\n'), ((5226, 5263), 'os.path.join', 'os.path.join', (['original_dir', 'spec_file'], {}), '(original_dir, spec_file)\n', (5238, 5263), False, 'import os\n'), ((5285, 5312), 'os.path.basename', 'os.path.basename', (['spec_file'], {}), '(spec_file)\n', (5301, 5312), False, 'import os\n'), ((5824, 5896), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n', (5840, 5896), False, 'import subprocess\n'), ((6629, 6640), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6638, 6640), False, 'import os\n'), ((6657, 6694), 'os.path.join', 'os.path.join', (['original_dir', 'spec_file'], {}), '(original_dir, spec_file)\n', (6669, 6694), False, 'import os\n'), ((6710, 6746), 'os.path.join', 'os.path.join', (['original_dir', 'out_file'], {}), '(original_dir, out_file)\n', (6722, 6746), False, 'import os\n'), ((3730, 3751), 'os.path.isfile', 'os.path.isfile', (['fpath'], {}), '(fpath)\n', (3744, 3751), False, 'import os\n'), ((3756, 3781), 'os.access', 'os.access', (['fpath', 'os.X_OK'], {}), '(fpath, os.X_OK)\n', (3765, 3781), 
False, 'import os\n'), ((6323, 6359), 'shutil.copy', 'shutil.copy', (['self.rpm_path', 'out_file'], {}), '(self.rpm_path, out_file)\n', (6334, 6359), False, 'import shutil\n'), ((4840, 4860), 'os.path.exists', 'os.path.exists', (['name'], {}), '(name)\n', (4854, 4860), False, 'import os\n'), ((4870, 4892), 'os.makedirs', 'os.makedirs', (['name', '(511)'], {}), '(name, 511)\n', (4881, 4892), False, 'import os\n'), ((4994, 5012), 'os.path.dirname', 'os.path.dirname', (['f'], {}), '(f)\n', (5009, 5012), False, 'import os\n'), ((5027, 5050), 'os.path.exists', 'os.path.exists', (['dst_dir'], {}), '(dst_dir)\n', (5041, 5050), False, 'import os\n'), ((5060, 5085), 'os.makedirs', 'os.makedirs', (['dst_dir', '(511)'], {}), '(dst_dir, 511)\n', (5071, 5085), False, 'import os\n'), ((5106, 5135), 'os.path.join', 'os.path.join', (['original_dir', 'f'], {}), '(original_dir, f)\n', (5118, 5135), False, 'import os\n')] |
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
# Re-analyze all images that don't have latest version of the analysis available
from freta.api import Freta
def main():
    """Queue re-analysis for every image whose report predates the current engine."""
    client = Freta()
    current_analysis = client.versions()["analysis"]
    for image in client.image.list():
        has_report = image["state"] == "Report available"
        if has_report and image["analysis_version"] != current_analysis:
            print("redoing %s" % image["image_id"])
            client.image.analyze(image["image_id"])


if __name__ == "__main__":
    main()
| [
"freta.api.Freta"
] | [((235, 242), 'freta.api.Freta', 'Freta', ([], {}), '()\n', (240, 242), False, 'from freta.api import Freta\n')] |
import numpy as np
import tensorflow as tf
from keras import backend as K
from tqdm import tqdm
def write_log(callback, names, logs, batch_no):
    """Emit one TensorBoard scalar per (name, value) pair via the callback's writer."""
    for tag, scalar in zip(names, logs):
        entry = tf.Summary()
        entry.value.add(tag=tag, simple_value=scalar)
        callback.writer.add_summary(entry, batch_no)
    callback.writer.flush()
def fit_one_epoch(model_rpn, model_all, loss_history, callback, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, anchors, bbox_util, roi_helper):
    """Run one Faster-RCNN training epoch followed by a validation pass.

    For each training batch: predict RPN proposals, turn them into ROI inputs
    via roi_helper.calc_iou, then train the full model on batch. Accumulated
    losses are logged to TensorBoard (write_log) and the tqdm postfix.
    Afterwards the same pipeline runs with test_on_batch over gen_val, and the
    weights are checkpointed with the epoch losses in the filename.
    """
    total_loss = 0
    rpn_loc_loss = 0
    rpn_cls_loss = 0
    roi_loc_loss = 0
    roi_cls_loss = 0
    val_loss = 0
    with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_step:
                break
            X, Y, boxes = batch[0], batch[1], batch[2]
            P_rpn = model_rpn.predict_on_batch(X)
            results = bbox_util.detection_out_rpn(P_rpn, anchors)
            roi_inputs = []
            out_classes = []
            out_regrs = []
            # build per-sample ROI targets from the RPN proposals
            for i in range(len(X)):
                R = results[i]
                X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
                roi_inputs.append(X2)
                out_classes.append(Y1)
                out_regrs.append(Y2)
            loss_class = model_all.train_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
            write_log(callback, ['total_loss','rpn_cls_loss', 'rpn_reg_loss', 'detection_cls_loss', 'detection_reg_loss'], loss_class, iteration)
            # loss_class: [total, rpn_cls, rpn_reg, det_cls, det_reg]
            rpn_cls_loss += loss_class[1]
            rpn_loc_loss += loss_class[2]
            roi_cls_loss += loss_class[3]
            roi_loc_loss += loss_class[4]
            total_loss = rpn_loc_loss + rpn_cls_loss + roi_loc_loss + roi_cls_loss
            pbar.set_postfix(**{'total'    : total_loss / (iteration + 1),
                                'rpn_cls'  : rpn_cls_loss / (iteration + 1),
                                'rpn_loc'  : rpn_loc_loss / (iteration + 1),
                                'roi_cls'  : roi_cls_loss / (iteration + 1),
                                'roi_loc'  : roi_loc_loss / (iteration + 1),
                                'lr'       : K.get_value(model_rpn.optimizer.lr)})
            pbar.update(1)
    print('Start Validation')
    with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            if iteration >= epoch_step_val:
                break
            X, Y, boxes = batch[0], batch[1], batch[2]
            P_rpn = model_rpn.predict_on_batch(X)
            results = bbox_util.detection_out_rpn(P_rpn, anchors)
            roi_inputs = []
            out_classes = []
            out_regrs = []
            for i in range(len(X)):
                R = results[i]
                X2, Y1, Y2 = roi_helper.calc_iou(R, boxes[i])
                roi_inputs.append(X2)
                out_classes.append(Y1)
                out_regrs.append(Y2)
            # evaluation only: no weight update on the validation set
            loss_class = model_all.test_on_batch([X, np.array(roi_inputs)], [Y[0], Y[1], np.array(out_classes), np.array(out_regrs)])
            val_loss += loss_class[0]
            pbar.set_postfix(**{'total' : val_loss / (iteration + 1)})
            pbar.update(1)
    logs = {'loss': total_loss / epoch_step, 'val_loss': val_loss / epoch_step_val}
    loss_history.on_epoch_end([], logs)
    print('Epoch:'+ str(epoch+1) + '/' + str(Epoch))
    print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))
    model_all.save_weights('logs/ep%03d-loss%.3f-val_loss%.3f.h5' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val))
| [
"numpy.array",
"tqdm.tqdm",
"tensorflow.Summary",
"keras.backend.get_value"
] | [((211, 223), 'tensorflow.Summary', 'tf.Summary', ([], {}), '()\n', (221, 223), True, 'import tensorflow as tf\n'), ((730, 822), 'tqdm.tqdm', 'tqdm', ([], {'total': 'epoch_step', 'desc': 'f"""Epoch {epoch + 1}/{Epoch}"""', 'postfix': 'dict', 'mininterval': '(0.3)'}), "(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict,\n mininterval=0.3)\n", (734, 822), False, 'from tqdm import tqdm\n'), ((2572, 2668), 'tqdm.tqdm', 'tqdm', ([], {'total': 'epoch_step_val', 'desc': 'f"""Epoch {epoch + 1}/{Epoch}"""', 'postfix': 'dict', 'mininterval': '(0.3)'}), "(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict,\n mininterval=0.3)\n", (2576, 2668), False, 'from tqdm import tqdm\n'), ((1515, 1535), 'numpy.array', 'np.array', (['roi_inputs'], {}), '(roi_inputs)\n', (1523, 1535), True, 'import numpy as np\n'), ((1551, 1572), 'numpy.array', 'np.array', (['out_classes'], {}), '(out_classes)\n', (1559, 1572), True, 'import numpy as np\n'), ((1574, 1593), 'numpy.array', 'np.array', (['out_regrs'], {}), '(out_regrs)\n', (1582, 1593), True, 'import numpy as np\n'), ((3397, 3417), 'numpy.array', 'np.array', (['roi_inputs'], {}), '(roi_inputs)\n', (3405, 3417), True, 'import numpy as np\n'), ((3433, 3454), 'numpy.array', 'np.array', (['out_classes'], {}), '(out_classes)\n', (3441, 3454), True, 'import numpy as np\n'), ((3456, 3475), 'numpy.array', 'np.array', (['out_regrs'], {}), '(out_regrs)\n', (3464, 3475), True, 'import numpy as np\n'), ((2463, 2498), 'keras.backend.get_value', 'K.get_value', (['model_rpn.optimizer.lr'], {}), '(model_rpn.optimizer.lr)\n', (2474, 2498), True, 'from keras import backend as K\n')] |
from mopidy_vfd import Extension
def test_get_default_config():
    """The shipped default config declares the [vfd] section and enables it."""
    config = Extension().get_default_config()
    assert "[vfd]" in config
    assert "enabled = true" in config
def test_get_config_schema():
    """The config schema must expose the 'display' option."""
    schema = Extension().get_config_schema()
    assert "display" in schema
| [
"mopidy_vfd.Extension"
] | [((76, 87), 'mopidy_vfd.Extension', 'Extension', ([], {}), '()\n', (85, 87), False, 'from mopidy_vfd import Extension\n'), ((237, 248), 'mopidy_vfd.Extension', 'Extension', ([], {}), '()\n', (246, 248), False, 'from mopidy_vfd import Extension\n')] |
from http import HTTPStatus
import time
import logging
import pytest
import grequests
from flask import url_for
from eth_utils import (
to_checksum_address,
to_canonical_address,
is_checksum_address,
)
from raiden_contracts.constants import (
CONTRACT_HUMAN_STANDARD_TOKEN,
MAX_TOKENS_DEPLOY,
TEST_SETTLE_TIMEOUT_MIN,
TEST_SETTLE_TIMEOUT_MAX,
)
from raiden.api.v1.encoding import (
AddressField,
HexAddressConverter,
)
from raiden.transfer.state import (
CHANNEL_STATE_OPENED,
CHANNEL_STATE_CLOSED,
)
from raiden.tests.utils import assert_dicts_are_equal
from raiden.tests.utils.client import burn_all_eth
from raiden.tests.utils.smartcontracts import deploy_contract_web3
# pylint: disable=too-many-locals,unused-argument,too-many-lines
def assert_no_content_response(response):
    """Assert that *response* is an empty-bodied 204 NO CONTENT reply."""
    assert response is not None
    assert response.text == ''
    assert response.status_code == HTTPStatus.NO_CONTENT
def assert_response_with_code(response, status_code):
    """Assert that *response* exists and carries exactly *status_code*."""
    assert response is not None
    assert response.status_code == status_code
def assert_response_with_error(response, status_code):
    """Assert an error reply: expected status plus a non-empty 'errors' field."""
    assert response is not None
    assert response.status_code == status_code
    payload = response.json()
    assert 'errors' in payload and payload['errors'] != ''
def assert_proper_response(response, status_code=HTTPStatus.OK):
    """Assert a well-formed JSON reply with the expected status (200 by default)."""
    assert response is not None
    assert response.status_code == status_code
    assert response.headers['Content-Type'] == 'application/json'
def api_url_for(api_backend, endpoint, **kwargs):
    """Build the URL for *endpoint* inside the backend's Flask app context.

    Address kwargs are passed through unchanged as their '0x...' string form;
    the old kwargs loop that "converted" them was a no-op (the conversion was
    commented out) and has been removed.
    """
    api_server, _ = api_backend
    with api_server.flask_app.app_context():
        return url_for('v1_resources.{}'.format(endpoint), **kwargs)
def test_hex_converter():
    """HexAddressConverter: malformed inputs raise; a checksummed address decodes to bytes."""
    converter = HexAddressConverter(map=None)
    # '-' is not hex, '0x1234' is too short, the last one lacks the 0x prefix
    for bad_input in ('-', '0x1234', '414d72a6f6e28f4950117696081450d63d56c354'):
        with pytest.raises(Exception):
            converter.to_python(bad_input)
    expected = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
    assert converter.to_python('0x414D72a6f6E28F4950117696081450d63D56C354') == expected
def test_address_field():
    """AddressField._deserialize: malformed inputs raise; a checksummed address decodes."""
    # pylint: disable=protected-access
    field = AddressField()
    attr = 'test'
    data = object()
    # '-' is not hex, '0x1234' is too short, the last one lacks the 0x prefix
    for bad_input in ('-', '0x1234', '414d72a6f6e28f4950117696081450d63d56c354'):
        with pytest.raises(Exception):
            field._deserialize(bad_input, attr, data)
    expected = b'AMr\xa6\xf6\xe2\x8fIP\x11v\x96\x08\x14P\xd6=V\xc3T'
    assert field._deserialize('0x414D72a6f6E28F4950117696081450d63D56C354', attr, data) == expected
def test_url_with_invalid_address(rest_api_port_number, api_backend):
    """ Addresses require the leading 0x in the urls. """
    url_without_prefix = (
        'http://localhost:{}/api/1/channels/'
        'ea674fdde714fd979de3edf0f56aa9716b898ec8'
    ).format(rest_api_port_number)
    # no route matches the un-prefixed address, so the server answers 404
    response = grequests.patch(
        url_without_prefix,
        json={'state': 'CHANNEL_STATE_SETTLED'},
    ).send().response
    assert_response_with_code(response, HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_without_prefix(api_backend):
    """ Addresses require leading 0x in the payload. """
    # valid hex, but missing the mandatory 0x prefix
    payload = {
        'partner_address': '61c808d82a3ac53231750dadc13c777b59310bd9',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_chars(api_backend):
    """ Addresses cannot have invalid characters in it. """
    # trailing 'g' is not a hex digit
    payload = {
        'partner_address': '0x61c808d82a3ac53231750dadc13c777b59310bdg',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_invalid_length(api_backend):
    """ Encoded addresses must have the right length. """
    # one byte short of a full 20-byte address
    payload = {
        'partner_address': '0x61c808d82a3ac53231750dadc13c777b59310b',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 10,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_payload_with_address_not_eip55(api_backend):
    """ Provided addresses must be EIP55 encoded. """
    # all-lowercase address: valid hex/length, but not checksummed
    payload = {
        'partner_address': '0xf696209d2ca35e6c88e5b99b7cda3abf316bed69',
        'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8',
        'settle_timeout': 90,
    }
    response = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=payload,
    ).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_query_our_address(api_backend):
    """The address endpoint reports the node's own checksummed address."""
    response = grequests.get(
        api_url_for(api_backend, 'addressresource'),
    ).send().response
    assert_proper_response(response)
    api_server, _ = api_backend
    expected = to_checksum_address(api_server.rest_api.raiden_api.address)
    assert response.json() == {'our_address': expected}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_get_channel_list(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """GET /channels starts empty and lists a channel once one is opened."""
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    assert response.json() == []
    # let's create a new channel
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # the list endpoint must now contain the freshly opened channel
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    channel_info = response.json()[0]
    assert channel_info['partner_address'] == partner_address
    assert channel_info['token_address'] == to_checksum_address(token_address)
    assert 'token_network_identifier' in channel_info
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_status_channel_nonexistant(
        api_backend,
        token_addresses,
):
    """Querying a never-opened channel returns 404 with a descriptive error."""
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    response = grequests.get(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
    ).send().response
    assert_proper_response(response, HTTPStatus.NOT_FOUND)
    expected_error = "Channel with partner '{}' for token '{}' could not be found.".format(
        to_checksum_address(partner_address),
        to_checksum_address(token_address),
    )
    assert response.json()['errors'] == expected_error
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_and_deposit_channel(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Open two channels (the second with an initial deposit), top up the
    first, verify both via GET, then check that opening with no ETH left
    fails with 402 PAYMENT REQUIRED."""
    # let's create a new channel
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()
    # NOTE: expected_response aliases channel_data_obj — the request payload
    # is mutated in place into the expectation
    expected_response = channel_data_obj
    expected_response['balance'] = 0
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    assert_dicts_are_equal(response, expected_response)
    token_network_identifier = response['token_network_identifier']
    # now let's open a channel and make a deposit too
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    balance = 100
    channel_data_obj = {
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'balance': balance,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['token_network_identifier'] = token_network_identifier
    assert_dicts_are_equal(response, expected_response)
    # let's deposit on the first channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=first_partner_address,
        ),
        json={'total_deposit': balance},
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = {
        'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE,
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_OPENED,
        'balance': balance,
        'token_network_identifier': token_network_identifier,
    }
    assert_dicts_are_equal(response, expected_response)
    # let's try querying for the second channel
    request = grequests.get(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=second_partner_address,
        ),
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = {
        'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE,
        'partner_address': second_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_OPENED,
        'balance': balance,
        'token_network_identifier': token_network_identifier,
    }
    assert_dicts_are_equal(response, expected_response)
    # finally let's burn all eth and try to open another channel
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)
    channel_data_obj = {
        'partner_address': '0xf3AF96F89b3d7CdcBE0C083690A28185Feb0b3CE',
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'balance': 1,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    response = response.json()
    assert 'Insufficient ETH' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_close_and_settle_channel(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Open a zero-balance channel, close it, and verify the closed state."""
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    balance = 0
    assert_proper_response(response, status_code=HTTPStatus.CREATED)
    response = response.json()
    # NOTE: expected_response aliases channel_data_obj (mutated in place)
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['reveal_timeout'] = reveal_timeout
    expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    assert_dicts_are_equal(response, expected_response)
    token_network_identifier = response['token_network_identifier']
    # let's close the channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json={'state': CHANNEL_STATE_CLOSED},
    )
    response = request.send().response
    assert_proper_response(response)
    expected_response = {
        'token_network_identifier': token_network_identifier,
        'channel_identifier': assert_dicts_are_equal.IGNORE_VALUE,
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_CLOSED,
        'balance': balance,
    }
    assert_dicts_are_equal(response.json(), expected_response)
def test_api_close_insufficient_eth(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Closing a channel after the node's ETH is burned must fail with 402.

    NOTE(review): unlike its siblings this test carries no parametrize
    decorators for number_of_nodes/channels_per_node — confirm whether
    relying on the fixture defaults is intentional.
    """
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    balance = 0
    assert_proper_response(response, status_code=HTTPStatus.CREATED)
    response = response.json()
    # NOTE: expected_response aliases channel_data_obj (mutated in place)
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['reveal_timeout'] = reveal_timeout
    expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    assert_dicts_are_equal(response, expected_response)
    # let's burn all eth and try to close the channel
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json={'state': CHANNEL_STATE_CLOSED},
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    response = response.json()
    assert 'Insufficient ETH' in response['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_open_channel_invalid_input(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """A settle timeout below the minimum or above the maximum yields 409."""
    channel_data_obj = {
        'partner_address': '0x61C808D82A3Ac53231750daDc13c777b59310bD9',
        'token_address': to_checksum_address(token_addresses[0]),
        'reveal_timeout': reveal_timeout,
    }
    for invalid_timeout in (TEST_SETTLE_TIMEOUT_MIN - 1, TEST_SETTLE_TIMEOUT_MAX + 1):
        channel_data_obj['settle_timeout'] = invalid_timeout
        response = grequests.put(
            api_url_for(api_backend, 'channelsresource'),
            json=channel_data_obj,
        ).send().response
        assert_response_with_error(response, status_code=HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_api_channel_state_change_errors(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """Exercise invalid PATCH requests on a channel: unknown state, state plus
    deposit in the same request, no arguments at all, and a deposit after the
    channel was closed."""
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # let's try to set a random state
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state='inlimbo'),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
    # let's try to set both new state and balance
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state=CHANNEL_STATE_CLOSED, total_deposit=200),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
    # let's try to patch with no arguments
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
    # ok now let's close the channel for real
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(state=CHANNEL_STATE_CLOSED),
    )
    response = request.send().response
    assert_proper_response(response)
    # let's try to deposit to the closed channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebytokenandpartneraddress',
            token_address=token_address,
            partner_address=partner_address,
        ),
        json=dict(total_deposit=500),
    )
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.CONFLICT)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_api_tokens(api_backend, blockchain_services, token_addresses):
    """After opening one channel per token, the token list endpoint reports both tokens."""
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    settle_timeout = 1650
    for token_address in token_addresses[:2]:
        response = grequests.put(
            api_url_for(api_backend, 'channelsresource'),
            json={
                'partner_address': partner_address,
                'token_address': to_checksum_address(token_address),
                'settle_timeout': settle_timeout,
            },
        ).send().response
        assert_proper_response(response, HTTPStatus.CREATED)
    # and now let's get the token list
    response = grequests.get(
        api_url_for(api_backend, 'tokensresource'),
    ).send().response
    assert_proper_response(response)
    expected = {
        to_checksum_address(token_addresses[0]),
        to_checksum_address(token_addresses[1]),
    }
    assert set(response.json()) == expected
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_query_partners_by_token(api_backend, blockchain_services, token_addresses):
    """Open channels with three partners and verify the partners-per-token listing."""
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': first_partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()
    channel_data_obj['partner_address'] = second_partner_address
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    response = response.json()
    # a third channel with yet another partner
    # NOTE(review): the original comment claimed "another token", but the same
    # token_address is reused here — confirm whether a second token was intended
    channel_data_obj['partner_address'] = '0xb07937AbA15304FBBB0Bf6454a9377a76E3dD39E'
    channel_data_obj['token_address'] = to_checksum_address(token_address)
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)
    # and now let's query our partners per token for the first token
    request = grequests.get(
        api_url_for(
            api_backend,
            'partnersresourcebytokenaddress',
            token_address=to_checksum_address(token_address),
        ),
    )
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = [
        {
            'partner_address': first_partner_address,
            'channel': '/api/1/channels/{}/{}'.format(
                to_checksum_address(token_address),
                to_checksum_address(first_partner_address),
            ),
        }, {
            'partner_address': second_partner_address,
            'channel': '/api/1/channels/{}/{}'.format(
                to_checksum_address(token_address),
                to_checksum_address(second_partner_address),
            ),
        },
    ]
    assert all(r in response for r in expected_response)
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_transfers(api_backend, raiden_network, token_addresses):
    """A direct transfer posts successfully and the reply echoes its details."""
    _, app1 = raiden_network
    amount = 200
    identifier = 42
    token_address = token_addresses[0]
    target_address = app1.raiden.address
    api_server, _ = api_backend
    our_address = api_server.rest_api.raiden_api.address
    response = grequests.post(
        api_url_for(
            api_backend,
            'transfertotargetresource',
            token_address=to_checksum_address(token_address),
            target_address=to_checksum_address(target_address),
        ),
        json={'amount': amount, 'identifier': identifier},
    ).send().response
    assert_proper_response(response)
    expected_transfer = {
        'initiator_address': to_checksum_address(our_address),
        'target_address': to_checksum_address(target_address),
        'token_address': to_checksum_address(token_address),
        'amount': amount,
        'identifier': identifier,
    }
    assert response.json() == expected_transfer
# demo: cross-chain (ETH/BTC) transaction API tests
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_crosstransactiontry(api_backend, raiden_network, token_addresses):
    """Initiate a cross-chain (ETH -> BTC) transaction via the REST API.

    POSTs a crosstransaction request, waits for the node to process it,
    reads the resulting hash from the node's WAL storage, verifies the hash
    endpoint accepts it, and finally checks the API echoed the payload.
    """
    _, app1 = raiden_network
    raiden = _.raiden  # NOTE(review): reads `.raiden` off `_` (the first app) -- a named variable would be clearer
    sendETH_amount = 101
    sendBTC_amount =2
    receiveBTC_address = "1JnC15WwDVcC3QbQRUY6ChqRLucLpTGaJN"
    token_address = token_addresses[0]
    target_address = app1.raiden.address
    api_server, _ = api_backend
    our_address = api_server.rest_api.raiden_api.address
    # Expected echo of the request payload in the API response.
    crosstransaction = {
        'initiator_address': to_checksum_address(our_address),
        'target_address': to_checksum_address(target_address),
        'token_address': to_checksum_address(token_address),
        'sendETH_amount': sendETH_amount,
        'sendBTC_amount': sendBTC_amount,
        'receiveBTC_address':receiveBTC_address,
    }
    request = grequests.post(
        api_url_for(
            api_backend,
            'crosstransactiontry',
            token_address=to_checksum_address(token_address),
            target_address=to_checksum_address(target_address),
        ),
        json={'initiator_address': to_checksum_address(our_address), 'sendETH_amount': sendETH_amount,'sendBTC_amount':sendBTC_amount,'receiveBTC_address':receiveBTC_address},
    )
    response = request.send().response
    # Give the node time to process and persist the cross-transaction.
    # TODO(review): a fixed 10s sleep is slow and fragile -- confirm whether
    # polling the WAL would work instead.
    time.sleep(10)
    # Presumably index 9 of the stored row is the hash -- verify against the
    # crosstransaction storage schema.
    hash_r = raiden.wal.storage.get_all_crosstransaction()[0][9]
    test_api_crosstransation_hash(api_backend,raiden_network,token_address,hash_r)
    assert_proper_response(response)
    response = response.json()
    assert response == crosstransaction
# demo: listing and querying cross-chain transactions
@pytest.mark.parametrize('number_of_nodes', [2])
def test_api_getcrosstransation(api_backend, raiden_network, token_addresses):
    """List cross-chain transactions via the REST API.

    Seeds one cross-transaction via ``test_api_crosstransactiontry``, then
    verifies ``getcrosstransaction`` returns a non-empty list and that the
    first entry can be fetched back by its ``crossid``.
    """
    # Fix: removed unused locals (`app1`, `api_server`, `raiden`) and the
    # duplicate `response.json()` parse.
    test_api_crosstransactiontry(api_backend, raiden_network, token_addresses)
    request = grequests.get(
        api_url_for(
            api_backend,
            'getcrosstransaction',
        )
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    logging.debug(response)
    result = response.json()
    assert result != []
    # Round-trip: fetch the first entry again by its id.
    cross_id = result[0]['crossid']
    test_api_getcrosstransation_by_id(api_backend, raiden_network, token_addresses, cross_id)
def test_api_getcrosstransation_by_id(api_backend, raiden_network, token_addresses, cross_id):
    """Fetch a single cross-chain transaction by its ``cross_id``.

    Helper used by ``test_api_getcrosstransation``; asserts the endpoint
    answers 200 OK with a non-empty body.
    """
    # Fix: removed the no-op `cross_id = cross_id` self-assignment and the
    # unused `_, app1` / `api_server, _` unpackings.
    request = grequests.get(
        api_url_for(
            api_backend,
            'getcrosstransactionbyid',
            cross_id=cross_id,
        )
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    assert response.json() != []
def test_api_crosstransation_hash(api_backend, raiden_network, token_addresses, hash_r):
    """Verify the node acknowledges a cross-transaction hash.

    GETs ``recivehashresource`` with the stringified hash and expects the
    literal acknowledgement ``'hash_r is ok'``.
    """
    # Fix: removed the unused `_, app1` / `api_server, _` unpackings.
    request = grequests.get(
        api_url_for(
            api_backend,
            'recivehashresource',
            hash_r=str(hash_r),
        )
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.OK)
    assert response.json() == 'hash_r is ok'
@pytest.mark.parametrize('number_of_tokens', [0])
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_register_token(api_backend, token_amount, token_addresses, raiden_network):
    """Register a freshly deployed token, then exercise the failure modes:
    a duplicate registration (409) and registration without ETH (402)."""
    app0 = raiden_network[0]

    def deploy_token():
        # Deploy a brand-new HumanStandardToken so registration cannot
        # collide with any fixture token.
        return deploy_contract_web3(
            CONTRACT_HUMAN_STANDARD_TOKEN,
            app0.raiden.chain.client,
            num_confirmations=None,
            constructor_arguments=(token_amount, 2, 'raiden', 'Rd'),
        )

    def register(address):
        # PUT the token address to the registration endpoint.
        return grequests.put(api_url_for(
            api_backend,
            'registertokenresource',
            token_address=to_checksum_address(address),
        )).send().response

    new_token_address = deploy_token()
    other_token_address = deploy_token()

    register_response = register(new_token_address)
    assert_proper_response(register_response, status_code=HTTPStatus.CREATED)
    response_json = register_response.json()
    assert 'token_network_address' in response_json
    assert is_checksum_address(response_json['token_network_address'])

    # Registering the same token a second time must be rejected.
    conflict_response = register(new_token_address)
    assert_response_with_error(conflict_response, HTTPStatus.CONFLICT)

    # With no ETH left, the registration transaction cannot be paid for.
    burn_all_eth(app0.raiden)
    poor_response = register(other_token_address)
    assert_response_with_error(poor_response, HTTPStatus.PAYMENT_REQUIRED)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_get_connection_managers_info(api_backend, token_addresses):
    """The connections-info endpoint grows by one entry per joined token network."""

    def get_info():
        # GET the current connections info as parsed JSON.
        response = grequests.get(
            api_url_for(api_backend, 'connectionsinforesource'),
        ).send().response
        return response.json()

    def connect(token_address, funds):
        # Join the token network with the given funds; expect 204 No Content.
        response = grequests.put(
            api_url_for(
                api_backend,
                'connectionsresource',
                token_address=token_address,
            ),
            json={'funds': funds},
        ).send().response
        assert_no_content_response(response)

    # check that there are no registered tokens
    assert len(get_info()) == 0

    funds = 100
    token_address1 = to_checksum_address(token_addresses[0])
    connect(token_address1, funds)

    # check that there now is one registered channel manager
    result = get_info()
    assert isinstance(result, dict) and len(result.keys()) == 1
    assert token_address1 in result
    assert isinstance(result[token_address1], dict)
    assert set(result[token_address1].keys()) == {'funds', 'sum_deposits', 'channels'}

    funds = 100
    token_address2 = to_checksum_address(token_addresses[1])
    connect(token_address2, funds)

    # check that there now are two registered channel managers
    result = get_info()
    assert isinstance(result, dict) and len(result.keys()) == 2
    assert token_address2 in result
    assert isinstance(result[token_address2], dict)
    assert set(result[token_address2].keys()) == {'funds', 'sum_deposits', 'channels'}
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('number_of_tokens', [2])
def test_connect_insufficient_eth(api_backend, token_addresses):
    """Joining a token network with no ETH must fail with 402 Payment Required."""
    # Burn all eth and then try to connect to a token network
    api_server, _ = api_backend
    burn_all_eth(api_server.rest_api.raiden_api.raiden)

    response = grequests.put(
        api_url_for(
            api_backend,
            'connectionsresource',
            token_address=to_checksum_address(token_addresses[0]),
        ),
        json={'funds': 100},
    ).send().response
    assert_proper_response(response, HTTPStatus.PAYMENT_REQUIRED)
    assert 'Insufficient ETH' in response.json()['errors']
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_network_events(api_backend, token_addresses):
    """Opening a channel yields at least one event from the network endpoint."""
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    channel_payload = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': 1650,
    }
    create_response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_payload,
    ).send().response
    assert_proper_response(create_response, status_code=HTTPStatus.CREATED)

    # Query network-level events from the genesis block onward.
    events_response = grequests.get(
        api_url_for(
            api_backend,
            'networkeventsresource',
            from_block=0,
        ),
    ).send().response
    assert_proper_response(events_response, status_code=HTTPStatus.OK)
    assert len(events_response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events(api_backend, token_addresses):
    """Opening a channel yields at least one event from the token endpoint."""
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    channel_payload = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': 1650,
    }
    create_response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_payload,
    ).send().response
    assert_proper_response(create_response, status_code=HTTPStatus.CREATED)

    # Query token-level events from the genesis block onward.
    events_response = grequests.get(
        api_url_for(
            api_backend,
            'tokeneventsresource',
            token_address=token_address,
            from_block=0,
        ),
    ).send().response
    assert_proper_response(events_response, status_code=HTTPStatus.OK)
    assert len(events_response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_channel_events(api_backend, token_addresses):
    """Opening a channel yields at least one event from the channel endpoint."""
    # let's create a new channel
    partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    token_address = token_addresses[0]
    channel_payload = {
        'partner_address': partner_address,
        'token_address': to_checksum_address(token_address),
        'settle_timeout': 1650,
    }
    create_response = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_payload,
    ).send().response
    assert_proper_response(create_response, status_code=HTTPStatus.CREATED)

    # Query channel-level events for this partner/token pair from block 0.
    events_response = grequests.get(
        api_url_for(
            api_backend,
            'channeleventsresource',
            partner_address=partner_address,
            token_address=token_address,
            from_block=0,
        ),
    ).send().response
    assert_proper_response(events_response, status_code=HTTPStatus.OK)
    assert len(events_response.json()) > 0
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
def test_token_events_errors_for_unregistered_token(api_backend):
    """Querying token events for an unregistered token must answer 404."""
    request = grequests.get(
        api_url_for(
            api_backend,
            'tokeneventsresource',
            # NOTE(review): '<KEY>' looks like a scrubbed placeholder -- this was
            # presumably a hex token address; confirm against upstream history.
            token_address='<KEY>',
            from_block=5,
            to_block=20,
        ),
    )
    response = request.send().response
    assert_response_with_error(response, status_code=HTTPStatus.NOT_FOUND)
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('deposit', [50000])
def test_api_deposit_limit(
        api_backend,
        token_addresses,
        reveal_timeout,
):
    """A deposit exactly at the token limit opens a channel; one unit above fails."""
    first_partner_address = '0x61C808D82A3Ac53231750daDc13c777b59310bD9'
    second_partner_address = '0x29FA6cf0Cce24582a9B20DB94Be4B6E017896038'
    token_address = token_addresses[0]
    settle_timeout = 1650
    balance_working = MAX_TOKENS_DEPLOY * (10 ** 2)  # token has two digits

    def open_channel(partner, balance):
        # PUT a channel-open request with the given partner and deposit.
        payload = {
            'partner_address': partner,
            'token_address': to_checksum_address(token_address),
            'settle_timeout': settle_timeout,
            'reveal_timeout': reveal_timeout,
            'balance': balance,
        }
        response = grequests.put(
            api_url_for(
                api_backend,
                'channelsresource',
            ),
            json=payload,
        ).send().response
        return payload, response

    # let's create a new channel and deposit exactly the limit amount
    channel_data_obj, response = open_channel(first_partner_address, balance_working)
    assert_proper_response(response, HTTPStatus.CREATED)
    expected_response = channel_data_obj
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['channel_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    expected_response['token_network_identifier'] = assert_dicts_are_equal.IGNORE_VALUE
    assert_dicts_are_equal(response.json(), expected_response)

    # now let's open a channel and deposit a bit more than the limit
    balance_failing = balance_working + 1  # token has two digits
    _, response = open_channel(second_partner_address, balance_failing)
    assert_proper_response(response, HTTPStatus.CONFLICT)
    assert response.json()['errors'] == 'The deposit of 10001 is bigger than the current limit of 10000'
| [
"raiden.tests.utils.smartcontracts.deploy_contract_web3",
"raiden.api.v1.encoding.HexAddressConverter",
"logging.debug",
"raiden.tests.utils.assert_dicts_are_equal",
"eth_utils.to_checksum_address",
"time.sleep",
"pytest.mark.parametrize",
"pytest.raises",
"raiden.api.v1.encoding.AddressField",
"r... | [((3759, 3806), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (3782, 3806), False, 'import pytest\n'), ((3808, 3857), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (3831, 3857), False, 'import pytest\n'), ((4477, 4524), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (4500, 4524), False, 'import pytest\n'), ((4526, 4575), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (4549, 4575), False, 'import pytest\n'), ((5226, 5273), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (5249, 5273), False, 'import pytest\n'), ((5275, 5324), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (5298, 5324), False, 'import pytest\n'), ((5972, 6019), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (5995, 6019), False, 'import pytest\n'), ((6021, 6070), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (6044, 6070), False, 'import pytest\n'), ((6684, 6731), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (6707, 6731), False, 'import pytest\n'), ((6733, 6782), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (6756, 6782), False, 'import pytest\n'), ((7165, 7212), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (7188, 7212), False, 'import 
pytest\n'), ((7214, 7263), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (7237, 7263), False, 'import pytest\n'), ((8708, 8755), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (8731, 8755), False, 'import pytest\n'), ((8757, 8806), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (8780, 8806), False, 'import pytest\n'), ((9582, 9629), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (9605, 9629), False, 'import pytest\n'), ((9631, 9680), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (9654, 9680), False, 'import pytest\n'), ((14499, 14546), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (14522, 14546), False, 'import pytest\n'), ((14548, 14597), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (14571, 14597), False, 'import pytest\n'), ((18531, 18578), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (18554, 18578), False, 'import pytest\n'), ((18580, 18629), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (18603, 18629), False, 'import pytest\n'), ((19728, 19775), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (19751, 19775), False, 'import pytest\n'), ((19777, 19826), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", 
(19800, 19826), False, 'import pytest\n'), ((22628, 22675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (22651, 22675), False, 'import pytest\n'), ((22677, 22726), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (22700, 22726), False, 'import pytest\n'), ((22728, 22776), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_tokens"""', '[2]'], {}), "('number_of_tokens', [2])\n", (22751, 22776), False, 'import pytest\n'), ((24422, 24469), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (24445, 24469), False, 'import pytest\n'), ((24471, 24520), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (24494, 24520), False, 'import pytest\n'), ((27061, 27108), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[2]'], {}), "('number_of_nodes', [2])\n", (27084, 27108), False, 'import pytest\n'), ((28151, 28198), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[2]'], {}), "('number_of_nodes', [2])\n", (28174, 28198), False, 'import pytest\n'), ((29718, 29765), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[2]'], {}), "('number_of_nodes', [2])\n", (29741, 29765), False, 'import pytest\n'), ((31399, 31447), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_tokens"""', '[0]'], {}), "('number_of_tokens', [0])\n", (31422, 31447), False, 'import pytest\n'), ((31449, 31496), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (31472, 31496), False, 'import pytest\n'), ((31498, 31547), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', 
'[0]'], {}), "('channels_per_node', [0])\n", (31521, 31547), False, 'import pytest\n'), ((33458, 33505), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (33481, 33505), False, 'import pytest\n'), ((33507, 33556), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (33530, 33556), False, 'import pytest\n'), ((33558, 33606), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_tokens"""', '[2]'], {}), "('number_of_tokens', [2])\n", (33581, 33606), False, 'import pytest\n'), ((35677, 35724), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (35700, 35724), False, 'import pytest\n'), ((35726, 35775), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (35749, 35775), False, 'import pytest\n'), ((35777, 35825), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_tokens"""', '[2]'], {}), "('number_of_tokens', [2])\n", (35800, 35825), False, 'import pytest\n'), ((36565, 36612), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (36588, 36612), False, 'import pytest\n'), ((36614, 36663), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (36637, 36663), False, 'import pytest\n'), ((37624, 37671), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (37647, 37671), False, 'import pytest\n'), ((37673, 37722), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (37696, 37722), False, 'import pytest\n'), ((38720, 38767), 'pytest.mark.parametrize', 
'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (38743, 38767), False, 'import pytest\n'), ((38769, 38818), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (38792, 38818), False, 'import pytest\n'), ((39865, 39912), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (39888, 39912), False, 'import pytest\n'), ((39914, 39963), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (39937, 39963), False, 'import pytest\n'), ((40360, 40407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""number_of_nodes"""', '[1]'], {}), "('number_of_nodes', [1])\n", (40383, 40407), False, 'import pytest\n'), ((40409, 40458), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channels_per_node"""', '[0]'], {}), "('channels_per_node', [0])\n", (40432, 40458), False, 'import pytest\n'), ((40460, 40503), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""deposit"""', '[50000]'], {}), "('deposit', [50000])\n", (40483, 40503), False, 'import pytest\n'), ((2057, 2086), 'raiden.api.v1.encoding.HexAddressConverter', 'HexAddressConverter', ([], {'map': 'None'}), '(map=None)\n', (2076, 2086), False, 'from raiden.api.v1.encoding import AddressField, HexAddressConverter\n'), ((2654, 2668), 'raiden.api.v1.encoding.AddressField', 'AddressField', ([], {}), '()\n', (2666, 2668), False, 'from raiden.api.v1.encoding import AddressField, HexAddressConverter\n'), ((10780, 10831), 'raiden.tests.utils.assert_dicts_are_equal', 'assert_dicts_are_equal', (['response', 'expected_response'], {}), '(response, expected_response)\n', (10802, 10831), False, 'from raiden.tests.utils import assert_dicts_are_equal\n'), ((11887, 11938), 'raiden.tests.utils.assert_dicts_are_equal', 'assert_dicts_are_equal', (['response', 
'expected_response'], {}), '(response, expected_response)\n', (11909, 11938), False, 'from raiden.tests.utils import assert_dicts_are_equal\n'), ((12798, 12849), 'raiden.tests.utils.assert_dicts_are_equal', 'assert_dicts_are_equal', (['response', 'expected_response'], {}), '(response, expected_response)\n', (12820, 12849), False, 'from raiden.tests.utils import assert_dicts_are_equal\n'), ((13676, 13727), 'raiden.tests.utils.assert_dicts_are_equal', 'assert_dicts_are_equal', (['response', 'expected_response'], {}), '(response, expected_response)\n', (13698, 13727), False, 'from raiden.tests.utils import assert_dicts_are_equal\n'), ((13830, 13881), 'raiden.tests.utils.client.burn_all_eth', 'burn_all_eth', (['api_server.rest_api.raiden_api.raiden'], {}), '(api_server.rest_api.raiden_api.raiden)\n', (13842, 13881), False, 'from raiden.tests.utils.client import burn_all_eth\n'), ((15738, 15789), 'raiden.tests.utils.assert_dicts_are_equal', 'assert_dicts_are_equal', (['response', 'expected_response'], {}), '(response, expected_response)\n', (15760, 15789), False, 'from raiden.tests.utils import assert_dicts_are_equal\n'), ((17863, 17914), 'raiden.tests.utils.assert_dicts_are_equal', 'assert_dicts_are_equal', (['response', 'expected_response'], {}), '(response, expected_response)\n', (17885, 17914), False, 'from raiden.tests.utils import assert_dicts_are_equal\n'), ((18006, 18057), 'raiden.tests.utils.client.burn_all_eth', 'burn_all_eth', (['api_server.rest_api.raiden_api.raiden'], {}), '(api_server.rest_api.raiden_api.raiden)\n', (18018, 18057), False, 'from raiden.tests.utils.client import burn_all_eth\n'), ((25798, 25832), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (25817, 25832), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((29436, 29450), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (29446, 29450), False, 'import time\n'), ((30230, 30253), 
'logging.debug', 'logging.debug', (['response'], {}), '(response)\n', (30243, 30253), False, 'import logging\n'), ((31686, 31853), 'raiden.tests.utils.smartcontracts.deploy_contract_web3', 'deploy_contract_web3', (['CONTRACT_HUMAN_STANDARD_TOKEN', 'app0.raiden.chain.client'], {'num_confirmations': 'None', 'constructor_arguments': "(token_amount, 2, 'raiden', 'Rd')"}), "(CONTRACT_HUMAN_STANDARD_TOKEN, app0.raiden.chain.\n client, num_confirmations=None, constructor_arguments=(token_amount, 2,\n 'raiden', 'Rd'))\n", (31706, 31853), False, 'from raiden.tests.utils.smartcontracts import deploy_contract_web3\n'), ((31969, 32136), 'raiden.tests.utils.smartcontracts.deploy_contract_web3', 'deploy_contract_web3', (['CONTRACT_HUMAN_STANDARD_TOKEN', 'app0.raiden.chain.client'], {'num_confirmations': 'None', 'constructor_arguments': "(token_amount, 2, 'raiden', 'Rd')"}), "(CONTRACT_HUMAN_STANDARD_TOKEN, app0.raiden.chain.\n client, num_confirmations=None, constructor_arguments=(token_amount, 2,\n 'raiden', 'Rd'))\n", (31989, 32136), False, 'from raiden.tests.utils.smartcontracts import deploy_contract_web3\n'), ((32643, 32702), 'eth_utils.is_checksum_address', 'is_checksum_address', (["response_json['token_network_address']"], {}), "(response_json['token_network_address'])\n", (32662, 32702), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((33134, 33159), 'raiden.tests.utils.client.burn_all_eth', 'burn_all_eth', (['app0.raiden'], {}), '(app0.raiden)\n', (33146, 33159), False, 'from raiden.tests.utils.client import burn_all_eth\n'), ((33954, 33993), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_addresses[0]'], {}), '(token_addresses[0])\n', (33973, 33993), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((34832, 34871), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_addresses[1]'], {}), '(token_addresses[1])\n', (34851, 34871), False, 'from 
eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((35990, 36041), 'raiden.tests.utils.client.burn_all_eth', 'burn_all_eth', (['api_server.rest_api.raiden_api.raiden'], {}), '(api_server.rest_api.raiden_api.raiden)\n', (36002, 36041), False, 'from raiden.tests.utils.client import burn_all_eth\n'), ((36079, 36118), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_addresses[0]'], {}), '(token_addresses[0])\n', (36098, 36118), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((41755, 41806), 'raiden.tests.utils.assert_dicts_are_equal', 'assert_dicts_are_equal', (['response', 'expected_response'], {}), '(response, expected_response)\n', (41777, 41806), False, 'from raiden.tests.utils import assert_dicts_are_equal\n'), ((2120, 2144), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2133, 2144), False, 'import pytest\n'), ((2222, 2246), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2235, 2246), False, 'import pytest\n'), ((2320, 2344), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2333, 2344), False, 'import pytest\n'), ((2740, 2764), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2753, 2764), False, 'import pytest\n'), ((2853, 2877), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2866, 2877), False, 'import pytest\n'), ((2962, 2986), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2975, 2986), False, 'import pytest\n'), ((7877, 7911), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (7896, 7911), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((8616, 8650), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (8635, 8650), False, 'from eth_utils import 
to_checksum_address, to_canonical_address, is_checksum_address\n'), ((10064, 10098), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (10083, 10098), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((11149, 11183), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (11168, 11183), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((12539, 12573), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (12558, 12573), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((13417, 13451), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (13436, 13451), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((14005, 14039), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (14024, 14039), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((14974, 15008), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (14993, 15008), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((16472, 16506), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (16491, 16506), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((17099, 17133), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (17118, 17133), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((18993, 19027), 'eth_utils.to_checksum_address', 
'to_checksum_address', (['token_address'], {}), '(token_address)\n', (19012, 19027), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((20168, 20202), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (20187, 20202), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((23117, 23152), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address1'], {}), '(token_address1)\n', (23136, 23152), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((23641, 23676), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address2'], {}), '(token_address2)\n', (23660, 23676), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((24280, 24315), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address1'], {}), '(token_address1)\n', (24299, 24315), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((24325, 24360), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address2'], {}), '(token_address2)\n', (24344, 24360), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((24918, 24952), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (24937, 24952), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((27462, 27494), 'eth_utils.to_checksum_address', 'to_checksum_address', (['our_address'], {}), '(our_address)\n', (27481, 27494), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((27522, 27557), 'eth_utils.to_checksum_address', 'to_checksum_address', (['target_address'], {}), '(target_address)\n', (27541, 27557), False, 'from eth_utils import 
to_checksum_address, to_canonical_address, is_checksum_address\n'), ((27584, 27618), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (27603, 27618), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((28664, 28696), 'eth_utils.to_checksum_address', 'to_checksum_address', (['our_address'], {}), '(our_address)\n', (28683, 28696), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((28724, 28759), 'eth_utils.to_checksum_address', 'to_checksum_address', (['target_address'], {}), '(target_address)\n', (28743, 28759), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((28786, 28820), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (28805, 28820), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((36978, 37012), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (36997, 37012), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((38035, 38069), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (38054, 38069), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((39133, 39167), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (39152, 39167), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((40989, 41023), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (41008, 41023), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((42118, 42152), 'eth_utils.to_checksum_address', 
'to_checksum_address', (['token_address'], {}), '(token_address)\n', (42137, 42152), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((7128, 7160), 'eth_utils.to_checksum_address', 'to_checksum_address', (['our_address'], {}), '(our_address)\n', (7147, 7160), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((9477, 9513), 'eth_utils.to_checksum_address', 'to_checksum_address', (['partner_address'], {}), '(partner_address)\n', (9496, 9513), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((9527, 9561), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (9546, 9561), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((26301, 26335), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (26320, 26335), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((26622, 26656), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (26641, 26656), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((26674, 26716), 'eth_utils.to_checksum_address', 'to_checksum_address', (['first_partner_address'], {}), '(first_partner_address)\n', (26693, 26716), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((26872, 26906), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (26891, 26906), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((26924, 26967), 'eth_utils.to_checksum_address', 'to_checksum_address', (['second_partner_address'], {}), '(second_partner_address)\n', (26943, 26967), False, 'from eth_utils 
import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((27829, 27863), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (27848, 27863), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((27892, 27927), 'eth_utils.to_checksum_address', 'to_checksum_address', (['target_address'], {}), '(target_address)\n', (27911, 27927), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((29099, 29133), 'eth_utils.to_checksum_address', 'to_checksum_address', (['token_address'], {}), '(token_address)\n', (29118, 29133), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((29162, 29197), 'eth_utils.to_checksum_address', 'to_checksum_address', (['target_address'], {}), '(target_address)\n', (29181, 29197), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((29246, 29278), 'eth_utils.to_checksum_address', 'to_checksum_address', (['our_address'], {}), '(our_address)\n', (29265, 29278), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((32353, 32391), 'eth_utils.to_checksum_address', 'to_checksum_address', (['new_token_address'], {}), '(new_token_address)\n', (32372, 32391), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((32879, 32917), 'eth_utils.to_checksum_address', 'to_checksum_address', (['new_token_address'], {}), '(new_token_address)\n', (32898, 32917), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n'), ((33282, 33322), 'eth_utils.to_checksum_address', 'to_checksum_address', (['other_token_address'], {}), '(other_token_address)\n', (33301, 33322), False, 'from eth_utils import to_checksum_address, to_canonical_address, is_checksum_address\n')] |
import os
import argparse
from . import common
# Command-line interface for this tool runner: a single optional --graph-jar
# argument pointing at the graph-extraction jar (prog2dfg.jar or apilearner.jar).
argparser = argparse.ArgumentParser(add_help=False)
graph_group = argparser.add_argument_group('graphtool arguments')
graph_group.add_argument('--graph-jar', metavar='<graphtool-jar>',
                         action='store',default=None, dest='graph_jar',
                         help='Path to prog2dfg.jar or apilearner.jar')
def run(args, javac_commands, jars):
    """Run the graph tool (prog2dfg.jar / apilearner.jar) over each javac command.

    For every javac command, the list of java source files is written to a
    temp file in the current directory, and the jar is invoked with the
    compiled class directory and that file list, emitting .dot output under
    ``<output_directory>/dot``.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments; ``graph_jar`` and ``output_directory`` are used.
    javac_commands : list of dict
        Each entry describes one javac invocation; only ``'java_files'`` is
        read here (plus whatever ``common.class_directory`` consumes).
    jars : list
        Unused here; kept for interface compatibility with the other runners.
    """
    if not args.graph_jar:
        print("Could not run graph tool: missing arg --graph-jar")
        return
    tool_command = ["java", "-jar", args.graph_jar]
    dot_dir = os.path.join(args.output_directory, "dot")
    # exist_ok avoids the check-then-create race of isdir()/makedirs().
    os.makedirs(dot_dir, exist_ok=True)
    for jc in javac_commands:
        java_files = jc['java_files']
        java_files_file = os.path.join(os.getcwd(), '__java_file_names.txt')
        class_dir = common.class_directory(jc)
        with open(java_files_file, 'w') as f:
            f.writelines(s + "\n" for s in java_files)
        # Flatten the class-dir path into a unique output dir name under dot/.
        current_outdir = os.path.join(dot_dir,
                                      class_dir.replace(os.getcwd(),'').replace(os.sep,"_"))
        cmd = tool_command + ["-o", current_outdir,
                              "-j", class_dir,
                              "-all",
                              "-source", java_files_file]
        common.run_cmd(cmd, args, 'graphtools')
| [
"os.makedirs",
"argparse.ArgumentParser",
"os.path.join",
"os.getcwd",
"os.path.isdir"
] | [((60, 99), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (83, 99), False, 'import argparse\n'), ((579, 621), 'os.path.join', 'os.path.join', (['args.output_directory', '"""dot"""'], {}), "(args.output_directory, 'dot')\n", (591, 621), False, 'import os\n'), ((631, 653), 'os.path.isdir', 'os.path.isdir', (['dot_dir'], {}), '(dot_dir)\n', (644, 653), False, 'import os\n'), ((659, 679), 'os.makedirs', 'os.makedirs', (['dot_dir'], {}), '(dot_dir)\n', (670, 679), False, 'import os\n'), ((778, 789), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (787, 789), False, 'import os\n'), ((1067, 1078), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1076, 1078), False, 'import os\n')] |
from django.urls import path
from .views import *
from django_filters.views import FilterView
app_name = 'jobs'
# URL routes for the jobs app (namespaced as 'jobs').
urlpatterns = [
    # Index: job list rendered through django-filter's FilterView with JobFilter.
    path('', FilterView.as_view(filterset_class=JobFilter,
        template_name='jobs/job_list.html'), name='index'),
    # Company listing page.
    path('companies/', CompanyListView.as_view(), name='companies'),
]
| [
"django_filters.views.FilterView.as_view"
] | [((143, 229), 'django_filters.views.FilterView.as_view', 'FilterView.as_view', ([], {'filterset_class': 'JobFilter', 'template_name': '"""jobs/job_list.html"""'}), "(filterset_class=JobFilter, template_name=\n 'jobs/job_list.html')\n", (161, 229), False, 'from django_filters.views import FilterView\n')] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.geometry import transform_points_numpy
# Public API of this module.
__all__ = [
    'mesh_transform_numpy',
    'mesh_transformed_numpy',
]
def mesh_transform_numpy(mesh, transformation):
    """Apply a transformation to the vertices of a mesh, in place.

    Parameters
    ----------
    mesh : compas.datastructures.Mesh
        The mesh to transform.
    transformation : compas.geometry.Transformation
        The transformation to apply.

    Notes
    -----
    The mesh is modified in-place; nothing is returned.

    Examples
    --------
    >>> mesh = Mesh.from_obj(compas.get('cube.obj'))
    >>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4)
    >>> tmesh = mesh.copy()
    >>> mesh_transform(tmesh, T)
    """
    keys = list(mesh.vertices())
    points = [mesh.vertex_coordinates(key) for key in keys]
    transformed = transform_points_numpy(points, transformation)
    # Write the transformed coordinates back onto the corresponding vertices.
    for key, point in zip(keys, transformed):
        mesh.vertex_attributes(key, 'xyz', point)
def mesh_transformed_numpy(mesh, transformation):
    """Return a transformed copy of a mesh.

    Parameters
    ----------
    mesh : compas.datastructures.Mesh
        The mesh to transform.
    transformation : compas.geometry.Transformation
        The transformation to apply.

    Returns
    -------
    Mesh
        A transformed independent copy of ``mesh``.

    Notes
    -----
    The input mesh is left untouched; the transformation is applied to a copy.

    Examples
    --------
    >>> mesh = Mesh.from_obj(compas.get('cube.obj'))
    >>> T = matrix_from_axis_and_angle([0, 0, 1], pi / 4)
    >>> tmesh = mesh_transformed(mesh, T)
    """
    result = mesh.copy()
    mesh_transform_numpy(result, transformation)
    return result
return mesh_copy
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod(globs=globals())
| [
"compas.geometry.transform_points_numpy"
] | [((858, 901), 'compas.geometry.transform_points_numpy', 'transform_points_numpy', (['xyz', 'transformation'], {}), '(xyz, transformation)\n', (880, 901), False, 'from compas.geometry import transform_points_numpy\n')] |
from cereal import car
from common.realtime import DT_CTRL
from common.numpy_fast import interp
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
from selfdrive.car import apply_std_steer_torque_limits
from selfdrive.car.gm import gmcan
from selfdrive.car.gm.values import DBC, AccState, CanBus, CarControllerParams
from opendbc.can.packer import CANPacker
VisualAlert = car.CarControl.HUDControl.VisualAlert
class CarController():
  """Builds the per-frame CAN messages (steering, gas/regen, friction brake,
  dashboard/HUD, radar keepalives) for GM vehicles, including this fork's
  coasting and one-pedal-driving behavior."""
  def __init__(self, dbc_name, CP, VM):
    """Set up steering/LKA bookkeeping, CAN packers for the three buses, and
    (optionally) the coast-debug CSV log header."""
    self.start_time = 0.
    self.apply_steer_last = 0
    self.lka_steering_cmd_counter_last = -1
    self.lka_icon_status_last = (False, False)
    self.steer_rate_limited = False
    self.fcw_count = 0
    self.params = CarControllerParams()
    self.packer_pt = CANPacker(DBC[CP.carFingerprint]['pt'])
    self.packer_obj = CANPacker(DBC[CP.carFingerprint]['radar'])
    self.packer_ch = CANPacker(DBC[CP.carFingerprint]['chassis'])
    # Developer-only CSV logging of the coasting/one-pedal pipeline.
    self.debug_logging = False
    self.debug_log_time_step = 0.333
    self.last_debug_log_t = 0.
    if self.debug_logging:
      with open("/data/openpilot/coast_debug.csv","w") as f:
        f.write(",".join([
          "t",
          "long plan",
          "d (m/s)",
          "v",
          "vEgo",
          "v_cruise",
          "v (mph)",
          "vEgo (mph)",
          "v_cruise (mph)",
          "ttc",
          "coast gas lockout",
          "coast brake lockout",
          "gas in",
          "brake in",
          "one-pedal",
          "coasting enabled",
          "no f brakes",
          "gas out",
          "brake out"]) + "\n")
  def update(self, enabled, CS, frame, actuators,
             hud_v_cruise, hud_show_lanes, hud_show_car, hud_alert):
    """Compute and return the list of CAN messages for this control frame.

    Also mutates several fields of ``CS`` (one-pedal ramp state,
    ``apply_brake_percent``, ``autoHoldActivated``) as side effects.
    """
    P = self.params
    # Send CAN commands.
    can_sends = []
    # Steering (50Hz)
    # Avoid GM EPS faults when transmitting messages too close together: skip this transmit if we just received the
    # next Panda loopback confirmation in the current CS frame.
    if CS.lka_steering_cmd_counter != self.lka_steering_cmd_counter_last:
      self.lka_steering_cmd_counter_last = CS.lka_steering_cmd_counter
    elif (frame % P.STEER_STEP) == 0:
      lkas_enabled = (enabled or CS.pause_long_on_gas_press) and CS.lkMode and not (CS.out.steerWarning or CS.out.steerError) and CS.out.vEgo > P.MIN_STEER_SPEED and CS.lane_change_steer_factor > 0.
      if lkas_enabled:
        # lane_change_steer_factor fades torque during manual lane changes.
        new_steer = int(round(actuators.steer * P.STEER_MAX * CS.lane_change_steer_factor))
        apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, P)
        self.steer_rate_limited = new_steer != apply_steer
      else:
        apply_steer = 0
      self.apply_steer_last = apply_steer
      # GM EPS faults on any gap in received message counters. To handle transient OP/Panda safety sync issues at the
      # moment of disengaging, increment the counter based on the last message known to pass Panda safety checks.
      idx = (CS.lka_steering_cmd_counter + 1) % 4
      can_sends.append(gmcan.create_steering_control(self.packer_pt, CanBus.POWERTRAIN, apply_steer, idx, lkas_enabled))
    # Gas/regen prep
    if not enabled or CS.pause_long_on_gas_press:
      # Stock ECU sends max regen when not enabled.
      apply_gas = P.MAX_ACC_REGEN
      apply_brake = 0
    else:
      apply_gas = interp(actuators.accel, P.GAS_LOOKUP_BP, P.GAS_LOOKUP_V)
      apply_brake = interp(actuators.accel, P.BRAKE_LOOKUP_BP, P.BRAKE_LOOKUP_V)
    t = sec_since_boot()
    # Lead-car kinematics used to decide whether coasting may override
    # cruise gas/brake: closing speed, time-to-collision, time headway.
    v_rel = CS.coasting_lead_v - CS.vEgo
    ttc = min(-CS.coasting_lead_d / v_rel if (CS.coasting_lead_d > 0. and v_rel < 0.) else 100.,100.)
    d_time = CS.coasting_lead_d / CS.vEgo if (CS.coasting_lead_d > 0. and CS.vEgo > 0. and CS.tr > 0.) else 10.
    if CS.coasting_lead_d > 0. and (ttc < CS.lead_ttc_long_gas_lockout_bp[-1] \
        or v_rel < CS.lead_v_rel_long_gas_lockout_bp[-1] \
        or CS.coasting_lead_v < CS.lead_v_long_gas_lockout_bp[-1] \
        or d_time < CS.tr * CS.lead_tr_long_gas_lockout_bp[-1]\
        or CS.coasting_lead_d < CS.lead_d_long_gas_lockout_bp[-1]):
      # Blend factors in [0, 1]: 1.0 keeps the stock longitudinal command,
      # 0.0 fully hands control to the coasting/one-pedal logic.
      lead_long_gas_lockout_factor = max([
        interp(v_rel, CS.lead_v_rel_long_gas_lockout_bp, CS.lead_v_rel_long_gas_lockout_v),
        interp(CS.coasting_lead_v, CS.lead_v_long_gas_lockout_bp, CS.lead_v_long_gas_lockout_v),
        interp(ttc, CS.lead_ttc_long_gas_lockout_bp, CS.lead_ttc_long_gas_lockout_v),
        interp(d_time / CS.tr, CS.lead_tr_long_gas_lockout_bp, CS.lead_tr_long_gas_lockout_v),
        interp(CS.coasting_lead_d, CS.lead_d_long_gas_lockout_bp, CS.lead_d_long_gas_lockout_v)])
      if CS.coasting_lead_d > 0. and (ttc < CS.lead_ttc_long_brake_lockout_bp[-1] \
          or v_rel < CS.lead_v_rel_long_brake_lockout_bp[-1] \
          or CS.coasting_lead_v < CS.lead_v_long_brake_lockout_bp[-1] \
          or d_time < CS.tr * CS.lead_tr_long_brake_lockout_bp[-1]\
          or CS.coasting_lead_d < CS.lead_d_long_brake_lockout_bp[-1]):
        lead_long_brake_lockout_factor = max([
          interp(v_rel, CS.lead_v_rel_long_brake_lockout_bp, CS.lead_v_rel_long_brake_lockout_v),
          interp(CS.coasting_lead_v, CS.lead_v_long_brake_lockout_bp, CS.lead_v_long_brake_lockout_v),
          interp(ttc, CS.lead_ttc_long_brake_lockout_bp, CS.lead_ttc_long_brake_lockout_v),
          interp(d_time / CS.tr, CS.lead_tr_long_brake_lockout_bp, CS.lead_tr_long_brake_lockout_v),
          interp(CS.coasting_lead_d, CS.lead_d_long_brake_lockout_bp, CS.lead_d_long_brake_lockout_v)])
      else:
        lead_long_brake_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking
    else:
      lead_long_gas_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking
      lead_long_brake_lockout_factor = 0. # 1.0 means regular braking logic is completely unaltered, 0.0 means no cruise braking
    # debug logging
    do_log = self.debug_logging and (t - self.last_debug_log_t > self.debug_log_time_step)
    if do_log:
      self.last_debug_log_t = t
      f = open("/data/openpilot/coast_debug.csv","a")
      f.write(",".join([f"{i:.1f}" if i == float else str(i) for i in [
        t - CS.sessionInitTime,
        CS.coasting_long_plan,
        CS.coasting_lead_d,
        CS.coasting_lead_v,
        CS.vEgo,
        CS.v_cruise_kph * CV.KPH_TO_MS,
        CS.coasting_lead_v * CV.MS_TO_MPH,
        CS.vEgo * CV.MS_TO_MPH,
        CS.v_cruise_kph * CV.KPH_TO_MPH,
        ttc,
        lead_long_gas_lockout_factor,
        lead_long_brake_lockout_factor,
        int(apply_gas),
        int(apply_brake),
        (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active),
        CS.coasting_enabled,
        CS.no_friction_braking]]) + ",")
    if (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active):
      # One-pedal driving: replace cruise gas with regen and compute a
      # mode/pitch/steer-dependent friction-brake level, ramped over time.
      if not CS.one_pedal_mode_active and CS.gear_shifter_ev == 4 and CS.one_pedal_dl_coasting_enabled and CS.vEgo > 0.05:
        apply_gas = apply_gas * lead_long_gas_lockout_factor + float(P.ZERO_GAS ) * (1. - lead_long_gas_lockout_factor)
      else:
        apply_gas = apply_gas * lead_long_gas_lockout_factor + float(P.MAX_ACC_REGEN) * (1. - lead_long_gas_lockout_factor)
      time_since_brake = t - CS.one_pedal_mode_last_gas_press_t
      if CS.one_pedal_mode_active:
        if abs(CS.angle_steers) > CS.one_pedal_angle_steers_cutoff_bp[0]:
          # Soften braking with increasing steering angle by blending toward
          # the next-lighter brake mode.
          one_pedal_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode])
          one_pedal_apply_brake_minus1 = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[max(0,CS.one_pedal_brake_mode-1)], CS.one_pedal_mode_stop_apply_brake_v[max(0,CS.one_pedal_brake_mode-1)])
          one_pedal_apply_brake = interp(abs(CS.angle_steers), CS.one_pedal_angle_steers_cutoff_bp, [one_pedal_apply_brake, one_pedal_apply_brake_minus1])
        else:
          one_pedal_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode])
        one_pedal_apply_brake *= interp(CS.pitch, CS.one_pedal_pitch_brake_adjust_bp, CS.one_pedal_pitch_brake_adjust_v[CS.one_pedal_brake_mode])
        one_pedal_apply_brake = min(one_pedal_apply_brake, float(P.BRAKE_LOOKUP_V[0]))
        one_pedal_apply_brake *= interp(time_since_brake, CS.one_pedal_mode_ramp_time_bp, CS.one_pedal_mode_ramp_time_v) if CS.one_pedal_brake_mode < 2 else 1.
      else:
        one_pedal_apply_brake = 0.
      # ramp braking
      if CS.one_pedal_mode_active_last and time_since_brake > CS.one_pedal_mode_ramp_time_bp[-1]:
        if CS.one_pedal_mode_apply_brake != one_pedal_apply_brake:
          if CS.one_pedal_mode_ramp_mode_last != CS.one_pedal_brake_mode:
            # brake mode changed, so need to calculate new step based on the old and new modes
            old_apply_brake = interp(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_mode_ramp_mode_last], CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_mode_ramp_mode_last])
            CS.one_pedal_mode_ramp_time_step = (one_pedal_apply_brake - old_apply_brake) / (CS.one_pedal_mode_ramp_duration * (2. if CS.one_pedal_mode_apply_brake > one_pedal_apply_brake else 1.))
          # NOTE(review): the clamp directions below look inverted — max() on
          # the increasing ramp (and min() on the decreasing one) appears to
          # snap straight to the target brake rather than ramp toward it.
          # Confirm this is intended before changing anything.
          if CS.one_pedal_mode_apply_brake < one_pedal_apply_brake:
            if CS.one_pedal_mode_ramp_time_step < 0.:
              CS.one_pedal_mode_ramp_time_step *= -1.
            CS.one_pedal_mode_apply_brake = max(one_pedal_apply_brake, CS.one_pedal_mode_apply_brake + CS.one_pedal_mode_ramp_time_step * (t - CS.one_pedal_mode_ramp_t_last))
          else:
            if CS.one_pedal_mode_ramp_time_step > 0.:
              CS.one_pedal_mode_ramp_time_step *= -1.
            CS.one_pedal_mode_apply_brake = min(one_pedal_apply_brake, CS.one_pedal_mode_apply_brake + CS.one_pedal_mode_ramp_time_step * (t - CS.one_pedal_mode_ramp_t_last))
          one_pedal_apply_brake = CS.one_pedal_mode_apply_brake
      else:
        CS.one_pedal_mode_apply_brake = one_pedal_apply_brake
        CS.one_pedal_mode_active_last = True
      CS.one_pedal_mode_ramp_t_last = t
      CS.one_pedal_mode_ramp_mode_last = CS.one_pedal_brake_mode
      if CS.one_pedal_mode_op_braking_allowed and CS.coasting_long_plan not in ['cruise', 'limit']:
        apply_brake = max(one_pedal_apply_brake, apply_brake * lead_long_brake_lockout_factor)
      else:
        apply_brake = one_pedal_apply_brake
    elif CS.coasting_enabled and lead_long_brake_lockout_factor < 1.:
      # Coasting: suppress cruise brake/regen below the set speed, restoring
      # them progressively when over the set/limit speed.
      if CS.coasting_long_plan in ['cruise', 'limit'] and apply_gas < P.ZERO_GAS or apply_brake > 0.:
        check_speed_ms = (CS.speed_limit if CS.speed_limit_active and CS.speed_limit < CS.v_cruise_kph else CS.v_cruise_kph) * CV.KPH_TO_MS
        if apply_brake > 0.:
          coasting_over_speed_vEgo_BP = [
            interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_vEgo_BP[0]),
            interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_vEgo_BP[1])
          ]
          over_speed_factor = interp(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0., 1.]) if (check_speed_ms > 0. and CS.coasting_brake_over_speed_enabled) else 0.
          over_speed_brake = apply_brake * over_speed_factor
          apply_brake = max([apply_brake * lead_long_brake_lockout_factor, over_speed_brake])
        if apply_gas < P.ZERO_GAS and lead_long_gas_lockout_factor < 1.:
          coasting_over_speed_vEgo_BP = [
            interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_regen_vEgo_BP[0]),
            interp(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.coasting_over_speed_regen_vEgo_BP[1])
          ]
          over_speed_factor = interp(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0., 1.]) if (check_speed_ms > 0 and CS.coasting_brake_over_speed_enabled) else 0.
          coast_apply_gas = int(round(float(P.ZERO_GAS) - over_speed_factor * (P.ZERO_GAS - apply_gas)))
          apply_gas = apply_gas * lead_long_gas_lockout_factor + coast_apply_gas * (1. - lead_long_gas_lockout_factor)
    elif CS.no_friction_braking and lead_long_brake_lockout_factor < 1.:
      if CS.coasting_long_plan in ['cruise', 'limit'] and apply_brake > 0.:
        apply_brake *= lead_long_brake_lockout_factor
    apply_gas = int(round(apply_gas))
    apply_brake = int(round(apply_brake))
    CS.one_pedal_mode_active_last = CS.one_pedal_mode_active
    if do_log:
      f.write(",".join([str(i) for i in [
        apply_gas,
        apply_brake]]) + "\n")
      f.close()
    if CS.showBrakeIndicator:
      # Map the active brake/regen source onto a 0-100 UI percentage
      # (0-50 for regen wattage, 51-100 for friction brake).
      CS.apply_brake_percent = 0.
      if CS.vEgo > 0.1:
        if CS.out.cruiseState.enabled:
          if not CS.pause_long_on_gas_press:
            if apply_brake > 1:
              CS.apply_brake_percent = interp(apply_brake, [float(P.BRAKE_LOOKUP_V[-1]), float(P.BRAKE_LOOKUP_V[0])], [51., 100.])
            elif (CS.one_pedal_mode_active or CS.coast_one_pedal_mode_active):
              CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.])
            elif apply_gas < P.ZERO_GAS:
              CS.apply_brake_percent = interp(apply_gas, [float(P.GAS_LOOKUP_V[0]), float(P.GAS_LOOKUP_V[1])], [51., 0.])
            else:
              CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.])
          elif CS.is_ev and CS.out.brake == 0.:
            CS.apply_brake_percent = interp(CS.hvb_wattage, CS.hvb_wattage_bp, [0., 50.])
          elif CS.out.brake > 0.:
            CS.apply_brake_percent = interp(CS.out.brake, [0., 0.5], [51., 100.])
        elif CS.out.brake > 0.:
          CS.apply_brake_percent = interp(CS.out.brake, [0., 0.5], [51., 100.])
    # Gas/regen and brakes - all at 25Hz
    if (frame % 4) == 0:
      idx = (frame // 4) % 4
      if CS.cruiseMain and not enabled and CS.autoHold and CS.autoHoldActive and not CS.out.gasPressed and CS.out.gearShifter in ['drive','low'] and CS.out.vEgo < 0.02 and not CS.regenPaddlePressed:
        # Auto Hold State
        car_stopping = apply_gas < P.ZERO_GAS
        standstill = CS.pcm_acc_status == AccState.STANDSTILL
        at_full_stop = standstill and car_stopping
        near_stop = (CS.out.vEgo < P.NEAR_STOP_BRAKE_PHASE) and car_stopping
        can_sends.append(gmcan.create_friction_brake_command(self.packer_ch, CanBus.CHASSIS, apply_brake, idx, near_stop, at_full_stop))
        CS.autoHoldActivated = True
      else:
        if CS.pause_long_on_gas_press:
          at_full_stop = False
          near_stop = False
          car_stopping = False
          standstill = False
        else:
          car_stopping = apply_gas < P.ZERO_GAS
          standstill = CS.pcm_acc_status == AccState.STANDSTILL
          at_full_stop = enabled and standstill and car_stopping
          near_stop = enabled and (CS.out.vEgo < P.NEAR_STOP_BRAKE_PHASE) and car_stopping
        can_sends.append(gmcan.create_friction_brake_command(self.packer_ch, CanBus.CHASSIS, apply_brake, idx, near_stop, at_full_stop))
        CS.autoHoldActivated = False
        # Auto-resume from full stop by resetting ACC control
        acc_enabled = enabled
        if standstill and not car_stopping:
          acc_enabled = False
        can_sends.append(gmcan.create_gas_regen_command(self.packer_pt, CanBus.POWERTRAIN, apply_gas, idx, acc_enabled, at_full_stop))
    # Send dashboard UI commands (ACC status), 25hz
    if (frame % 4) == 0:
      send_fcw = hud_alert == VisualAlert.fcw
      follow_level = CS.get_follow_level()
      can_sends.append(gmcan.create_acc_dashboard_command(self.packer_pt, CanBus.POWERTRAIN, enabled,
                                                         hud_v_cruise * CV.MS_TO_KPH, hud_show_car, follow_level, send_fcw))
    # Radar needs to know current speed and yaw rate (50hz),
    # and that ADAS is alive (10hz)
    time_and_headlights_step = 10
    tt = frame * DT_CTRL
    if frame % time_and_headlights_step == 0:
      idx = (frame // time_and_headlights_step) % 4
      can_sends.append(gmcan.create_adas_time_status(CanBus.OBSTACLE, int((tt - self.start_time) * 60), idx))
      can_sends.append(gmcan.create_adas_headlights_status(self.packer_obj, CanBus.OBSTACLE))
    speed_and_accelerometer_step = 2
    if frame % speed_and_accelerometer_step == 0:
      idx = (frame // speed_and_accelerometer_step) % 4
      can_sends.append(gmcan.create_adas_steering_status(CanBus.OBSTACLE, idx))
      can_sends.append(gmcan.create_adas_accelerometer_speed_status(CanBus.OBSTACLE, CS.out.vEgo, idx))
    if frame % P.ADAS_KEEPALIVE_STEP == 0:
      can_sends += gmcan.create_adas_keepalive(CanBus.POWERTRAIN)
    # Show green icon when LKA torque is applied, and
    # alarming orange icon when approaching torque limit.
    # If not sent again, LKA icon disappears in about 5 seconds.
    # Conveniently, sending camera message periodically also works as a keepalive.
    lka_active = CS.lkas_status == 1
    lka_critical = lka_active and abs(actuators.steer) > 0.9
    lka_icon_status = (lka_active, lka_critical)
    if frame % P.CAMERA_KEEPALIVE_STEP == 0 or lka_icon_status != self.lka_icon_status_last:
      steer_alert = hud_alert in [VisualAlert.steerRequired, VisualAlert.ldw]
      can_sends.append(gmcan.create_lka_icon_command(CanBus.SW_GMLAN, lka_active, lka_critical, steer_alert))
      self.lka_icon_status_last = lka_icon_status
    return can_sends
| [
"selfdrive.car.gm.values.CarControllerParams",
"selfdrive.car.gm.gmcan.create_steering_control",
"selfdrive.car.gm.gmcan.create_acc_dashboard_command",
"selfdrive.car.gm.gmcan.create_adas_steering_status",
"selfdrive.car.gm.gmcan.create_lka_icon_command",
"selfdrive.car.gm.gmcan.create_gas_regen_command",... | [((739, 760), 'selfdrive.car.gm.values.CarControllerParams', 'CarControllerParams', ([], {}), '()\n', (758, 760), False, 'from selfdrive.car.gm.values import DBC, AccState, CanBus, CarControllerParams\n'), ((783, 822), 'opendbc.can.packer.CANPacker', 'CANPacker', (["DBC[CP.carFingerprint]['pt']"], {}), "(DBC[CP.carFingerprint]['pt'])\n", (792, 822), False, 'from opendbc.can.packer import CANPacker\n'), ((845, 887), 'opendbc.can.packer.CANPacker', 'CANPacker', (["DBC[CP.carFingerprint]['radar']"], {}), "(DBC[CP.carFingerprint]['radar'])\n", (854, 887), False, 'from opendbc.can.packer import CANPacker\n'), ((909, 953), 'opendbc.can.packer.CANPacker', 'CANPacker', (["DBC[CP.carFingerprint]['chassis']"], {}), "(DBC[CP.carFingerprint]['chassis'])\n", (918, 953), False, 'from opendbc.can.packer import CANPacker\n'), ((3368, 3424), 'common.numpy_fast.interp', 'interp', (['actuators.accel', 'P.GAS_LOOKUP_BP', 'P.GAS_LOOKUP_V'], {}), '(actuators.accel, P.GAS_LOOKUP_BP, P.GAS_LOOKUP_V)\n', (3374, 3424), False, 'from common.numpy_fast import interp\n'), ((3445, 3505), 'common.numpy_fast.interp', 'interp', (['actuators.accel', 'P.BRAKE_LOOKUP_BP', 'P.BRAKE_LOOKUP_V'], {}), '(actuators.accel, P.BRAKE_LOOKUP_BP, P.BRAKE_LOOKUP_V)\n', (3451, 3505), False, 'from common.numpy_fast import interp\n'), ((3516, 3532), 'common.realtime.sec_since_boot', 'sec_since_boot', ([], {}), '()\n', (3530, 3532), False, 'from common.realtime import sec_since_boot\n'), ((17052, 17098), 'selfdrive.car.gm.gmcan.create_adas_keepalive', 'gmcan.create_adas_keepalive', (['CanBus.POWERTRAIN'], {}), '(CanBus.POWERTRAIN)\n', (17079, 17098), False, 'from selfdrive.car.gm import gmcan\n'), ((15988, 16137), 'selfdrive.car.gm.gmcan.create_acc_dashboard_command', 'gmcan.create_acc_dashboard_command', (['self.packer_pt', 'CanBus.POWERTRAIN', 'enabled', '(hud_v_cruise * CV.MS_TO_KPH)', 'hud_show_car', 'follow_level', 'send_fcw'], {}), '(self.packer_pt, 
CanBus.POWERTRAIN,\n enabled, hud_v_cruise * CV.MS_TO_KPH, hud_show_car, follow_level, send_fcw)\n', (16022, 16137), False, 'from selfdrive.car.gm import gmcan\n'), ((16590, 16659), 'selfdrive.car.gm.gmcan.create_adas_headlights_status', 'gmcan.create_adas_headlights_status', (['self.packer_obj', 'CanBus.OBSTACLE'], {}), '(self.packer_obj, CanBus.OBSTACLE)\n', (16625, 16659), False, 'from selfdrive.car.gm import gmcan\n'), ((16828, 16883), 'selfdrive.car.gm.gmcan.create_adas_steering_status', 'gmcan.create_adas_steering_status', (['CanBus.OBSTACLE', 'idx'], {}), '(CanBus.OBSTACLE, idx)\n', (16861, 16883), False, 'from selfdrive.car.gm import gmcan\n'), ((16908, 16987), 'selfdrive.car.gm.gmcan.create_adas_accelerometer_speed_status', 'gmcan.create_adas_accelerometer_speed_status', (['CanBus.OBSTACLE', 'CS.out.vEgo', 'idx'], {}), '(CanBus.OBSTACLE, CS.out.vEgo, idx)\n', (16952, 16987), False, 'from selfdrive.car.gm import gmcan\n'), ((17701, 17790), 'selfdrive.car.gm.gmcan.create_lka_icon_command', 'gmcan.create_lka_icon_command', (['CanBus.SW_GMLAN', 'lka_active', 'lka_critical', 'steer_alert'], {}), '(CanBus.SW_GMLAN, lka_active, lka_critical,\n steer_alert)\n', (17730, 17790), False, 'from selfdrive.car.gm import gmcan\n'), ((2528, 2622), 'selfdrive.car.apply_std_steer_torque_limits', 'apply_std_steer_torque_limits', (['new_steer', 'self.apply_steer_last', 'CS.out.steeringTorque', 'P'], {}), '(new_steer, self.apply_steer_last, CS.out.\n steeringTorque, P)\n', (2557, 2622), False, 'from selfdrive.car import apply_std_steer_torque_limits\n'), ((3062, 3162), 'selfdrive.car.gm.gmcan.create_steering_control', 'gmcan.create_steering_control', (['self.packer_pt', 'CanBus.POWERTRAIN', 'apply_steer', 'idx', 'lkas_enabled'], {}), '(self.packer_pt, CanBus.POWERTRAIN,\n apply_steer, idx, lkas_enabled)\n', (3091, 3162), False, 'from selfdrive.car.gm import gmcan\n'), ((8360, 8477), 'common.numpy_fast.interp', 'interp', (['CS.pitch', 'CS.one_pedal_pitch_brake_adjust_bp', 
'CS.one_pedal_pitch_brake_adjust_v[CS.one_pedal_brake_mode]'], {}), '(CS.pitch, CS.one_pedal_pitch_brake_adjust_bp, CS.\n one_pedal_pitch_brake_adjust_v[CS.one_pedal_brake_mode])\n', (8366, 8477), False, 'from common.numpy_fast import interp\n'), ((14072, 14119), 'common.numpy_fast.interp', 'interp', (['CS.out.brake', '[0.0, 0.5]', '[51.0, 100.0]'], {}), '(CS.out.brake, [0.0, 0.5], [51.0, 100.0])\n', (14078, 14119), False, 'from common.numpy_fast import interp\n'), ((14705, 14819), 'selfdrive.car.gm.gmcan.create_friction_brake_command', 'gmcan.create_friction_brake_command', (['self.packer_ch', 'CanBus.CHASSIS', 'apply_brake', 'idx', 'near_stop', 'at_full_stop'], {}), '(self.packer_ch, CanBus.CHASSIS,\n apply_brake, idx, near_stop, at_full_stop)\n', (14740, 14819), False, 'from selfdrive.car.gm import gmcan\n'), ((15332, 15446), 'selfdrive.car.gm.gmcan.create_friction_brake_command', 'gmcan.create_friction_brake_command', (['self.packer_ch', 'CanBus.CHASSIS', 'apply_brake', 'idx', 'near_stop', 'at_full_stop'], {}), '(self.packer_ch, CanBus.CHASSIS,\n apply_brake, idx, near_stop, at_full_stop)\n', (15367, 15446), False, 'from selfdrive.car.gm import gmcan\n'), ((15687, 15799), 'selfdrive.car.gm.gmcan.create_gas_regen_command', 'gmcan.create_gas_regen_command', (['self.packer_pt', 'CanBus.POWERTRAIN', 'apply_gas', 'idx', 'acc_enabled', 'at_full_stop'], {}), '(self.packer_pt, CanBus.POWERTRAIN, apply_gas,\n idx, acc_enabled, at_full_stop)\n', (15717, 15799), False, 'from selfdrive.car.gm import gmcan\n'), ((4204, 4291), 'common.numpy_fast.interp', 'interp', (['v_rel', 'CS.lead_v_rel_long_gas_lockout_bp', 'CS.lead_v_rel_long_gas_lockout_v'], {}), '(v_rel, CS.lead_v_rel_long_gas_lockout_bp, CS.\n lead_v_rel_long_gas_lockout_v)\n', (4210, 4291), False, 'from common.numpy_fast import interp\n'), ((4299, 4391), 'common.numpy_fast.interp', 'interp', (['CS.coasting_lead_v', 'CS.lead_v_long_gas_lockout_bp', 'CS.lead_v_long_gas_lockout_v'], {}), '(CS.coasting_lead_v, 
CS.lead_v_long_gas_lockout_bp, CS.\n lead_v_long_gas_lockout_v)\n', (4305, 4391), False, 'from common.numpy_fast import interp\n'), ((4398, 4474), 'common.numpy_fast.interp', 'interp', (['ttc', 'CS.lead_ttc_long_gas_lockout_bp', 'CS.lead_ttc_long_gas_lockout_v'], {}), '(ttc, CS.lead_ttc_long_gas_lockout_bp, CS.lead_ttc_long_gas_lockout_v)\n', (4404, 4474), False, 'from common.numpy_fast import interp\n'), ((4486, 4576), 'common.numpy_fast.interp', 'interp', (['(d_time / CS.tr)', 'CS.lead_tr_long_gas_lockout_bp', 'CS.lead_tr_long_gas_lockout_v'], {}), '(d_time / CS.tr, CS.lead_tr_long_gas_lockout_bp, CS.\n lead_tr_long_gas_lockout_v)\n', (4492, 4576), False, 'from common.numpy_fast import interp\n'), ((4583, 4675), 'common.numpy_fast.interp', 'interp', (['CS.coasting_lead_d', 'CS.lead_d_long_gas_lockout_bp', 'CS.lead_d_long_gas_lockout_v'], {}), '(CS.coasting_lead_d, CS.lead_d_long_gas_lockout_bp, CS.\n lead_d_long_gas_lockout_v)\n', (4589, 4675), False, 'from common.numpy_fast import interp\n'), ((7626, 7778), 'common.numpy_fast.interp', 'interp', (['CS.vEgo', 'CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode]', 'CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode]'], {}), '(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.\n one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.\n one_pedal_brake_mode])\n', (7632, 7778), False, 'from common.numpy_fast import interp\n'), ((8182, 8334), 'common.numpy_fast.interp', 'interp', (['CS.vEgo', 'CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_brake_mode]', 'CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_brake_mode]'], {}), '(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.\n one_pedal_brake_mode], CS.one_pedal_mode_stop_apply_brake_v[CS.\n one_pedal_brake_mode])\n', (8188, 8334), False, 'from common.numpy_fast import interp\n'), ((8597, 8689), 'common.numpy_fast.interp', 'interp', (['time_since_brake', 'CS.one_pedal_mode_ramp_time_bp', 'CS.one_pedal_mode_ramp_time_v'], {}), 
'(time_since_brake, CS.one_pedal_mode_ramp_time_bp, CS.\n one_pedal_mode_ramp_time_v)\n', (8603, 8689), False, 'from common.numpy_fast import interp\n'), ((13710, 13764), 'common.numpy_fast.interp', 'interp', (['CS.hvb_wattage', 'CS.hvb_wattage_bp', '[0.0, 50.0]'], {}), '(CS.hvb_wattage, CS.hvb_wattage_bp, [0.0, 50.0])\n', (13716, 13764), False, 'from common.numpy_fast import interp\n'), ((13844, 13898), 'common.numpy_fast.interp', 'interp', (['CS.hvb_wattage', 'CS.hvb_wattage_bp', '[0.0, 50.0]'], {}), '(CS.hvb_wattage, CS.hvb_wattage_bp, [0.0, 50.0])\n', (13850, 13898), False, 'from common.numpy_fast import interp\n'), ((5106, 5197), 'common.numpy_fast.interp', 'interp', (['v_rel', 'CS.lead_v_rel_long_brake_lockout_bp', 'CS.lead_v_rel_long_brake_lockout_v'], {}), '(v_rel, CS.lead_v_rel_long_brake_lockout_bp, CS.\n lead_v_rel_long_brake_lockout_v)\n', (5112, 5197), False, 'from common.numpy_fast import interp\n'), ((5207, 5303), 'common.numpy_fast.interp', 'interp', (['CS.coasting_lead_v', 'CS.lead_v_long_brake_lockout_bp', 'CS.lead_v_long_brake_lockout_v'], {}), '(CS.coasting_lead_v, CS.lead_v_long_brake_lockout_bp, CS.\n lead_v_long_brake_lockout_v)\n', (5213, 5303), False, 'from common.numpy_fast import interp\n'), ((5312, 5397), 'common.numpy_fast.interp', 'interp', (['ttc', 'CS.lead_ttc_long_brake_lockout_bp', 'CS.lead_ttc_long_brake_lockout_v'], {}), '(ttc, CS.lead_ttc_long_brake_lockout_bp, CS.lead_ttc_long_brake_lockout_v\n )\n', (5318, 5397), False, 'from common.numpy_fast import interp\n'), ((5406, 5500), 'common.numpy_fast.interp', 'interp', (['(d_time / CS.tr)', 'CS.lead_tr_long_brake_lockout_bp', 'CS.lead_tr_long_brake_lockout_v'], {}), '(d_time / CS.tr, CS.lead_tr_long_brake_lockout_bp, CS.\n lead_tr_long_brake_lockout_v)\n', (5412, 5500), False, 'from common.numpy_fast import interp\n'), ((5509, 5605), 'common.numpy_fast.interp', 'interp', (['CS.coasting_lead_d', 'CS.lead_d_long_brake_lockout_bp', 'CS.lead_d_long_brake_lockout_v'], {}), 
'(CS.coasting_lead_d, CS.lead_d_long_brake_lockout_bp, CS.\n lead_d_long_brake_lockout_v)\n', (5515, 5605), False, 'from common.numpy_fast import interp\n'), ((9192, 9362), 'common.numpy_fast.interp', 'interp', (['CS.vEgo', 'CS.one_pedal_mode_stop_apply_brake_bp[CS.one_pedal_mode_ramp_mode_last]', 'CS.one_pedal_mode_stop_apply_brake_v[CS.one_pedal_mode_ramp_mode_last]'], {}), '(CS.vEgo, CS.one_pedal_mode_stop_apply_brake_bp[CS.\n one_pedal_mode_ramp_mode_last], CS.one_pedal_mode_stop_apply_brake_v[CS\n .one_pedal_mode_ramp_mode_last])\n', (9198, 9362), False, 'from common.numpy_fast import interp\n'), ((13964, 14011), 'common.numpy_fast.interp', 'interp', (['CS.out.brake', '[0.0, 0.5]', '[51.0, 100.0]'], {}), '(CS.out.brake, [0.0, 0.5], [51.0, 100.0])\n', (13970, 14011), False, 'from common.numpy_fast import interp\n'), ((11202, 11292), 'common.numpy_fast.interp', 'interp', (['CS.vEgo', 'CS.coasting_over_speed_vEgo_BP_BP', 'CS.coasting_over_speed_vEgo_BP[0]'], {}), '(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.\n coasting_over_speed_vEgo_BP[0])\n', (11208, 11292), False, 'from common.numpy_fast import interp\n'), ((11303, 11393), 'common.numpy_fast.interp', 'interp', (['CS.vEgo', 'CS.coasting_over_speed_vEgo_BP_BP', 'CS.coasting_over_speed_vEgo_BP[1]'], {}), '(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.\n coasting_over_speed_vEgo_BP[1])\n', (11309, 11393), False, 'from common.numpy_fast import interp\n'), ((11435, 11508), 'common.numpy_fast.interp', 'interp', (['(CS.vEgo / check_speed_ms)', 'coasting_over_speed_vEgo_BP', '[0.0, 1.0]'], {}), '(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0.0, 1.0])\n', (11441, 11508), False, 'from common.numpy_fast import interp\n'), ((11873, 11969), 'common.numpy_fast.interp', 'interp', (['CS.vEgo', 'CS.coasting_over_speed_vEgo_BP_BP', 'CS.coasting_over_speed_regen_vEgo_BP[0]'], {}), '(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.\n coasting_over_speed_regen_vEgo_BP[0])\n', (11879, 11969), False, 'from 
common.numpy_fast import interp\n'), ((11980, 12076), 'common.numpy_fast.interp', 'interp', (['CS.vEgo', 'CS.coasting_over_speed_vEgo_BP_BP', 'CS.coasting_over_speed_regen_vEgo_BP[1]'], {}), '(CS.vEgo, CS.coasting_over_speed_vEgo_BP_BP, CS.\n coasting_over_speed_regen_vEgo_BP[1])\n', (11986, 12076), False, 'from common.numpy_fast import interp\n'), ((12118, 12191), 'common.numpy_fast.interp', 'interp', (['(CS.vEgo / check_speed_ms)', 'coasting_over_speed_vEgo_BP', '[0.0, 1.0]'], {}), '(CS.vEgo / check_speed_ms, coasting_over_speed_vEgo_BP, [0.0, 1.0])\n', (12124, 12191), False, 'from common.numpy_fast import interp\n'), ((13441, 13495), 'common.numpy_fast.interp', 'interp', (['CS.hvb_wattage', 'CS.hvb_wattage_bp', '[0.0, 50.0]'], {}), '(CS.hvb_wattage, CS.hvb_wattage_bp, [0.0, 50.0])\n', (13447, 13495), False, 'from common.numpy_fast import interp\n')] |
# Program name: atomic1D/reference/build_json.py
# Author: <NAME>
# Author email: <EMAIL>
# Date of creation: 14 July 2017
#
#
# Makes data_dict and copies it into a .json file 'sd1d-case-05.json'
filename = 'sd1d-case-05'
from boutdata.collect import collect
# Pull each quantity of interest straight out of the BOUT++ dump files.
# Dictionary keys double as the variable names passed to collect():
#   Tnorm - temperature normalisation (T * Tnorm -> eV)
#   Nnorm - density normalisation (N * Nnorm -> m^-3)
#   P     - plasma pressure (normalised); Pe = 2 Ne Te => P/Ne = Te (assume Ti=Te)
#   Ne    - electron density (normalised)
#   Nn    - neutral density (normalised)
data_dict = {}
for quantity in ('Tnorm', 'Nnorm', 'P', 'Ne', 'Nn'):
    data_dict[quantity] = collect(quantity)
# Help text for whoever loads the resulting JSON file.
data_dict["help"] = "Contains outputs from Boutprojects/SD1D/case-05 example. Created with data_dict_export.py - stored in Github.com/TBody/atomic1D/reference"
from copy import deepcopy
import numpy as np
import json
# JSON cannot hold numpy arrays, so serialise a deep copy (leaving data_dict
# intact for a later copy-verify) with every ndarray converted to nested
# lists. Record which keys held arrays so a reader can reverse the conversion.
data_dict_jsonified = deepcopy(data_dict)
numpy_ndarrays = [key for key, value in data_dict.items() if type(value) is np.ndarray]
for key in numpy_ndarrays:
    data_dict_jsonified[key] = data_dict_jsonified[key].tolist()
data_dict_jsonified['numpy_ndarrays'] = numpy_ndarrays
# <<Use original filename, except with .json instead of .dat extension>>
with open('{}.json'.format(filename),'w') as fp:
json.dump(data_dict_jsonified, fp, sort_keys=True, indent=4) | [
"boutdata.collect.collect",
"json.dump",
"copy.deepcopy"
] | [((367, 383), 'boutdata.collect.collect', 'collect', (['"""Tnorm"""'], {}), "('Tnorm')\n", (374, 383), False, 'from boutdata.collect import collect\n'), ((468, 484), 'boutdata.collect.collect', 'collect', (['"""Nnorm"""'], {}), "('Nnorm')\n", (475, 484), False, 'from boutdata.collect import collect\n'), ((579, 591), 'boutdata.collect.collect', 'collect', (['"""P"""'], {}), "('P')\n", (586, 591), False, 'from boutdata.collect import collect\n'), ((642, 655), 'boutdata.collect.collect', 'collect', (['"""Ne"""'], {}), "('Ne')\n", (649, 655), False, 'from boutdata.collect import collect\n'), ((705, 718), 'boutdata.collect.collect', 'collect', (['"""Nn"""'], {}), "('Nn')\n", (712, 718), False, 'from boutdata.collect import collect\n'), ((1260, 1279), 'copy.deepcopy', 'deepcopy', (['data_dict'], {}), '(data_dict)\n', (1268, 1279), False, 'from copy import deepcopy\n'), ((1825, 1885), 'json.dump', 'json.dump', (['data_dict_jsonified', 'fp'], {'sort_keys': '(True)', 'indent': '(4)'}), '(data_dict_jsonified, fp, sort_keys=True, indent=4)\n', (1834, 1885), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
Spyder-style exploration of basic numpy array attributes and reductions.
Created on Wed Sep 15 08:32:03 2021
@author: User
"""
import numpy as np
import matplotlib.pyplot as plt  # NOTE(review): imported but unused in this snippet
# 3x4 integer array used throughout the cells below.
a = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
print(a)
print(a[0])
print(a.ndim) #number of axes (dimensions) of the array
print(a.shape) #tuple of ints giving the number of elements along each axis
print(a.size)
#%%
# np.newaxis inserts a leading axis, turning (3, 4) into (1, 3, 4).
vec_fila = a[np.newaxis, :]
print(vec_fila.shape, a.shape)
#%%
# Whole-array reductions.
print(a.sum())
print(a.min())
print(a.max())
#%%
# axis=1 reduces across columns (per-row max); axis=0 across rows (per-column max).
print(a)
print(a.max(axis=1))
print(a.max(axis=0))
#%%
print(np.random.random(3)) | [
"numpy.random.random",
"numpy.array"
] | [((138, 193), 'numpy.array', 'np.array', (['[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]'], {}), '([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])\n', (146, 193), True, 'import numpy as np\n'), ((577, 596), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (593, 596), True, 'import numpy as np\n')] |
''' Handles calibration library and calibration of subs.
'''
import os.path
import numpy as np
from scipy.stats import trimboth
from kivy.app import App
from loguru import logger
from kivy.properties import BooleanProperty, DictProperty, NumericProperty
from kivy.core.window import Window
from jocular.table import Table
from jocular.utils import make_unique_filename
from jocular.component import Component
from jocular.settingsmanager import Settings
from jocular.image import Image, save_image, fits_in_dir
# strftime/strptime pattern for the 'Created' column of the masters table, e.g. '14 Jul 21 09:30'
date_time_format = '%d %b %y %H:%M'
class Calibrator(Component, Settings):
    ''' Maintains the library of master calibration frames (darks, flats, bias)
        found in the calibration directory, selects suitable masters for an
        incoming light sub, and applies them (dark/flat/bias calibration).
    '''
    # names of properties persisted between sessions
    save_settings = ['apply_dark', 'apply_flat', 'apply_bias']
    masters = DictProperty({})
    apply_flat = BooleanProperty(False)
    apply_dark = BooleanProperty(False)
    apply_bias = BooleanProperty(False)
    use_l_filter = BooleanProperty(True)
    exposure_tol = NumericProperty(5)
    temperature_tol = NumericProperty(5)
    dark_days_tol = NumericProperty(1)
    flat_days_tol = NumericProperty(60)
    tab_name = 'Calibration'
    # (property name, widget spec) pairs surfaced on the settings panel
    configurables = [
        ('use_l_filter', {'name': 'use light flat?', 'switch': '',
            'help': 'If there is no flat for the given filter, use a light flat if it exists'}),
        ('exposure_tol', {'name': 'exposure tolerance', 'float': (0, 30, 1),
            'fmt': '{:.0f} seconds',
            'help': 'When selecting a dark, select those within this exposure tolerance'}),
        ('temperature_tol', {'name': 'temperature tolerance', 'float': (0, 40, 1),
            'fmt': '{:.0f} degrees',
            'help': 'When selecting a dark, restrict to those within this temperature tolerance'}),
        ('dark_days_tol', {'name': 'dark age tolerance', 'float': (0, 300, 1),
            'fmt': '{:.0f} days',
            'help': 'Maximum age of darks to use if no temperature was specified'}),
        ('flat_days_tol', {'name': 'flat age tolerance', 'float': (0, 300, 1),
            'fmt': '{:.0f} days',
            'help': 'Maximum age of flats to use'}),
        ]
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.app = App.get_running_app()
        self.calibration_dir = self.app.get_path('calibration')
        self.masters = {} # map from name to FITs Image instance
        self.library = {} # map from name to calibration table info
        ''' construct above dicts from calibration FITs in calibration directory
        '''
        for f in fits_in_dir(self.calibration_dir):
            path = os.path.join(self.calibration_dir, f)
            try:
                s = Image(path)
                if s.is_master:
                    self.add_to_library(s)
            except Exception as e:
                # tolerate unreadable/invalid files; just log and move on
                logger.warning('Calibrator: unable to parse calibration {:} ({:})'.format(f, e))
    def on_new_object(self, *args):
        # report the current master count via the component info line
        n_masters = len(self.library)
        if n_masters > 0:
            self.info('{:d} masters'.format(n_masters))
        else:
            self.info('no masters')
    def add_to_library(self, m):
        ''' called on initialisation and when we save a new master
        '''
        # keys are full names so they can be reliably deleted
        self.masters[m.fullname] = m
        self.library[m.fullname] = {
            'name': m.name,
            'type': m.sub_type,
            'exposure': str(m.exposure) if m.exposure is not None else '???',
            'temperature': str(m.temperature) if m.temperature is not None else '???',
            'filter': m.filter,
            'created': m.create_time.strftime(date_time_format),
            'shape_str': m.shape_str,
            'age': m.age,
            'nsubs': m.nsubs if m.nsubs is not None else 0
        }
    def create_master(self, sub_type=None, exposure=None, temperature=None, filt=None):
        ''' Called by ObjectIO to save an existing stack capture by Jocular as a calibration master
        '''
        logger.info('save master type {:} expo {:} temp {:} filt {:}'.format(
            sub_type, exposure, temperature, filt))
        stacker = Component.get('Stacker')
        # force the use of method that the user has chosen or set up by default for this type of calib
        master = stacker.get_stack(filt, calibration=True)
        ''' Apply bad pixel mapping to calibration frames
            If dark, find hot pixels in master and remove, otherwise use existing BPM
            NB not fully tested
        '''
        bpm = Component.get('BadPixelMap')
        if sub_type == 'dark':
            master = bpm.do_bpm(master, bpm.find_hot_pixels(master))
            logger.debug('created BPM from darks and applied it')
        else:
            master = bpm.do_bpm(master)
            logger.debug('applied BPM to master')
        ''' Flats were divided thru by their robust mean to account for level differences
            but then scaled to 50% to enable B/W controls; so multiply by 2
        '''
        if sub_type == 'flat':
            master = 2 * master
        self.save_master(data=master, exposure=exposure, filt=filt, temperature=temperature,
            sub_type=sub_type, nsubs=stacker.get_selected_sub_count())
        # add to notes field of current DSO
        Component.get('Notes').notes = 'exposure {:} filter {:} temperature {:}'.format(exposure, filt, temperature)
    def save_master(self, data=None, exposure=None, filt=None, temperature=None, sub_type=None, nsubs=None):
        ''' Save master and add to library to make it available immediately. Called both by
            create_master above and by the Watched camera for any alien master subs. The difference is
            that create_master above does BPM/flat handling etc so only applies to natively-captured
            calibration masters.
        '''
        logger.info('new master type {:} expo {:} temp {:} filt {:} nsubs {:}'.format(
            sub_type, exposure, temperature, filt, nsubs))
        name = 'master{:}.fit'.format(sub_type)
        # avoid clobbering an existing master of the same type
        path = make_unique_filename(os.path.join(self.calibration_dir, name))
        save_image(data=data, path=path, exposure=exposure, filt=filt, temperature=temperature,
            sub_type='master ' + sub_type, nsubs=nsubs)
        self.add_to_library(Image(path))
    def calibrate(self, sub):
        # Given a light sub, apply calibration. Fails silently if no suitable calibration masters.
        sub.calibrations = set({})
        if not self.library:
            self.info('no library')
            return
        if not (self.apply_dark or self.apply_bias or self.apply_flat):
            self.info('none')
            return
        # get all masters (check speed, but should be quick)
        dark = self.get_dark(sub)
        flat = self.get_flat(sub)
        bias = self.get_bias(sub)
        logger.debug('D {:} F {:} B {:}'.format(dark, flat, bias))
        D = self.get_master(dark)
        # if D is not None:
        #     print('{:} min {:} max {:} median {:} mean {:}'.format(dark, np.min(D), np.max(D), np.median(D), np.mean(D)))
        F = self.get_master(flat)
        # if F is not None:
        #     print('{:} min {:} max {:} median {:} mean {:}'.format(flat, np.min(F), np.max(F), np.median(F), np.mean(F)))
        B = self.get_master(bias)
        # if B is not None:
        #     print('{:} min {:} max {:} median {:} mean {:}'.format(bias, np.min(B), np.max(B), np.median(B), np.mean(B)))
        im = sub.get_image()
        # apply whichever combination of masters is both enabled and available;
        # dark subtraction makes a separate bias subtraction unnecessary
        if self.apply_dark and self.apply_flat:
            if dark is not None and flat is not None:
                im = (im - D) / F
                sub.calibrations = {'dark', 'flat'}
            elif dark is not None:
                im = im - D
                sub.calibrations = {'dark'}
            elif flat is not None:
                if bias is not None:
                    sub.calibrations = {'flat', 'bias'}
                    im = (im - B) / F
                else:
                    sub.calibrations = {'flat'}
                    im = im / F # inadvisable, but we allow it
        elif self.apply_dark:
            if dark is not None:
                im = im - D
                sub.calibrations = {'dark'}
        elif self.apply_flat:
            if flat is not None:
                if bias is not None:
                    sub.calibrations = {'flat', 'bias'}
                    im = (im - B) / F
                else:
                    sub.calibrations = {'flat'}
                    im = im / F
        elif self.apply_bias:
            if bias is not None:
                sub.calibrations = {'bias'}
                im = im - B
        # limit
        im[im < 0] = 0
        im[im > 1] = 1
        sub.image = im
        applied = ' '.join(list(sub.calibrations))
        if applied:
            self.info(applied)
        else:
            self.info('none suitable')
    def get_dark(self, sub):
        # Find suitable dark for this sub given its parameters
        if sub.exposure is None:
            return None
        # choose darks that are the right shape with exposure within tolerance
        darks = {k: v for k, v in self.masters.items()
            if v.shape == sub.shape and
            v.sub_type == 'dark' and
            v.exposure is not None and
            abs(v.exposure - sub.exposure) < self.exposure_tol}
        temperature = Component.get('Session').temperature
        if temperature is not None:
            # we know temperature, select those with temperatures and within tolerance
            darks = [k for k, v in darks.items() if
                v.temperature is not None and abs(v.temperature - temperature) < self.temperature_tol]
        else:
            # find those within date tolerance (set to 1 to get darks in current session)
            darks = [k for k, v in darks.items() if v.age < self.dark_days_tol]
        # if we have darks, return name of first one
        return darks[0] if len(darks) > 0 else None
    def get_bias(self, sub):
        # get the most recent bias
        bias = {k: v.age for k, v in self.masters.items()
            if v.shape == sub.shape and v.sub_type == 'bias' }
        return min(bias, key=bias.get) if len(bias) > 0 else None
    def get_flat(self, sub):
        # Find suitable flat for this sub, preferring the sub's own filter;
        # returns the name of the closest-in-time match, or None
        # flats of right shape
        flats = {k:v for k, v in self.masters.items()
            if v.shape == sub.shape and v.sub_type == 'flat'}
        # flat in required filter
        if sub.filter is not None:
            flats_in_filt = {k: v for k, v in flats.items() if v.filter is not None and v.filter == sub.filter}
        else:
            flats_in_filt = {}
        # if we have none and can use L filter, use these
        if (len(flats_in_filt) == 0) and self.use_l_filter:
            flats_in_filt = {k:v for k, v in flats.items() if v.filter == 'L'}
        # do we have any now? if not, return
        if len(flats_in_filt) == 0:
            return None
        # find any within day tolerance, noting that this compares the date of the flat with
        # the date of the sub (i.e. not necessarily the current date)
        flats = {k: abs(v.create_time - sub.create_time).days for k,v in flats_in_filt.items()}
        flats = {k: v for k, v in flats.items() if v <= self.flat_days_tol}
        # find most recent if there is a choice
        for k in sorted(flats, key=flats.get):
            return k
        return None
    def get_master(self, name):
        # Return pixel data for the named master, or None
        if name is None:
            return None
        # Retrieve image (NB loaded on demand, so effectively a cache)
        return self.masters[name].get_image()
    def _most_subs(self, cands):
        # name of the candidate with the largest 'nsubs'
        # NOTE(review): no callers within this class; possibly vestigial
        c = {k: cands[k]['nsubs'] for k in cands.keys()}
        return max(c, key=c.get)
    def calibrate_flat(self, sub):
        ''' Perform calibrations on flat which include subtracting bias if
            available , and rescaling so the mean intensity is .5 (because outlier rejection
            methods used to combine flat subs work best with normalised frames due to changing
            light levels; the value of .5 is so that we can use B & W controls; we rescale to
            a mean of 1 when saving since this is what a good flat needs for dividing)
        '''
        im = sub.get_image()
        # subtract bias if available
        bias = self.get_bias(sub)
        if bias is not None:
            #print('subtracting bias')
            im = im - self.get_master(bias)
        # normalise by mean of image in central 3rd zone
        perc = 75 # retain central 75% of points when computing mean
        w, h = im.shape
        w1, w2 = int(w / 3), int(2 * w / 3)
        h1, h2 = int(h / 3), int(2 * h / 3)
        imr = im[h1: h2, w1: w2]
        # trimboth drops the outer (100-perc)% of sorted pixel values before averaging
        robust_mean = np.mean(trimboth(np.sort(imr.ravel(), axis=0),
            (100 - perc)/100, axis=0), axis=0)
        sub.image = .5 * im / robust_mean
    def build_calibrations(self):
        ''' Construct table from library
        '''
        return Table(
            size=Window.size,
            data=self.library,
            name='Calibration masters',
            description='Calibration masters',
            cols={
                'Name': {'w': 300, 'align': 'left', 'field': 'name'},
                'Type': {'w': 60, 'field': 'type', 'align': 'left'},
                'Exposure': {'w': 80, 'field': 'exposure'},
                'Temp. C': {'w': 80, 'field': 'temperature', 'type': str},
                'Filter': {'w': 80, 'field': 'filter'},
                'Created': {'w': 180, 'field': 'created', 'sort': {'DateFormat': date_time_format}},
                'Size': {'w': 110, 'field': 'shape_str'},
                'Age': {'w': 50, 'field': 'age', 'type': int},
                'Subs': {'w': 50, 'field': 'nsubs', 'type': int}
            },
            actions={'move to delete dir': self.move_to_delete_folder},
            on_hide_method=self.app.table_hiding
        )
    def show_calibration_table(self, *args):
        ''' Called when user clicks 'library' on GUI
        '''
        if not hasattr(self, 'calibration_table'):
            self.calibration_table = self.build_calibrations()
        self.app.showing = 'calibration'
        # check for redraw
        if self.calibration_table not in self.app.gui.children:
            self.app.gui.add_widget(self.calibration_table, index=0)
        self.calibration_table.show()
    def move_to_delete_folder(self, *args):
        # Remove masters selected in the table from library/masters and hand
        # their files to ObjectIO for deletion (moved to a delete folder)
        objio = Component.get('ObjectIO')
        for nm in self.calibration_table.selected:
            if nm in self.library:
                objio.delete_file(os.path.join(self.calibration_dir, nm))
                del self.library[nm]
                del self.masters[nm]
        logger.info('deleted {:} calibration masters'.format(len(self.calibration_table.selected)))
        self.calibration_table.update()
| [
"jocular.table.Table",
"kivy.properties.NumericProperty",
"loguru.logger.debug",
"jocular.image.fits_in_dir",
"jocular.image.save_image",
"jocular.component.Component.get",
"jocular.image.Image",
"kivy.properties.BooleanProperty",
"kivy.properties.DictProperty",
"kivy.app.App.get_running_app"
] | [((670, 686), 'kivy.properties.DictProperty', 'DictProperty', (['{}'], {}), '({})\n', (682, 686), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((704, 726), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(False)'], {}), '(False)\n', (719, 726), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((744, 766), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(False)'], {}), '(False)\n', (759, 766), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((784, 806), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(False)'], {}), '(False)\n', (799, 806), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((827, 848), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(True)'], {}), '(True)\n', (842, 848), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((868, 886), 'kivy.properties.NumericProperty', 'NumericProperty', (['(5)'], {}), '(5)\n', (883, 886), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((909, 927), 'kivy.properties.NumericProperty', 'NumericProperty', (['(5)'], {}), '(5)\n', (924, 927), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((948, 966), 'kivy.properties.NumericProperty', 'NumericProperty', (['(1)'], {}), '(1)\n', (963, 966), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((987, 1006), 'kivy.properties.NumericProperty', 'NumericProperty', (['(60)'], {}), '(60)\n', (1002, 1006), False, 'from kivy.properties import BooleanProperty, DictProperty, NumericProperty\n'), ((2114, 2135), 'kivy.app.App.get_running_app', 'App.get_running_app', ([], {}), '()\n', (2133, 2135), False, 'from kivy.app import App\n'), ((2450, 2483), 'jocular.image.fits_in_dir', 'fits_in_dir', (['self.calibration_dir'], {}), 
'(self.calibration_dir)\n', (2461, 2483), False, 'from jocular.image import Image, save_image, fits_in_dir\n'), ((4063, 4087), 'jocular.component.Component.get', 'Component.get', (['"""Stacker"""'], {}), "('Stacker')\n", (4076, 4087), False, 'from jocular.component import Component\n'), ((4454, 4482), 'jocular.component.Component.get', 'Component.get', (['"""BadPixelMap"""'], {}), "('BadPixelMap')\n", (4467, 4482), False, 'from jocular.component import Component\n'), ((6058, 6194), 'jocular.image.save_image', 'save_image', ([], {'data': 'data', 'path': 'path', 'exposure': 'exposure', 'filt': 'filt', 'temperature': 'temperature', 'sub_type': "('master ' + sub_type)", 'nsubs': 'nsubs'}), "(data=data, path=path, exposure=exposure, filt=filt, temperature=\n temperature, sub_type='master ' + sub_type, nsubs=nsubs)\n", (6068, 6194), False, 'from jocular.image import Image, save_image, fits_in_dir\n'), ((12993, 13716), 'jocular.table.Table', 'Table', ([], {'size': 'Window.size', 'data': 'self.library', 'name': '"""Calibration masters"""', 'description': '"""Calibration masters"""', 'cols': "{'Name': {'w': 300, 'align': 'left', 'field': 'name'}, 'Type': {'w': 60,\n 'field': 'type', 'align': 'left'}, 'Exposure': {'w': 80, 'field':\n 'exposure'}, 'Temp. C': {'w': 80, 'field': 'temperature', 'type': str},\n 'Filter': {'w': 80, 'field': 'filter'}, 'Created': {'w': 180, 'field':\n 'created', 'sort': {'DateFormat': date_time_format}}, 'Size': {'w': 110,\n 'field': 'shape_str'}, 'Age': {'w': 50, 'field': 'age', 'type': int},\n 'Subs': {'w': 50, 'field': 'nsubs', 'type': int}}", 'actions': "{'move to delete dir': self.move_to_delete_folder}", 'on_hide_method': 'self.app.table_hiding'}), "(size=Window.size, data=self.library, name='Calibration masters',\n description='Calibration masters', cols={'Name': {'w': 300, 'align':\n 'left', 'field': 'name'}, 'Type': {'w': 60, 'field': 'type', 'align':\n 'left'}, 'Exposure': {'w': 80, 'field': 'exposure'}, 'Temp. 
C': {'w': \n 80, 'field': 'temperature', 'type': str}, 'Filter': {'w': 80, 'field':\n 'filter'}, 'Created': {'w': 180, 'field': 'created', 'sort': {\n 'DateFormat': date_time_format}}, 'Size': {'w': 110, 'field':\n 'shape_str'}, 'Age': {'w': 50, 'field': 'age', 'type': int}, 'Subs': {\n 'w': 50, 'field': 'nsubs', 'type': int}}, actions={'move to delete dir':\n self.move_to_delete_folder}, on_hide_method=self.app.table_hiding)\n", (12998, 13716), False, 'from jocular.table import Table\n'), ((14470, 14495), 'jocular.component.Component.get', 'Component.get', (['"""ObjectIO"""'], {}), "('ObjectIO')\n", (14483, 14495), False, 'from jocular.component import Component\n'), ((4595, 4648), 'loguru.logger.debug', 'logger.debug', (['"""created BPM from darks and applied it"""'], {}), "('created BPM from darks and applied it')\n", (4607, 4648), False, 'from loguru import logger\n'), ((4715, 4752), 'loguru.logger.debug', 'logger.debug', (['"""applied BPM to master"""'], {}), "('applied BPM to master')\n", (4727, 4752), False, 'from loguru import logger\n'), ((5215, 5237), 'jocular.component.Component.get', 'Component.get', (['"""Notes"""'], {}), "('Notes')\n", (5228, 5237), False, 'from jocular.component import Component\n'), ((6230, 6241), 'jocular.image.Image', 'Image', (['path'], {}), '(path)\n', (6235, 6241), False, 'from jocular.image import Image, save_image, fits_in_dir\n'), ((9381, 9405), 'jocular.component.Component.get', 'Component.get', (['"""Session"""'], {}), "('Session')\n", (9394, 9405), False, 'from jocular.component import Component\n'), ((2579, 2590), 'jocular.image.Image', 'Image', (['path'], {}), '(path)\n', (2584, 2590), False, 'from jocular.image import Image, save_image, fits_in_dir\n')] |
from typing import Tuple
import torch
class RunningMeanStd:
    """Running estimator of per-element mean and variance over batches.

    Batches are folded in with the parallel-combination (Chan et al.) update,
    so statistics can be accumulated incrementally without storing samples.

    :param epsilon: Small initial pseudo-count that prevents division by zero
    :param shape: Shape of the tracked mean/variance tensors
    :type epsilon: float
    :type shape: Tuple
    """
    def __init__(self, epsilon: float = 1e-4, shape: Tuple = ()):
        self.mean = torch.zeros(shape).double()
        self.var = torch.ones(shape).double()
        self.count = epsilon
    def update(self, batch: torch.Tensor):
        """Fold one batch (first axis = samples) into the running statistics."""
        n_new = batch.shape[0]
        mu_new = torch.mean(batch, axis=0)
        var_new = torch.var(batch, axis=0)
        n_total = self.count + n_new
        diff = mu_new - self.mean
        # combined sum of squared deviations about the merged mean
        combined_m2 = (
            self.var * self.count
            + var_new * n_new
            + (diff ** 2) * self.count * n_new / n_total
        )
        self.mean = self.mean + diff * n_new / n_total
        self.var = combined_m2 / (n_total - 1)
        self.count = n_total
| [
"torch.mean",
"torch.zeros",
"torch.var",
"torch.ones"
] | [((574, 599), 'torch.mean', 'torch.mean', (['batch'], {'axis': '(0)'}), '(batch, axis=0)\n', (584, 599), False, 'import torch\n'), ((620, 644), 'torch.var', 'torch.var', (['batch'], {'axis': '(0)'}), '(batch, axis=0)\n', (629, 644), False, 'import torch\n'), ((406, 424), 'torch.zeros', 'torch.zeros', (['shape'], {}), '(shape)\n', (417, 424), False, 'import torch\n'), ((453, 470), 'torch.ones', 'torch.ones', (['shape'], {}), '(shape)\n', (463, 470), False, 'import torch\n')] |
import mtoa.ui.ae.templates as templates
import pymel.core as pm
import maya.cmds as cmds
import mtoa.ui.ae.utils as aeUtils
class aiPotaTemplate(templates.AttributeTemplate):
    """Attribute Editor template for the polynomial-optics (pota) Arnold camera.

    Builds a single "Polynomial Optics" layout exposing the pota lens/DOF
    attributes. The previous revision carried large blocks of commented-out
    code (as triple-quoted strings, the first of which became the class
    docstring) for an unfinished bokeh-AOV file picker; that dead code has
    been removed - recover it from version control if the feature is revived.
    """

    # (attribute, label) pairs, in the exact order they appear in the UI.
    _CONTROLS = (
        ("aiLensModel", "Lens Model"),
        ("aiSensorWidth", "Sensor Width (mm)"),
        ("aiWavelength", "Wavelength (nm)"),
        ("aiDof", "Enable depth of field"),
        ("aiFstop", "F-stop"),
        ("aiFocalDistance", "Focus distance (cm)"),
        ("aiExtraSensorShift", "Extra Sensor shift (mm)"),
        ("aiVignettingRetries", "Vignetting retries"),
        ("aiApertureBlades", "Aperture blades"),
        ("aiProperRayDerivatives", "Proper Ray Derivatives"),
    )

    def setup(self):
        """Build the AE layout: one non-collapsed section with all pota controls."""
        self.beginLayout("Polynomial Optics", collapse=False)
        for attr, label in self._CONTROLS:
            self.addControl(attr, label=label)
        # add these in the aovshader template instead
        # self.suppress('normalCamera')
        # self.suppress('hardwareColor')
        self.endLayout()
templates.registerTranslatorUI(aiPotaTemplate, "camera", "pota") | [
"mtoa.ui.ae.templates.registerTranslatorUI"
] | [((2688, 2752), 'mtoa.ui.ae.templates.registerTranslatorUI', 'templates.registerTranslatorUI', (['aiPotaTemplate', '"""camera"""', '"""pota"""'], {}), "(aiPotaTemplate, 'camera', 'pota')\n", (2718, 2752), True, 'import mtoa.ui.ae.templates as templates\n')] |
import base64
import json
from webhook import post_webhook
from datetime import datetime
def hello_pubsub(event, context):
    """Forward a Cloud Monitoring incident from Pub/Sub to the webhook.

    Args:
        event (dict): Pub/Sub event payload; ``event['data']`` holds the
            base64-encoded JSON message with an ``incident`` key.
        context (google.cloud.functions.Context): Metadata for the event
            (unused).
    """
    payload = base64.b64decode(event['data']).decode('utf-8')
    incident = json.loads(payload)['incident']
    status = incident['state'].title()
    title = incident['policy_name']
    timestamp = datetime.utcfromtimestamp(incident["started_at"]).isoformat()
    log_message = f'Started: {timestamp} UTC'
    # 0xFFEE00 (yellow) while the incident is open; 0x00FF11 (green) once ended.
    color = 16772608
    if incident['ended_at'] is not None:
        timestamp = datetime.utcfromtimestamp(incident["ended_at"]).isoformat()
        log_message += f'\nEnded: {timestamp} UTC'
        color = 65297
    log_message += f'\n{incident["summary"]}'
    log_message += f'\n[Monitor Event]({incident["url"]})'
    post_webhook(message=log_message, timestamp=timestamp, status=status, title=title, color=color)
| [
"datetime.datetime.utcfromtimestamp",
"json.loads",
"webhook.post_webhook",
"base64.b64decode"
] | [((491, 517), 'json.loads', 'json.loads', (['pubsub_message'], {}), '(pubsub_message)\n', (501, 517), False, 'import json\n'), ((1226, 1326), 'webhook.post_webhook', 'post_webhook', ([], {'message': 'log_message', 'timestamp': 'timestamp', 'status': 'status', 'title': 'title', 'color': 'color'}), '(message=log_message, timestamp=timestamp, status=status, title\n =title, color=color)\n', (1238, 1326), False, 'from webhook import post_webhook\n'), ((333, 364), 'base64.b64decode', 'base64.b64decode', (["event['data']"], {}), "(event['data'])\n", (349, 364), False, 'import base64\n'), ((763, 811), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["message['started_at']"], {}), "(message['started_at'])\n", (788, 811), False, 'from datetime import datetime\n'), ((952, 998), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["message['ended_at']"], {}), "(message['ended_at'])\n", (977, 998), False, 'from datetime import datetime\n')] |
import argparse, os, fnmatch, json, joblib
import pandas as pd
from sklearn.mixture import GaussianMixture
from sklearn.metrics import adjusted_rand_score
# Reference paper - https://arxiv.org/abs/1906.11373
# "Unsupervised Methods for Identifying Pass Coverage Among Defensive Backs with NFL Player Tracking Data"
# Input files are named week<N>.csv.
STATS_PREFIX = "week"
# Config keys (see the JSON config passed on the command line).
SKIP_COLS_KEY = "global_skip_cols"
ONLY_CLOSEST_KEY = "only_closest"
CLOSE_TO_BR_KEY = "close_to_br"
SELECT_GROUP_KEY = "select_group_by"
# One row group per play.
GROUP_BY = ["gameId", "playId"]
# Column maximized per play when only the closest defender is kept.
MAX_COL = "closest_frames"
def run_gmm_for_g_and_k(file_data, g, k, skip_cols, only_closest, close_to_br):
    """Fit a GMM on every week except index *k* and score it against week *k*.

    Args:
        file_data: list of per-week DataFrames.
        g: number of mixture components.
        k: index of the held-out (leave-one-week-out) DataFrame.
        skip_cols: columns dropped before fitting.
        only_closest: when 1, keep only the row with the max ``closest_frames``
            per (gameId, playId) group.
        close_to_br: when non-empty, keep only rows whose ``close_to_br``
            value is in this collection.

    Returns:
        tuple: (adjusted Rand index between the pooled model's and the
        held-out model's cluster assignments on week k, the pooled GMM).
    """
    # Pool every week except the held-out one. DataFrame.append was removed
    # in pandas 2.0, so collect the frames and concatenate once instead.
    train_frames = [df for j, df in enumerate(file_data) if j != k]
    if train_frames:
        data = pd.concat(train_frames, ignore_index=True)
    else:
        data = pd.DataFrame()
    if only_closest == 1:
        data = data.loc[data.groupby(GROUP_BY)[MAX_COL].idxmax()].reset_index(
            drop=True)
    elif len(close_to_br) != 0:
        data = data[data[CLOSE_TO_BR_KEY].isin(close_to_br)]
    x = data.drop(skip_cols, axis=1).dropna()
    gmm = GaussianMixture(n_components=g,
        covariance_type="full", max_iter=1000)
    gmm = gmm.fit(x)
    x_k = file_data[k].drop(skip_cols, axis=1).dropna()
    gmm_k = GaussianMixture(n_components=g,
        covariance_type="full", max_iter=1000)
    gmm_k = gmm_k.fit(x_k)
    # Cluster week k with both models; a high ARI means the pooled model
    # generalizes to the held-out week.
    y = gmm.predict(x_k)
    y_k = gmm_k.predict(x_k)
    ari = adjusted_rand_score(y, y_k)
    # return the computed ari and gmm (skipping k)
    return (ari, gmm)
def run_gmm_for_group_count(file_data, group_count, config):
    """Run leave-one-week-out GMM fits for *group_count* components.

    Returns a dict with the best leave-out index, its ARI, the summed ARI
    across all leave-outs, and the best model.
    """
    print("Running gmm for group count {}".format(group_count))
    scores = []
    models = []
    for leave_out in range(len(file_data)):
        score, model = run_gmm_for_g_and_k(file_data, group_count, leave_out,
            config[SKIP_COLS_KEY], config[ONLY_CLOSEST_KEY],
            config[CLOSE_TO_BR_KEY])
        scores.append(score)
        models.append(model)
    # First index achieving the maximum ARI (matches list.index(max(...))).
    best = max(range(len(scores)), key=scores.__getitem__)
    return {
        "lowo_index": best,
        "max_ari": scores[best],
        "total_ari": sum(scores),
        "gmm": models[best]
    }
def run_gmm_feature_influence(file_data, group_count, skip_lowo, config):
    """Measure each feature's influence by refitting with that feature removed.

    Returns a dict mapping feature name -> {"ari", "gmm"}; None when
    *file_data* is empty.
    """
    print("Running gmm for group {}, skipping lowo index: {}".format(
        group_count, skip_lowo))
    if len(file_data) == 0:
        return
    base_skip = config[SKIP_COLS_KEY]
    candidate_cols = set(file_data[0].columns) - set(base_skip)
    result = {}
    for col in candidate_cols:
        print("Skipping feature {}".format(col))
        ari_value, model = run_gmm_for_g_and_k(
            file_data, group_count, skip_lowo,
            base_skip + [col], config[ONLY_CLOSEST_KEY], config[CLOSE_TO_BR_KEY])
        result[col] = {
            "ari": ari_value,
            "gmm": model
        }
    return result
def _strip_models(gmms):
    """Drop the unserializable 'gmm' entry from each per-group result."""
    return {g: {k: gmms[g][k] for k in gmms[g].keys() - {"gmm"}}
            for g in sorted(gmms.keys())}
def _write_json(output_folder, filename, payload, label):
    """Pretty-print *payload* as JSON into *output_folder*/*filename*."""
    path = os.path.join(output_folder, filename)
    json_data = json.dumps(payload, indent=2)
    with open(path, "w") as output_file:
        output_file.write(json_data)
    print("{} saved to {}".format(label, path))
def save_results(output_folder, gmms, selected_g, influence_aris, config):
    """Persist the run: results.json, config.json, and the selected GMM.

    Args:
        output_folder: destination directory.
        gmms: dict of group_count -> result dict (incl. the fitted "gmm").
        selected_g: the chosen group count.
        influence_aris: feature -> {"ari", "gmm"} from the influence run.
        config: the run configuration (echoed to config.json).
    """
    gmm_result = _strip_models(gmms)
    selected_result = dict(gmm_result[selected_g])
    selected_result["group_count"] = selected_g
    selected_result["selection_key"] = config[SELECT_GROUP_KEY]
    if config[ONLY_CLOSEST_KEY] == 1:
        selected_result[ONLY_CLOSEST_KEY] = config[ONLY_CLOSEST_KEY]
    else:
        selected_result[CLOSE_TO_BR_KEY] = config[CLOSE_TO_BR_KEY]
    # Influence = drop in ARI when the feature is removed; sort descending.
    ari_with_all = selected_result["max_ari"]
    influences = {
        feature: {
            "influence": ari_with_all - influence_aris[feature]["ari"],
            "ari": influence_aris[feature]["ari"]
        }
        for feature in influence_aris
    }
    influence_result = {
        "group_count": selected_g,
        "lowo_index": selected_result["lowo_index"],
        "ari_with_all_features": ari_with_all,
        "feature_data": dict(sorted(influences.items(),
            key=lambda item: item[1]["influence"], reverse=True))
    }
    output = {
        "group_data": gmm_result,
        "selected_group": selected_result,
        "feature_influence": influence_result
    }
    _write_json(output_folder, "results.json", output, "Result")
    _write_json(output_folder, "config.json", config, "Config")
    selected_gmm = gmms[selected_g]["gmm"]
    gmm_path = os.path.join(output_folder, "gmm.joblib")
    joblib.dump(selected_gmm, gmm_path)
    print("GMM model saved to {}".format(gmm_path))
def run_gmm(data_folder, output_folder, config):
    """Load the weekly stats CSVs, fit GMMs over the configured group range,
    pick the best group count, measure feature influence, and save results."""
    pattern = "{}*.csv".format(STATS_PREFIX)
    stats_files = fnmatch.filter(os.listdir(data_folder), pattern)
    file_data = []
    for name in stats_files:
        print("Working on file {} ...".format(name))
        file_data.append(pd.read_csv(os.path.join(data_folder, name)))
    gmm_groups = {
        g: run_gmm_for_group_count(file_data, g, config)
        for g in range(config["group_min"], config["group_max"] + 1)
    }
    # Best group count according to the configured selection key.
    group_key = config[SELECT_GROUP_KEY]
    selected_group = max(gmm_groups, key=lambda g: gmm_groups[g][group_key])
    gmm_influence_result = run_gmm_feature_influence(
        file_data, selected_group,
        gmm_groups[selected_group]["lowo_index"], config)
    save_results(output_folder, gmm_groups, selected_group,
        gmm_influence_result, config)
def parse_args():
    """Parse the required command-line paths and return them as a dict."""
    parser = argparse.ArgumentParser()
    for flag, help_text in (
            ("--data_path", "specifies the folder containing data files"),
            ("--config_path", "specifies the json config file"),
            ("--output_path", "specifies the output folder path")):
        parser.add_argument(flag, type=str, help=help_text, required=True)
    return vars(parser.parse_args())
def main():
    """CLI entry point: parse args, load the JSON config, run the pipeline."""
    args = parse_args()
    print("Args: {}".format(args))
    data_path = os.path.abspath(args["data_path"])
    config_path = os.path.abspath(args["config_path"])
    output_path = os.path.abspath(args["output_path"])
    with open(config_path) as f:
        config = json.load(f)
    print("Config: {}".format(config))
    run_gmm(data_path, output_path, config)
if __name__ == "__main__":
    # Guard so that importing this module (e.g. from tests) does not
    # immediately start a full run.
    main()
| [
"os.listdir",
"sklearn.mixture.GaussianMixture",
"argparse.ArgumentParser",
"pandas.read_csv",
"json.dumps",
"sklearn.metrics.adjusted_rand_score",
"os.path.join",
"os.path.abspath",
"json.load",
"pandas.DataFrame",
"joblib.dump"
] | [((654, 668), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (666, 668), True, 'import pandas as pd\n'), ((1026, 1096), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'g', 'covariance_type': '"""full"""', 'max_iter': '(1000)'}), "(n_components=g, covariance_type='full', max_iter=1000)\n", (1041, 1096), False, 'from sklearn.mixture import GaussianMixture\n'), ((1182, 1252), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'g', 'covariance_type': '"""full"""', 'max_iter': '(1000)'}), "(n_components=g, covariance_type='full', max_iter=1000)\n", (1197, 1252), False, 'from sklearn.mixture import GaussianMixture\n'), ((1385, 1412), 'sklearn.metrics.adjusted_rand_score', 'adjusted_rand_score', (['y', 'y_k'], {}), '(y, y_k)\n', (1404, 1412), False, 'from sklearn.metrics import adjusted_rand_score\n'), ((3992, 4035), 'os.path.join', 'os.path.join', (['output_folder', '"""results.json"""'], {}), "(output_folder, 'results.json')\n", (4004, 4035), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4049, 4077), 'json.dumps', 'json.dumps', (['output'], {'indent': '(2)'}), '(output, indent=2)\n', (4059, 4077), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4219, 4261), 'os.path.join', 'os.path.join', (['output_folder', '"""config.json"""'], {}), "(output_folder, 'config.json')\n", (4231, 4261), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4275, 4303), 'json.dumps', 'json.dumps', (['config'], {'indent': '(2)'}), '(config, indent=2)\n', (4285, 4303), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4482, 4523), 'os.path.join', 'os.path.join', (['output_folder', '"""gmm.joblib"""'], {}), "(output_folder, 'gmm.joblib')\n", (4494, 4523), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4525, 4560), 'joblib.dump', 'joblib.dump', (['selected_gmm', 'gmm_path'], {}), '(selected_gmm, gmm_path)\n', (4536, 4560), False, 'import argparse, os, fnmatch, json, joblib\n'), ((5475, 
5500), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5498, 5500), False, 'import argparse, os, fnmatch, json, joblib\n'), ((5947, 5981), 'os.path.abspath', 'os.path.abspath', (["args['data_path']"], {}), "(args['data_path'])\n", (5962, 5981), False, 'import argparse, os, fnmatch, json, joblib\n'), ((5997, 6033), 'os.path.abspath', 'os.path.abspath', (["args['config_path']"], {}), "(args['config_path'])\n", (6012, 6033), False, 'import argparse, os, fnmatch, json, joblib\n'), ((6049, 6085), 'os.path.abspath', 'os.path.abspath', (["args['output_path']"], {}), "(args['output_path'])\n", (6064, 6085), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4690, 4713), 'os.listdir', 'os.listdir', (['data_folder'], {}), '(data_folder)\n', (4700, 4713), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4850, 4879), 'os.path.join', 'os.path.join', (['data_folder', 'sf'], {}), '(data_folder, sf)\n', (4862, 4879), False, 'import argparse, os, fnmatch, json, joblib\n'), ((4895, 4918), 'pandas.read_csv', 'pd.read_csv', (['input_file'], {}), '(input_file)\n', (4906, 4918), True, 'import pandas as pd\n'), ((6127, 6139), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6136, 6139), False, 'import argparse, os, fnmatch, json, joblib\n')] |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest.mock import patch, MagicMock
from bossingest.ingest_manager import IngestManager
from bossingest.models import IngestJob
from bossingest.test.setup import SetupTests
from bosscore.test.setup_db import SetupTestDB
from bosscore.error import ErrorCodes
from bosscore.lookup import LookUpKey
import bossutils.aws
from django.contrib.auth.models import User
from ndingest.ndqueue.uploadqueue import UploadQueue
from rest_framework.test import APITestCase
class BossIngestManagerTest(APITestCase):
    """Tests for IngestManager: config validation and ingest-job creation."""
    def setUp(self):
        """
        Initialize the database with a superuser and ingest test data, load
        both tile and volumetric ingest configs, and build the manager
        under test.
        :return:
        """
        dbsetup = SetupTestDB()
        self.user = dbsetup.create_super_user(username='testuser', email='<EMAIL>', password='<PASSWORD>')
        dbsetup.set_user(self.user)
        self.client.force_login(self.user)
        dbsetup.insert_ingest_test_data()
        setup = SetupTests()
        # Get the config_data for v1 schema
        config_data = setup.get_ingest_config_data_dict()
        self.example_config_data = config_data
        self.volumetric_config_data = setup.get_ingest_config_data_dict_volumetric()
        # Unit under test.
        self.ingest_mgr = IngestManager()
    def test_validate_ingest(self):
        """Both the config file and the derived properties should validate."""
        # Validate schema and config file
        response = self.ingest_mgr.validate_config_file(self.example_config_data)
        assert (response is True)
        # Validate properties
        response = self.ingest_mgr.validate_properties()
        assert (response is True)
    def test_validate_config_file(self):
        """Validating a config file should populate the manager's config."""
        self.ingest_mgr.validate_config_file(self.example_config_data)
        assert(self.ingest_mgr.config is not None)
        assert (self.ingest_mgr.config.config_data is not None)
    def test_validate_properties(self):
        """Validating properties should resolve collection/experiment/channel."""
        self.ingest_mgr.validate_config_file(self.example_config_data)
        self.ingest_mgr.validate_properties()
        assert (self.ingest_mgr.collection.name == 'my_col_1')
        assert (self.ingest_mgr.experiment.name == 'my_exp_1')
        assert (self.ingest_mgr.channel.name == 'my_ch_1')
    def test_create_ingest_job(self):
        """A tile config should yield a TILE_INGEST job with 512x512x1x1 tiles."""
        self.ingest_mgr.validate_config_file(self.example_config_data)
        self.ingest_mgr.validate_properties()
        self.ingest_mgr.owner = self.user.pk
        job = self.ingest_mgr.create_ingest_job()
        assert (job.id is not None)
        assert (job.ingest_type == IngestJob.TILE_INGEST)
        assert (job.tile_size_x == 512)
        assert (job.tile_size_y == 512)
        assert (job.tile_size_z == 1)
        assert (job.tile_size_t == 1)
    def test_create_ingest_job_volumetric(self):
        """A volumetric config should yield a VOLUMETRIC_INGEST job with 1024x1024x64 chunks."""
        self.ingest_mgr.validate_config_file(self.volumetric_config_data)
        self.ingest_mgr.validate_properties()
        self.ingest_mgr.owner = self.user.pk
        job = self.ingest_mgr.create_ingest_job()
        assert (job.id is not None)
        assert (job.ingest_type == IngestJob.VOLUMETRIC_INGEST)
        assert (job.tile_size_x == 1024)
        assert (job.tile_size_y == 1024)
        assert (job.tile_size_z == 64)
        assert (job.tile_size_t == 1)
    def test_generate_upload_queue_args_tile_job(self):
        """Upload-queue args for a tile job carry TILE_INGEST and z chunk 16."""
        self.ingest_mgr.validate_config_file(self.example_config_data)
        self.ingest_mgr.validate_properties()
        self.ingest_mgr.owner = self.user.pk
        job = self.ingest_mgr.create_ingest_job()
        actual = self.ingest_mgr._generate_upload_queue_args(job)
        assert actual['ingest_type'] == IngestJob.TILE_INGEST
        assert actual['z_chunk_size'] == 16
    def test_generate_upload_queue_args_volumetric_job(self):
        """Upload-queue args for a volumetric job carry z chunk 64 and no ingest queue."""
        self.ingest_mgr.validate_config_file(self.volumetric_config_data)
        self.ingest_mgr.validate_properties()
        self.ingest_mgr.owner = self.user.pk
        job = self.ingest_mgr.create_ingest_job()
        actual = self.ingest_mgr._generate_upload_queue_args(job)
        assert actual['ingest_type'] == IngestJob.VOLUMETRIC_INGEST
        assert actual['z_chunk_size'] == 64
        assert actual['ingest_queue'] is None
    def test_tile_bucket_name(self):
        """The tile bucket name should be resolvable."""
        tile_bucket_name = self.ingest_mgr.get_tile_bucket()
        assert(tile_bucket_name is not None)
    def test_get_resource_data(self):
        """get_resource_data should return all expected lookup/resource keys."""
        self.ingest_mgr.validate_config_file(self.example_config_data)
        self.ingest_mgr.validate_properties()
        self.ingest_mgr.owner = self.user.pk
        job = self.ingest_mgr.create_ingest_job()
        actual = self.ingest_mgr.get_resource_data(job.id)
        self.assertIn('boss_key', actual)
        self.assertIn('lookup_key', actual)
        self.assertIn('channel', actual)
        self.assertIn('experiment', actual)
        self.assertIn('coord_frame', actual)
| [
"bosscore.test.setup_db.SetupTestDB",
"bossingest.test.setup.SetupTests",
"bossingest.ingest_manager.IngestManager"
] | [((1262, 1275), 'bosscore.test.setup_db.SetupTestDB', 'SetupTestDB', ([], {}), '()\n', (1273, 1275), False, 'from bosscore.test.setup_db import SetupTestDB\n'), ((1522, 1534), 'bossingest.test.setup.SetupTests', 'SetupTests', ([], {}), '()\n', (1532, 1534), False, 'from bossingest.test.setup import SetupTests\n'), ((1825, 1840), 'bossingest.ingest_manager.IngestManager', 'IngestManager', ([], {}), '()\n', (1838, 1840), False, 'from bossingest.ingest_manager import IngestManager\n')] |
import calendar
from typing import Union
import dateutil.parser
from rest_framework import status
from rest_framework.response import Response
from django.utils.cache import get_conditional_response
from django.utils.http import http_date
from ..models import Resource, ResourceVersion
FhirResource = Union[Resource, ResourceVersion]
class ConditionalReadMixin:
    """DRF mixin implementing FHIR conditional read: honours the client's
    If-None-Match / If-Modified-Since headers when serving a resource."""
    def conditional_read(self, request, *args, **kwargs):
        """Serialize the object; answer with a conditional response (e.g. 304)
        when the client's cached ETag / Last-Modified still match, else 200."""
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        res_data = serializer.data
        # Test If-Modified-Since and If-None-Match preconditions
        # https://www.hl7.org/fhir/http.html#cread
        etag, last_modified = self.get_conditional_args(res_data)
        response = get_conditional_response(request, etag, last_modified)
        if response is not None:
            # Precondition handled (e.g. 304 Not Modified) -- return as-is.
            return response
        # Set relevant headers on the response if request method is safe
        headers = self.get_conditional_headers(res_data)
        return Response(
            data=res_data,
            status=status.HTTP_200_OK,
            headers=headers,
        )
    def etag_func(self, data) -> str:
        """Weak ETag derived from the resource's meta.versionId."""
        return 'W/"%s"' % data['meta']['versionId']
    def last_modified_func(self, data) -> int:
        """Unix timestamp (UTC seconds) of the resource's meta.lastUpdated.

        Note: return annotation corrected -- calendar.timegm returns an int.
        """
        dt = dateutil.parser.parse(data['meta']['lastUpdated'])
        return calendar.timegm(dt.utctimetuple())
    def get_conditional_args(self, data: dict):
        """Return the (etag, last_modified) pair for *data*."""
        etag = self.etag_func(data)
        last_modified = self.last_modified_func(data)
        return (
            etag,
            last_modified,
        )
    def get_conditional_headers(self, data):
        """Build the ETag / Last-Modified response headers for *data*."""
        etag, last_modified = self.get_conditional_args(data)
        headers = dict()
        if etag:
            headers['ETag'] = etag
        if last_modified:
            # http_date formats the Unix timestamp as an RFC-compliant date.
            headers['Last-Modified'] = http_date(last_modified)
        return headers
| [
"django.utils.cache.get_conditional_response",
"rest_framework.response.Response",
"django.utils.http.http_date"
] | [((751, 805), 'django.utils.cache.get_conditional_response', 'get_conditional_response', (['request', 'etag', 'last_modified'], {}), '(request, etag, last_modified)\n', (775, 805), False, 'from django.utils.cache import get_conditional_response\n'), ((1013, 1080), 'rest_framework.response.Response', 'Response', ([], {'data': 'res_data', 'status': 'status.HTTP_200_OK', 'headers': 'headers'}), '(data=res_data, status=status.HTTP_200_OK, headers=headers)\n', (1021, 1080), False, 'from rest_framework.response import Response\n'), ((1844, 1868), 'django.utils.http.http_date', 'http_date', (['last_modified'], {}), '(last_modified)\n', (1853, 1868), False, 'from django.utils.http import http_date\n')] |
#!/usr/bin/env python
import json
from support import parse_states
import sys
import xapian
def index(datapath, dbpath):
    """Index state records from *datapath* into the Xapian database at *dbpath*.

    Idempotent: documents are keyed by a unique "Q<order>" boolean term,
    so re-running replaces rather than duplicates.
    """
    # Create or open the database we're going to be writing to.
    db = xapian.WritableDatabase(dbpath, xapian.DB_CREATE_OR_OPEN)
    # Set up a TermGenerator that we'll use in indexing.
    termgenerator = xapian.TermGenerator()
    termgenerator.set_stemmer(xapian.Stem("en"))
    for fields in parse_states(datapath):
        # 'fields' is a dictionary mapping from field name to value.
        # Pick out the fields we're going to index.
        name = fields.get('name', u'')
        description = fields.get('description', u'')
        motto = fields.get('motto', u'')
        admitted = fields.get('admitted', None)
        population = fields.get('population', None)
        order = fields.get('order', u'')
        # We make a document and tell the term generator to use this.
        doc = xapian.Document()
        termgenerator.set_document(doc)
        # Index each field with a suitable prefix (S = title/name by
        # convention; XD / XM are custom prefixes for description / motto).
        termgenerator.index_text(name, 1, 'S')
        termgenerator.index_text(description, 1, 'XD')
        termgenerator.index_text(motto, 1, 'XM')
        # Index fields without prefixes for general search; the termpos
        # bump between fields stops phrases matching across boundaries.
        termgenerator.index_text(name)
        termgenerator.increase_termpos()
        termgenerator.index_text(description)
        termgenerator.increase_termpos()
        termgenerator.index_text(motto)
        # Add document values (slot 1: sortable year, 2: raw date,
        # 3: sortable population).
        if admitted is not None:
            doc.add_value(1, xapian.sortable_serialise(int(admitted[:4])))
            doc.add_value(2, admitted) # YYYYMMDD
        if population is not None:
            doc.add_value(3, xapian.sortable_serialise(int(population)))
        ### Start of example code.
        # Slot 4: "lat,lon" midpoint, used for geospatial matching.
        midlat = fields['midlat']
        midlon = fields['midlon']
        if midlat and midlon:
            doc.add_value(4, "%f,%f" % (float(midlat), float(midlon)))
        ### End of example code.
        # Store all the fields for display purposes.
        doc.set_data(json.dumps(fields))
        # We use the order to ensure each object ends up in the
        # database only once no matter how many times we run the
        # indexer.
        idterm = u"Q" + order
        doc.add_boolean_term(idterm)
        db.replace_document(idterm, doc)
# Script entry: require exactly DATAPATH and DBPATH arguments.
argv = sys.argv
if len(argv) != 3:
    print("Usage: %s DATAPATH DBPATH" % argv[0])
    sys.exit(1)
index(datapath=argv[1], dbpath=argv[2])
| [
"xapian.Stem",
"support.parse_states",
"json.dumps",
"xapian.WritableDatabase",
"sys.exit",
"xapian.TermGenerator",
"xapian.Document"
] | [((196, 253), 'xapian.WritableDatabase', 'xapian.WritableDatabase', (['dbpath', 'xapian.DB_CREATE_OR_OPEN'], {}), '(dbpath, xapian.DB_CREATE_OR_OPEN)\n', (219, 253), False, 'import xapian\n'), ((332, 354), 'xapian.TermGenerator', 'xapian.TermGenerator', ([], {}), '()\n', (352, 354), False, 'import xapian\n'), ((423, 445), 'support.parse_states', 'parse_states', (['datapath'], {}), '(datapath)\n', (435, 445), False, 'from support import parse_states\n'), ((2407, 2418), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2415, 2418), False, 'import sys\n'), ((385, 402), 'xapian.Stem', 'xapian.Stem', (['"""en"""'], {}), "('en')\n", (396, 402), False, 'import xapian\n'), ((927, 944), 'xapian.Document', 'xapian.Document', ([], {}), '()\n', (942, 944), False, 'import xapian\n'), ((2049, 2067), 'json.dumps', 'json.dumps', (['fields'], {}), '(fields)\n', (2059, 2067), False, 'import json\n')] |
import logging
import time
from selenium.common import exceptions
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common import action_chains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium_utils import exception
logger = logging.getLogger(__name__)
def hover_over_element(driver: WebDriver, element):
    """Hover the mouse pointer over *element* using an action chain."""
    chain = action_chains.ActionChains(driver)
    chain.move_to_element(element).perform()
def wait_until_stops_moving(element, wait_seconds=1):
    """Poll *element* until two consecutive reads of its location agree.

    Args:
        element: selenium.webdriver.remote.webelement.WebElement

    Raises:
        exception.ElementMovingTimeout: if the element is still moving
            after *wait_seconds* seconds.
    """
    deadline = time.time() + wait_seconds
    last_seen = None
    while last_seen != element.location:
        last_seen = element.location
        time.sleep(0.1)
        if time.time() > deadline:
            raise exception.ElementMovingTimeout
def get_when_visible(driver: WebDriver, locator, wait_seconds=1):
    """Return the element at *locator* once it is present in the DOM.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElement
    """
    waiter = WebDriverWait(driver, wait_seconds)
    return waiter.until(EC.presence_of_element_located(locator))
def wait_until_condition(driver: WebDriver, condition, wait_seconds=1):
    """Block until *condition* is met, or time out after *wait_seconds*."""
    WebDriverWait(driver, wait_seconds).until(condition)
def wait_until_not_present(driver: WebDriver, locator):
    """Wait until the DOM contains no element matching *locator*."""
    def _all_gone(d):
        return len(d.find_elements(*locator)) == 0
    wait_until_condition(driver, _all_gone)
def get_when_all_visible(driver: WebDriver, locator, wait_seconds=1):
    """Return the elements at *locator* once at least one becomes visible.

    Args:
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElements
    """
    waiter = WebDriverWait(driver, wait_seconds)
    return waiter.until(EC.visibility_of_any_elements_located(locator))
def get_when_clickable(driver: WebDriver, locator, wait_seconds=1):
    """Return the element at *locator* once it becomes clickable.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElement
    """
    waiter = WebDriverWait(driver, wait_seconds)
    return waiter.until(EC.element_to_be_clickable(locator))
def get_when_invisible(driver: WebDriver, locator, wait_seconds=1):
    """Wait until the element at *locator* is invisible or absent.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
    Returns:
        selenium.webdriver.remote.webelement.WebElement
    """
    waiter = WebDriverWait(driver, wait_seconds)
    return waiter.until(EC.invisibility_of_element_located(locator))
def wait_for_element_text(driver: WebDriver, locator, text, wait_seconds=1):
    """Wait until *text* appears in the element located by *locator*.

    Args:
        driver (base.CustomDriver)
        locator (tuple)
        text (str)
    """
    waiter = WebDriverWait(driver, wait_seconds)
    return waiter.until(EC.text_to_be_present_in_element(locator, text))
def is_value_in_attr(element, attr="class", value="active"):
    """Report whether *value* is a whitespace-separated token of *attr*.

    Args:
        element (selenium.webdriver.remote.webelement.WebElement)
        attr (basestring): attribute name e.g. "class"
        value (basestring): token to look for, e.g. "active"
    Returns:
        bool
    """
    return value in element.get_attribute(attr).split()
def click_on_staleable_element(driver: WebDriver, el_locator, wait_seconds=1):
"""Clicks an element that can be modified between the time we find it and when we click on it"""
time_start = time.time()
while time.time() - time_start < wait_seconds:
try:
driver.find_element(*el_locator).click()
break
except exceptions.StaleElementReferenceException as e:
logger.error(str(e))
time.sleep(0.1)
else:
raise exception.ElementNotFound(el_locator)
def scroll_into_view(driver: WebDriver, element, offset_pixels=0):
"""Scrolls page to element using JS"""
driver.execute_script("return arguments[0].scrollIntoView();", element)
# compensate for the header
driver.execute_script("window.scrollBy(0, -{});".format(offset_pixels))
return element
| [
"logging.getLogger",
"selenium.webdriver.support.ui.WebDriverWait",
"selenium.webdriver.support.expected_conditions.invisibility_of_element_located",
"selenium.webdriver.support.expected_conditions.text_to_be_present_in_element",
"time.sleep",
"selenium.webdriver.support.expected_conditions.visibility_of_... | [((346, 373), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (363, 373), False, 'import logging\n'), ((780, 791), 'time.time', 'time.time', ([], {}), '()\n', (789, 791), False, 'import time\n'), ((3827, 3838), 'time.time', 'time.time', ([], {}), '()\n', (3836, 3838), False, 'import time\n'), ((887, 902), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (897, 902), False, 'import time\n'), ((1305, 1344), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['locator'], {}), '(locator)\n', (1335, 1344), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2094, 2140), 'selenium.webdriver.support.expected_conditions.visibility_of_any_elements_located', 'EC.visibility_of_any_elements_located', (['locator'], {}), '(locator)\n', (2131, 2140), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2443, 2478), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['locator'], {}), '(locator)\n', (2469, 2478), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2781, 2824), 'selenium.webdriver.support.expected_conditions.invisibility_of_element_located', 'EC.invisibility_of_element_located', (['locator'], {}), '(locator)\n', (2815, 2824), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((3092, 3139), 'selenium.webdriver.support.expected_conditions.text_to_be_present_in_element', 'EC.text_to_be_present_in_element', (['locator', 'text'], {}), '(locator, text)\n', (3124, 3139), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((4123, 4160), 'selenium_utils.exception.ElementNotFound', 'exception.ElementNotFound', (['el_locator'], {}), '(el_locator)\n', (4148, 4160), False, 'from selenium_utils import exception\n'), ((1235, 1270), 
'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'wait_seconds'], {}), '(driver, wait_seconds)\n', (1248, 1270), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((1477, 1512), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'wait_seconds'], {}), '(driver, wait_seconds)\n', (1490, 1512), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2024, 2059), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'wait_seconds'], {}), '(driver, wait_seconds)\n', (2037, 2059), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2373, 2408), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'wait_seconds'], {}), '(driver, wait_seconds)\n', (2386, 2408), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2711, 2746), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'wait_seconds'], {}), '(driver, wait_seconds)\n', (2724, 2746), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((3022, 3057), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'wait_seconds'], {}), '(driver, wait_seconds)\n', (3035, 3057), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((3850, 3861), 'time.time', 'time.time', ([], {}), '()\n', (3859, 3861), False, 'import time\n'), ((915, 926), 'time.time', 'time.time', ([], {}), '()\n', (924, 926), False, 'import time\n'), ((4083, 4098), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4093, 4098), False, 'import time\n'), ((492, 526), 'selenium.webdriver.common.action_chains.ActionChains', 'action_chains.ActionChains', (['driver'], {}), '(driver)\n', (518, 526), False, 'from selenium.webdriver.common import action_chains\n')] |
import os
import requests
import psycopg2
import db_lib as db
from app import send_message, log
from apscheduler.schedulers.blocking import BlockingScheduler
# Postgres connection string from the environment (presumably a
# Heroku-style deployment -- confirm against the hosting setup).
DATABASE_URL = os.environ['DATABASE_URL']
# sslmode='require' forces an encrypted connection to the database.
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
def kitchen_reminder():
    """Daily job: advance or hand off kitchen duty unless notifications are off."""
    # Bail out early when notifications are switched off.
    if db.getStatus(conn) == "DISABLED":
        log("kitchen_reminder trigger; bot NOTIFY_STATUS is disabled")
        return "ok", 200
    day_number = db.getBoyNum(conn)
    if day_number == 1:
        # First day done -- the same person keeps the duty one more day.
        db.changeDay(conn, db.getBoy(conn))
    elif day_number == 2:
        # Second day done -- rotate the duty to the next person.
        outgoing = db.getBoy(conn)
        incoming = db.getNextBoy(conn)
        db.updateBoy(conn, outgoing, incoming)
        msg = "{}, it is your kitchen day!".format(db.getNickname(conn, incoming))
        send_message(msg, [incoming])
    else:
        log("Error: getBoyNum() returned an unexpected value: {}".format(day_number))
    return "ok", 200
def rent_reminder():
msg = "Don't forget to pay rent!"
send_message(msg, db.getAll(conn))
return "ok", 200
sched = BlockingScheduler()
sched.add_job(kitchen_reminder, 'cron', hour=0, minute=0)
sched.add_job(rent_reminder, 'cron', day=1)
sched.start()
| [
"psycopg2.connect",
"db_lib.getBoy",
"db_lib.changeDay",
"db_lib.getBoyNum",
"db_lib.getNextBoy",
"db_lib.updateBoy",
"db_lib.getNickname",
"db_lib.getStatus",
"apscheduler.schedulers.blocking.BlockingScheduler",
"app.send_message",
"app.log",
"db_lib.getAll"
] | [((209, 258), 'psycopg2.connect', 'psycopg2.connect', (['DATABASE_URL'], {'sslmode': '"""require"""'}), "(DATABASE_URL, sslmode='require')\n", (225, 258), False, 'import psycopg2\n'), ((1230, 1249), 'apscheduler.schedulers.blocking.BlockingScheduler', 'BlockingScheduler', ([], {}), '()\n', (1247, 1249), False, 'from apscheduler.schedulers.blocking import BlockingScheduler\n'), ((319, 337), 'db_lib.getStatus', 'db.getStatus', (['conn'], {}), '(conn)\n', (331, 337), True, 'import db_lib as db\n'), ((513, 531), 'db_lib.getBoyNum', 'db.getBoyNum', (['conn'], {}), '(conn)\n', (525, 531), True, 'import db_lib as db\n'), ((413, 475), 'app.log', 'log', (['"""kitchen_reminder trigger; bot NOTIFY_STATUS is disabled"""'], {}), "('kitchen_reminder trigger; bot NOTIFY_STATUS is disabled')\n", (416, 475), False, 'from app import send_message, log\n'), ((618, 633), 'db_lib.getBoy', 'db.getBoy', (['conn'], {}), '(conn)\n', (627, 633), True, 'import db_lib as db\n'), ((636, 666), 'db_lib.changeDay', 'db.changeDay', (['conn', 'currentBoy'], {}), '(conn, currentBoy)\n', (648, 666), True, 'import db_lib as db\n'), ((1186, 1201), 'db_lib.getAll', 'db.getAll', (['conn'], {}), '(conn)\n', (1195, 1201), True, 'import db_lib as db\n'), ((763, 778), 'db_lib.getBoy', 'db.getBoy', (['conn'], {}), '(conn)\n', (772, 778), True, 'import db_lib as db\n'), ((791, 810), 'db_lib.getNextBoy', 'db.getNextBoy', (['conn'], {}), '(conn)\n', (804, 810), True, 'import db_lib as db\n'), ((813, 852), 'db_lib.updateBoy', 'db.updateBoy', (['conn', 'currentBoy', 'nextBoy'], {}), '(conn, currentBoy, nextBoy)\n', (825, 852), True, 'import db_lib as db\n'), ((970, 998), 'app.send_message', 'send_message', (['msg', '[nextBoy]'], {}), '(msg, [nextBoy])\n', (982, 998), False, 'from app import send_message, log\n'), ((937, 966), 'db_lib.getNickname', 'db.getNickname', (['conn', 'nextBoy'], {}), '(conn, nextBoy)\n', (951, 966), True, 'import db_lib as db\n')] |
# -*- coding: utf-8 -*-
# Module: default
# Author: asciidisco
# Created on: 24.07.2017
# License: MIT https://goo.gl/WA1kby
"""Setup"""
from __future__ import unicode_literals
from os.path import abspath, dirname, join
from re import search
from sys import exit, version, version_info
from setuptools import find_packages, setup
REQUIRED_PYTHON_VERSION = (2, 7)
PACKAGES = find_packages()
INSTALL_DEPENDENCIES = []
SETUP_DEPENDENCIES = []
TEST_DEPENDENCIES = [
'nose',
'Kodistubs',
'httpretty',
'mock',
]
EXTRA_DEPENDENCIES = {
'dev': [
'nose',
'flake8',
'codeclimate-test-reporter',
'pylint',
'mccabe',
'pycodestyle',
'pyflakes',
'Kodistubs',
'httpretty',
'mock',
'requests',
'beautifulsoup4',
'pyDes',
'radon',
'Sphinx',
'sphinx_rtd_theme',
'm2r',
'kodi-release-helper',
'dennis',
'blessings',
'demjson',
'restructuredtext_lint',
'yamllint',
]
}
def get_addon_data():
"""Loads the Kodi plugin data from addon.xml"""
root_dir = dirname(abspath(__file__))
pathname = join(root_dir, 'addon.xml')
with open(pathname, 'rb') as addon_xml:
addon_xml_contents = addon_xml.read()
_id = search(
r'(?<!xml )id="(.+?)"',
addon_xml_contents).group(1)
author = search(
r'(?<!xml )provider-name="(.+?)"',
addon_xml_contents).group(1)
name = search(
r'(?<!xml )name="(.+?)"',
addon_xml_contents).group(1)
version = search(
r'(?<!xml )version="(.+?)"',
addon_xml_contents).group(1)
desc = search(
r'(?<!xml )description lang="en_GB">(.+?)<',
addon_xml_contents).group(1)
email = search(
r'(?<!xml )email>(.+?)<',
addon_xml_contents).group(1)
source = search(
r'(?<!xml )email>(.+?)<',
addon_xml_contents).group(1)
return {
'id': _id,
'author': author,
'name': name,
'version': version,
'desc': desc,
'email': email,
'source': source,
}
if version_info < REQUIRED_PYTHON_VERSION:
exit('Python >= 2.7 is required. Your version:\n{0}'.format(version))
if __name__ == '__main__':
ADDON_DATA = get_addon_data()
setup(
name=ADDON_DATA.get('name'),
version=ADDON_DATA.get('version'),
author=ADDON_DATA.get('author'),
author_email=ADDON_DATA.get('email'),
description=ADDON_DATA.get('desc'),
packages=PACKAGES,
include_package_data=True,
install_requires=INSTALL_DEPENDENCIES,
setup_requires=SETUP_DEPENDENCIES,
tests_require=TEST_DEPENDENCIES,
extras_require=EXTRA_DEPENDENCIES,
test_suite='nose.collector',
)
| [
"os.path.abspath",
"setuptools.find_packages",
"os.path.join",
"re.search"
] | [((377, 392), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (390, 392), False, 'from setuptools import find_packages, setup\n'), ((1193, 1220), 'os.path.join', 'join', (['root_dir', '"""addon.xml"""'], {}), "(root_dir, 'addon.xml')\n", (1197, 1220), False, 'from os.path import abspath, dirname, join\n'), ((1159, 1176), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (1166, 1176), False, 'from os.path import abspath, dirname, join\n'), ((1325, 1374), 're.search', 'search', (['"""(?<!xml )id="(.+?)\\""""', 'addon_xml_contents'], {}), '(\'(?<!xml )id="(.+?)"\', addon_xml_contents)\n', (1331, 1374), False, 'from re import search\n'), ((1427, 1487), 're.search', 'search', (['"""(?<!xml )provider-name="(.+?)\\""""', 'addon_xml_contents'], {}), '(\'(?<!xml )provider-name="(.+?)"\', addon_xml_contents)\n', (1433, 1487), False, 'from re import search\n'), ((1538, 1589), 're.search', 'search', (['"""(?<!xml )name="(.+?)\\""""', 'addon_xml_contents'], {}), '(\'(?<!xml )name="(.+?)"\', addon_xml_contents)\n', (1544, 1589), False, 'from re import search\n'), ((1643, 1697), 're.search', 'search', (['"""(?<!xml )version="(.+?)\\""""', 'addon_xml_contents'], {}), '(\'(?<!xml )version="(.+?)"\', addon_xml_contents)\n', (1649, 1697), False, 'from re import search\n'), ((1748, 1818), 're.search', 'search', (['"""(?<!xml )description lang="en_GB">(.+?)<"""', 'addon_xml_contents'], {}), '(\'(?<!xml )description lang="en_GB">(.+?)<\', addon_xml_contents)\n', (1754, 1818), False, 'from re import search\n'), ((1870, 1921), 're.search', 'search', (['"""(?<!xml )email>(.+?)<"""', 'addon_xml_contents'], {}), "('(?<!xml )email>(.+?)<', addon_xml_contents)\n", (1876, 1921), False, 'from re import search\n'), ((1974, 2025), 're.search', 'search', (['"""(?<!xml )email>(.+?)<"""', 'addon_xml_contents'], {}), "('(?<!xml )email>(.+?)<', addon_xml_contents)\n", (1980, 2025), False, 'from re import search\n')] |
import numpy as np
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow.keras.models import Model
import tensorflow as tf
from PIL import Image
from utils_rtp import ProMP
class Predictor:
def __init__(self, encoder_model_path, predictor_model_path):
self.all_phi = self.promp_train()
encoder_model = tf.keras.models.load_model(encoder_model_path)
self.encoder = Model(encoder_model.input, encoder_model.get_layer("bottleneck").output)
self.exp_model = tf.keras.models.load_model(predictor_model_path, compile=False)
def promp_train(self):
phi = ProMP().basis_func_gauss_glb()
zeros = np.zeros([phi.shape[0], 8])
h1 = np.hstack((phi, zeros, zeros, zeros, zeros, zeros, zeros))
h2 = np.hstack((zeros, phi, zeros, zeros, zeros, zeros, zeros))
h3 = np.hstack((zeros, zeros, phi, zeros, zeros, zeros, zeros))
h4 = np.hstack((zeros, zeros, zeros, phi, zeros, zeros, zeros))
h5 = np.hstack((zeros, zeros, zeros, zeros, phi, zeros, zeros))
h6 = np.hstack((zeros, zeros, zeros, zeros, zeros, phi, zeros))
h7 = np.hstack((zeros, zeros, zeros, zeros, zeros, zeros, phi))
vstack = np.vstack((h1, h2, h3, h4, h5, h6, h7))
vstack = tf.cast(vstack, tf.float32)
return vstack
def preprocess_image(self, image):
return np.asarray(image.resize((256, 256)))
def predict(self, image_numpy):
# image_numpy = np.expand_dims(image_numpy, axis=0)
latent_img = self.encoder.predict(image_numpy/255)
q_val_pred = self.exp_model.predict(latent_img)
traj_pred = np.matmul(self.all_phi, np.transpose(q_val_pred)).squeeze()
return traj_pred #np.reshape(traj_pred, (-1, 150))
if __name__ == "__main__":
ENCODED_MODEL_PATH = "/home/arshad/Documents/reach_to_palpate_validation_models/encoded_model_regions"
PREDICTOR_MODEL = "/home/arshad/Documents/reach_to_palpate_validation_models/model_cnn_rgb_1"
image = np.load( "/home/arshad/catkin_ws/image_xy_rtp.npy" )
predictor = Predictor(ENCODED_MODEL_PATH, PREDICTOR_MODEL)
traj = predictor.predict(image)
np.save("/home/arshad/catkin_ws/predicted_joints_values_rtp.npy", traj)
print ("\n Predicted ProMPs weights for RTP task. Joint trajectory is saved in the file. \n Press 'p' to display the trajectory...")
| [
"tensorflow.cast",
"numpy.transpose",
"utils_rtp.ProMP",
"numpy.hstack",
"numpy.zeros",
"tensorflow.keras.models.load_model",
"numpy.vstack",
"numpy.load",
"numpy.save"
] | [((2017, 2067), 'numpy.load', 'np.load', (['"""/home/arshad/catkin_ws/image_xy_rtp.npy"""'], {}), "('/home/arshad/catkin_ws/image_xy_rtp.npy')\n", (2024, 2067), True, 'import numpy as np\n'), ((2175, 2246), 'numpy.save', 'np.save', (['"""/home/arshad/catkin_ws/predicted_joints_values_rtp.npy"""', 'traj'], {}), "('/home/arshad/catkin_ws/predicted_joints_values_rtp.npy', traj)\n", (2182, 2246), True, 'import numpy as np\n'), ((347, 393), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['encoder_model_path'], {}), '(encoder_model_path)\n', (373, 393), True, 'import tensorflow as tf\n'), ((515, 578), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['predictor_model_path'], {'compile': '(False)'}), '(predictor_model_path, compile=False)\n', (541, 578), True, 'import tensorflow as tf\n'), ((669, 696), 'numpy.zeros', 'np.zeros', (['[phi.shape[0], 8]'], {}), '([phi.shape[0], 8])\n', (677, 696), True, 'import numpy as np\n'), ((710, 768), 'numpy.hstack', 'np.hstack', (['(phi, zeros, zeros, zeros, zeros, zeros, zeros)'], {}), '((phi, zeros, zeros, zeros, zeros, zeros, zeros))\n', (719, 768), True, 'import numpy as np\n'), ((782, 840), 'numpy.hstack', 'np.hstack', (['(zeros, phi, zeros, zeros, zeros, zeros, zeros)'], {}), '((zeros, phi, zeros, zeros, zeros, zeros, zeros))\n', (791, 840), True, 'import numpy as np\n'), ((854, 912), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, phi, zeros, zeros, zeros, zeros)'], {}), '((zeros, zeros, phi, zeros, zeros, zeros, zeros))\n', (863, 912), True, 'import numpy as np\n'), ((926, 984), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, phi, zeros, zeros, zeros)'], {}), '((zeros, zeros, zeros, phi, zeros, zeros, zeros))\n', (935, 984), True, 'import numpy as np\n'), ((998, 1056), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, zeros, phi, zeros, zeros)'], {}), '((zeros, zeros, zeros, zeros, phi, zeros, zeros))\n', (1007, 1056), True, 'import numpy as np\n'), ((1070, 1128), 
'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, zeros, zeros, phi, zeros)'], {}), '((zeros, zeros, zeros, zeros, zeros, phi, zeros))\n', (1079, 1128), True, 'import numpy as np\n'), ((1142, 1200), 'numpy.hstack', 'np.hstack', (['(zeros, zeros, zeros, zeros, zeros, zeros, phi)'], {}), '((zeros, zeros, zeros, zeros, zeros, zeros, phi))\n', (1151, 1200), True, 'import numpy as np\n'), ((1219, 1258), 'numpy.vstack', 'np.vstack', (['(h1, h2, h3, h4, h5, h6, h7)'], {}), '((h1, h2, h3, h4, h5, h6, h7))\n', (1228, 1258), True, 'import numpy as np\n'), ((1276, 1303), 'tensorflow.cast', 'tf.cast', (['vstack', 'tf.float32'], {}), '(vstack, tf.float32)\n', (1283, 1303), True, 'import tensorflow as tf\n'), ((621, 628), 'utils_rtp.ProMP', 'ProMP', ([], {}), '()\n', (626, 628), False, 'from utils_rtp import ProMP\n'), ((1675, 1699), 'numpy.transpose', 'np.transpose', (['q_val_pred'], {}), '(q_val_pred)\n', (1687, 1699), True, 'import numpy as np\n')] |
#
# Общие функции для всех парсеров
#
# Автор: <NAME>
# Лицензия: MIT License
#
from time import sleep
import requests
def get_htmls(urls):
"""
Получает список URL-адресов
Возвращает список из всех полученных HTML документов
:param urls: Список URL-адресов
:type urls: list
:return: Возвращаем список HTML-документов
"""
htmls = [] # Готовим болванку для возвращаемого значения
for url in urls: # Прогоняем все URL из списка
html = get_html(url) # Получаем HTML по полученному URL из списка
htmls.append(html) # Добавляем полученный HTML в возвращаемый список
sleep(1)
return htmls # Возвращаем список в котором каждый элемент - это HTML документ
def get_html(url):
"""
Получает URL-адрес
Возвращает тело HTML документа
:param url: URL-адрес
:type url: str
:return: Возвращаем HTML-документ
"""
print(f"""get_html url={url}""")
r = requests.get(url, headers={'User-Agent': 'Custom'}) # Создаем объект web-страницы по полученному url
print(r) # Ответ от сервера <Response [200]>
return r.text # Возвращаем тело HTML документа
if __name__ == '__main__':
pass | [
"time.sleep",
"requests.get"
] | [((990, 1041), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-Agent': 'Custom'}"}), "(url, headers={'User-Agent': 'Custom'})\n", (1002, 1041), False, 'import requests\n'), ((654, 662), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (659, 662), False, 'from time import sleep\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
from config import config
def Conv(**kwargs):
body = mx.sym.Convolution(**kwargs)
return body
def Act(data, act_type, name):
if act_type=='prelu':
body = mx.sym.LeakyReLU(data = data, act_type='prelu', name = name)
else:
body = mx.symbol.Activation(data=data, act_type=act_type, name=name)
return body
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True, dcn=False, name=''):
bn_mom = config.bn_mom
workspace = config.workspace
if not dcn:
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad, no_bias=True, workspace=workspace, name=name+'_conv')
else:
conv_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = data,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=data, offset=conv_offset,
num_filter=num_filter, pad=(1,1), kernel=(3,3), num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)
bn = mx.symbol.BatchNorm(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name+'_bn')
if with_act:
act = Act(bn, act_type, name=name+'_relu')
#act = mx.symbol.Activation(
# data=bn, act_type=act_type, attr=mirror_attr, name=name+'_relu')
return act
else:
return bn
def conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution(data=act1, num_filter=int(num_filter*0.5), kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
conv2 = Conv(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution(data=act2, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
conv3 = Conv(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0), no_bias=True,
workspace=workspace, name=name + '_conv3')
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution(data=act3, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
#if binarize:
# conv3 = mx.sym.BatchNorm(data=conv3, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
shortcut = mx.sym.QConvolution(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv3 + shortcut
def conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilation, **kwargs):
bit = 1
ACT_BIT = config.ACT_BIT
bn_mom = config.bn_mom
workspace = config.workspace
memonger = config.memonger
#print('in unit2')
# the same as https://github.com/facebook/fb.resnet.torch#notes, a bit difference with origin paper
bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn1')
if not binarize:
act1 = Act(data=bn1, act_type='relu', name=name + '_relu1')
if not dcn:
conv1 = Conv(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv1')
else:
conv1_offset = mx.symbol.Convolution(name=name+'_conv1_offset', data = act1,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv1 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv1', data=act1, offset=conv1_offset,
num_filter=int(num_filter*0.5), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act1 = mx.sym.QActivation(data=bn1, act_bit=ACT_BIT, name=name + '_relu1', backward_only=True)
conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=int(num_filter*0.5), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv1', act_bit=ACT_BIT, weight_bit=bit)
bn2 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn2')
if not binarize:
act2 = Act(data=bn2, act_type='relu', name=name + '_relu2')
if not dcn:
conv2 = Conv(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv2')
else:
conv2_offset = mx.symbol.Convolution(name=name+'_conv2_offset', data = act2,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv2 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv2', data=act2, offset=conv2_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act2 = mx.sym.QActivation(data=bn2, act_bit=ACT_BIT, name=name + '_relu2', backward_only=True)
conv2 = mx.sym.QConvolution_v1(data=act2, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv2', act_bit=ACT_BIT, weight_bit=bit)
bn3 = mx.sym.BatchNorm(data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn3')
if not binarize:
act3 = Act(data=bn3, act_type='relu', name=name + '_relu3')
if not dcn:
conv3 = Conv(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(dilation,dilation), dilate=(dilation,dilation),
no_bias=True, workspace=workspace, name=name + '_conv3')
else:
conv3_offset = mx.symbol.Convolution(name=name+'_conv3_offset', data = act3,
num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv3 = mx.contrib.symbol.DeformableConvolution(name=name+'_conv3', data=act3, offset=conv3_offset,
num_filter=int(num_filter*0.25), pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=True)
else:
act3 = mx.sym.QActivation(data=bn3, act_bit=ACT_BIT, name=name + '_relu3', backward_only=True)
conv3 = mx.sym.QConvolution_v1(data=act3, num_filter=int(num_filter*0.25), kernel=(3,3), stride=(1,1), pad=(1,1),
no_bias=True, workspace=workspace, name=name + '_conv3', act_bit=ACT_BIT, weight_bit=bit)
conv4 = mx.symbol.Concat(*[conv1, conv2, conv3])
if binarize:
conv4 = mx.sym.BatchNorm(data=conv4, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_bn4')
if dim_match:
shortcut = data
else:
if not binarize:
shortcut = Conv(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, no_bias=True,
workspace=workspace, name=name+'_sc')
else:
#assert(False)
shortcut = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=stride, pad=(0,0),
no_bias=True, workspace=workspace, name=name + '_sc', act_bit=ACT_BIT, weight_bit=bit)
shortcut = mx.sym.BatchNorm(data=shortcut, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + '_sc_bn')
if memonger:
shortcut._set_attr(mirror_stage='True')
return conv4 + shortcut
#return bn4 + shortcut
#return act4 + shortcut
def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
tower_conv = ConvFactory(net, 192, (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, 129, (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(1, 2), name=name+'_conv1_1')
tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(2, 1), name=name+'_conv1_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={}, name=''):
M = 1.0
tower_conv = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv')
tower_conv1_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv1_0')
tower_conv1_1 = ConvFactory(tower_conv1_0, int(input_num_channels*0.25*M), (3, 3), pad=(1, 1), name=name+'_conv1_1')
tower_conv2_0 = ConvFactory(net, int(input_num_channels*0.25*M), (1, 1), name=name+'_conv2_0')
tower_conv2_1 = ConvFactory(tower_conv2_0, int(input_num_channels*0.375*M), (3, 3), pad=(1, 1), name=name+'_conv2_1')
tower_conv2_2 = ConvFactory(tower_conv2_1, int(input_num_channels*0.5*M), (3, 3), pad=(1, 1), name=name+'_conv2_2')
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
tower_out = ConvFactory(
tower_mixed, input_num_channels, (1, 1), with_act=False, name=name+'_conv_out')
net = net+scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
assert not binarize
if stride[0]>1 or not dim_match:
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
conv4 = block35(data, num_filter, name=name+'_block35')
return conv4
def conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs):
workspace = config.workspace
if stride[0]>1 or not dim_match:
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate, **kwargs)
cab = CAB(data, num_filter, 1, 4, workspace, name, dilate, 1)
return cab.get()
def conv_block(data, num_filter, stride, dim_match, name, binarize, dcn, dilate):
if config.net_block=='resnet':
return conv_resnet(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='inception':
return conv_inception(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='hpm':
return conv_hpm(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
elif config.net_block=='cab':
return conv_cab(data, num_filter, stride, dim_match, name, binarize, dcn, dilate)
#def lin(data, num_filter, workspace, name, binarize, dcn):
# bit = 1
# ACT_BIT = config.ACT_BIT
# bn_mom = config.bn_mom
# workspace = config.workspace
# if not binarize:
# if not dcn:
# conv1 = Conv(data=data, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv')
# bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# return act1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1_offset = mx.symbol.Convolution(name=name+'_conv_offset', data = act1,
# num_filter=18, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
# conv1 = mx.contrib.symbol.DeformableConvolution(name=name+"_conv", data=act1, offset=conv1_offset,
# num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=1, stride=(1, 1), dilate=(1, 1), no_bias=False)
# #conv1 = Conv(data=act1, num_filter=num_filter, kernel=(3,3), stride=(1,1), pad=(1,1),
# # no_bias=False, workspace=workspace, name=name + '_conv')
# return conv1
# else:
# bn1 = mx.sym.BatchNorm(data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
# act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
# conv1 = mx.sym.QConvolution_v1(data=act1, num_filter=num_filter, kernel=(1,1), stride=(1,1), pad=(0,0),
# no_bias=True, workspace=workspace, name=name + '_conv', act_bit=ACT_BIT, weight_bit=bit)
# conv1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn2')
# return conv1
def lin3(data, num_filter, workspace, name, k, g=1, d=1):
bn_mom = config.bn_mom
workspace = config.workspace
if k!=3:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=((k-1)//2,(k-1)//2), num_group=g,
no_bias=True, workspace=workspace, name=name + '_conv')
else:
conv1 = Conv(data=data, num_filter=num_filter, kernel=(k,k), stride=(1,1), pad=(d,d), num_group=g, dilate=(d, d),
no_bias=True, workspace=workspace, name=name + '_conv')
bn1 = mx.sym.BatchNorm(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + '_bn')
act1 = Act(data=bn1, act_type='relu', name=name + '_relu')
ret = act1
return ret
class CAB:
def __init__(self, data, nFilters, nModules, n, workspace, name, dilate, group):
self.data = data
self.nFilters = nFilters
self.nModules = nModules
self.n = n
self.workspace = workspace
self.name = name
self.dilate = dilate
self.group = group
self.sym_map = {}
def get_output(self, w, h):
key = (w, h)
if key in self.sym_map:
return self.sym_map[key]
ret = None
if h==self.n:
if w==self.n:
ret = (self.data, self.nFilters)
else:
x = self.get_output(w+1, h)
f = int(x[1]*0.5)
if w!=self.n-1:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, 1)
else:
body = lin3(x[0], f, self.workspace, "%s_w%d_h%d_1"%(self.name, w, h), 3, self.group, self.dilate)
ret = (body,f)
else:
x = self.get_output(w+1, h+1)
y = self.get_output(w, h+1)
if h%2==1 and h!=w:
xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
#xbody = xbody+x[0]
else:
xbody = x[0]
#xbody = x[0]
#xbody = lin3(x[0], x[1], self.workspace, "%s_w%d_h%d_2"%(self.name, w, h), 3, x[1])
if w==0:
ybody = lin3(y[0], y[1], self.workspace, "%s_w%d_h%d_3"%(self.name, w, h), 3, self.group)
else:
ybody = y[0]
ybody = mx.sym.concat(y[0], ybody, dim=1)
body = mx.sym.add_n(xbody,ybody, name="%s_w%d_h%d_add"%(self.name, w, h))
body = body/2
ret = (body, x[1])
self.sym_map[key] = ret
return ret
def get(self):
return self.get_output(1, 1)[0] | [
"mxnet.sym.QActivation",
"mxnet.sym.concat",
"mxnet.contrib.symbol.DeformableConvolution",
"mxnet.sym.Convolution",
"mxnet.symbol.Convolution",
"mxnet.sym.add_n",
"mxnet.sym.QConvolution_v1",
"mxnet.symbol.Activation",
"mxnet.sym.QConvolution",
"mxnet.symbol.BatchNorm",
"mxnet.sym.LeakyReLU",
... | [((207, 235), 'mxnet.sym.Convolution', 'mx.sym.Convolution', ([], {}), '(**kwargs)\n', (225, 235), True, 'import mxnet as mx\n'), ((1287, 1385), 'mxnet.symbol.BatchNorm', 'mx.symbol.BatchNorm', ([], {'data': 'conv', 'fix_gamma': '(False)', 'momentum': 'bn_mom', 'eps': '(2e-05)', 'name': "(name + '_bn')"}), "(data=conv, fix_gamma=False, momentum=bn_mom, eps=2e-05,\n name=name + '_bn')\n", (1306, 1385), True, 'import mxnet as mx\n'), ((1963, 2059), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'data', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_bn1')"}), "(data=data, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_bn1')\n", (1979, 2059), True, 'import mxnet as mx\n'), ((2693, 2790), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'conv1', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_bn2')"}), "(data=conv1, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_bn2')\n", (2709, 2790), True, 'import mxnet as mx\n'), ((3424, 3521), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'conv2', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_bn3')"}), "(data=conv2, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_bn3')\n", (3440, 3521), True, 'import mxnet as mx\n'), ((5216, 5312), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'data', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_bn1')"}), "(data=data, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_bn1')\n", (5232, 5312), True, 'import mxnet as mx\n'), ((6441, 6538), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'conv1', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_bn2')"}), "(data=conv1, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_bn2')\n", (6457, 6538), True, 'import mxnet as mx\n'), ((7670, 7767), 'mxnet.sym.BatchNorm', 
'mx.sym.BatchNorm', ([], {'data': 'conv2', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_bn3')"}), "(data=conv2, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_bn3')\n", (7686, 7767), True, 'import mxnet as mx\n'), ((8883, 8923), 'mxnet.symbol.Concat', 'mx.symbol.Concat', (['*[conv1, conv2, conv3]'], {}), '(*[conv1, conv2, conv3])\n', (8899, 8923), True, 'import mxnet as mx\n'), ((10302, 10348), 'mxnet.symbol.Concat', 'mx.symbol.Concat', (['*[tower_conv, tower_conv1_2]'], {}), '(*[tower_conv, tower_conv1_2])\n', (10318, 10348), True, 'import mxnet as mx\n'), ((11449, 11510), 'mxnet.symbol.Concat', 'mx.symbol.Concat', (['*[tower_conv, tower_conv1_1, tower_conv2_2]'], {}), '(*[tower_conv, tower_conv1_1, tower_conv2_2])\n', (11465, 11510), True, 'import mxnet as mx\n'), ((15575, 15671), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'conv1', 'fix_gamma': '(False)', 'momentum': 'bn_mom', 'eps': '(2e-05)', 'name': "(name + '_bn')"}), "(data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-05,\n name=name + '_bn')\n", (15591, 15671), True, 'import mxnet as mx\n'), ((323, 379), 'mxnet.sym.LeakyReLU', 'mx.sym.LeakyReLU', ([], {'data': 'data', 'act_type': '"""prelu"""', 'name': 'name'}), "(data=data, act_type='prelu', name=name)\n", (339, 379), True, 'import mxnet as mx\n'), ((407, 468), 'mxnet.symbol.Activation', 'mx.symbol.Activation', ([], {'data': 'data', 'act_type': 'act_type', 'name': 'name'}), '(data=data, act_type=act_type, name=name)\n', (427, 468), True, 'import mxnet as mx\n'), ((714, 872), 'mxnet.symbol.Convolution', 'mx.symbol.Convolution', ([], {'data': 'data', 'num_filter': 'num_filter', 'kernel': 'kernel', 'stride': 'stride', 'pad': 'pad', 'no_bias': '(True)', 'workspace': 'workspace', 'name': "(name + '_conv')"}), "(data=data, num_filter=num_filter, kernel=kernel,\n stride=stride, pad=pad, no_bias=True, workspace=workspace, name=name +\n '_conv')\n", (735, 872), True, 'import mxnet as mx\n'), 
((906, 1027), 'mxnet.symbol.Convolution', 'mx.symbol.Convolution', ([], {'name': "(name + '_conv_offset')", 'data': 'data', 'num_filter': '(18)', 'pad': '(1, 1)', 'kernel': '(3, 3)', 'stride': '(1, 1)'}), "(name=name + '_conv_offset', data=data, num_filter=18,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n", (927, 1027), True, 'import mxnet as mx\n'), ((1055, 1273), 'mxnet.contrib.symbol.DeformableConvolution', 'mx.contrib.symbol.DeformableConvolution', ([], {'name': "(name + '_conv')", 'data': 'data', 'offset': 'conv_offset', 'num_filter': 'num_filter', 'pad': '(1, 1)', 'kernel': '(3, 3)', 'num_deformable_group': '(1)', 'stride': 'stride', 'dilate': '(1, 1)', 'no_bias': '(False)'}), "(name=name + '_conv', data=data,\n offset=conv_offset, num_filter=num_filter, pad=(1, 1), kernel=(3, 3),\n num_deformable_group=1, stride=stride, dilate=(1, 1), no_bias=False)\n", (1094, 1273), True, 'import mxnet as mx\n'), ((2356, 2447), 'mxnet.sym.QActivation', 'mx.sym.QActivation', ([], {'data': 'bn1', 'act_bit': 'ACT_BIT', 'name': "(name + '_relu1')", 'backward_only': '(True)'}), "(data=bn1, act_bit=ACT_BIT, name=name + '_relu1',\n backward_only=True)\n", (2374, 2447), True, 'import mxnet as mx\n'), ((3087, 3178), 'mxnet.sym.QActivation', 'mx.sym.QActivation', ([], {'data': 'bn2', 'act_bit': 'ACT_BIT', 'name': "(name + '_relu2')", 'backward_only': '(True)'}), "(data=bn2, act_bit=ACT_BIT, name=name + '_relu2',\n backward_only=True)\n", (3105, 3178), True, 'import mxnet as mx\n'), ((3809, 3900), 'mxnet.sym.QActivation', 'mx.sym.QActivation', ([], {'data': 'bn3', 'act_bit': 'ACT_BIT', 'name': "(name + '_relu3')", 'backward_only': '(True)'}), "(data=bn3, act_bit=ACT_BIT, name=name + '_relu3',\n backward_only=True)\n", (3827, 3900), True, 'import mxnet as mx\n'), ((3911, 4105), 'mxnet.sym.QConvolution', 'mx.sym.QConvolution', ([], {'data': 'act3', 'num_filter': 'num_filter', 'kernel': '(1, 1)', 'stride': '(1, 1)', 'pad': '(0, 0)', 'no_bias': '(True)', 'workspace': 'workspace', 'name': 
"(name + '_conv3')", 'act_bit': 'ACT_BIT', 'weight_bit': 'bit'}), "(data=act3, num_filter=num_filter, kernel=(1, 1), stride\n =(1, 1), pad=(0, 0), no_bias=True, workspace=workspace, name=name +\n '_conv3', act_bit=ACT_BIT, weight_bit=bit)\n", (3930, 4105), True, 'import mxnet as mx\n'), ((6101, 6192), 'mxnet.sym.QActivation', 'mx.sym.QActivation', ([], {'data': 'bn1', 'act_bit': 'ACT_BIT', 'name': "(name + '_relu1')", 'backward_only': '(True)'}), "(data=bn1, act_bit=ACT_BIT, name=name + '_relu1',\n backward_only=True)\n", (6119, 6192), True, 'import mxnet as mx\n'), ((7329, 7420), 'mxnet.sym.QActivation', 'mx.sym.QActivation', ([], {'data': 'bn2', 'act_bit': 'ACT_BIT', 'name': "(name + '_relu2')", 'backward_only': '(True)'}), "(data=bn2, act_bit=ACT_BIT, name=name + '_relu2',\n backward_only=True)\n", (7347, 7420), True, 'import mxnet as mx\n'), ((8540, 8631), 'mxnet.sym.QActivation', 'mx.sym.QActivation', ([], {'data': 'bn3', 'act_bit': 'ACT_BIT', 'name': "(name + '_relu3')", 'backward_only': '(True)'}), "(data=bn3, act_bit=ACT_BIT, name=name + '_relu3',\n backward_only=True)\n", (8558, 8631), True, 'import mxnet as mx\n'), ((8955, 9052), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'conv4', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_bn4')"}), "(data=conv4, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_bn4')\n", (8971, 9052), True, 'import mxnet as mx\n'), ((10529, 10596), 'mxnet.symbol.Activation', 'mx.symbol.Activation', ([], {'data': 'net', 'act_type': 'act_type', 'attr': 'mirror_attr'}), '(data=net, act_type=act_type, attr=mirror_attr)\n', (10549, 10596), True, 'import mxnet as mx\n'), ((11692, 11759), 'mxnet.symbol.Activation', 'mx.symbol.Activation', ([], {'data': 'net', 'act_type': 'act_type', 'attr': 'mirror_attr'}), '(data=net, act_type=act_type, attr=mirror_attr)\n', (11712, 11759), True, 'import mxnet as mx\n'), ((4548, 4739), 'mxnet.sym.QConvolution', 'mx.sym.QConvolution', ([], 
{'data': 'act1', 'num_filter': 'num_filter', 'kernel': '(1, 1)', 'stride': 'stride', 'pad': '(0, 0)', 'no_bias': '(True)', 'workspace': 'workspace', 'name': "(name + '_sc')", 'act_bit': 'ACT_BIT', 'weight_bit': 'bit'}), "(data=act1, num_filter=num_filter, kernel=(1, 1), stride\n =stride, pad=(0, 0), no_bias=True, workspace=workspace, name=name +\n '_sc', act_bit=ACT_BIT, weight_bit=bit)\n", (4567, 4739), True, 'import mxnet as mx\n'), ((5691, 5813), 'mxnet.symbol.Convolution', 'mx.symbol.Convolution', ([], {'name': "(name + '_conv1_offset')", 'data': 'act1', 'num_filter': '(18)', 'pad': '(1, 1)', 'kernel': '(3, 3)', 'stride': '(1, 1)'}), "(name=name + '_conv1_offset', data=act1, num_filter=18,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n", (5712, 5813), True, 'import mxnet as mx\n'), ((6918, 7040), 'mxnet.symbol.Convolution', 'mx.symbol.Convolution', ([], {'name': "(name + '_conv2_offset')", 'data': 'act2', 'num_filter': '(18)', 'pad': '(1, 1)', 'kernel': '(3, 3)', 'stride': '(1, 1)'}), "(name=name + '_conv2_offset', data=act2, num_filter=18,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n", (6939, 7040), True, 'import mxnet as mx\n'), ((8129, 8251), 'mxnet.symbol.Convolution', 'mx.symbol.Convolution', ([], {'name': "(name + '_conv3_offset')", 'data': 'act3', 'num_filter': '(18)', 'pad': '(1, 1)', 'kernel': '(3, 3)', 'stride': '(1, 1)'}), "(name=name + '_conv3_offset', data=act3, num_filter=18,\n pad=(1, 1), kernel=(3, 3), stride=(1, 1))\n", (8150, 8251), True, 'import mxnet as mx\n'), ((9368, 9561), 'mxnet.sym.QConvolution_v1', 'mx.sym.QConvolution_v1', ([], {'data': 'act1', 'num_filter': 'num_filter', 'kernel': '(1, 1)', 'stride': 'stride', 'pad': '(0, 0)', 'no_bias': '(True)', 'workspace': 'workspace', 'name': "(name + '_sc')", 'act_bit': 'ACT_BIT', 'weight_bit': 'bit'}), "(data=act1, num_filter=num_filter, kernel=(1, 1),\n stride=stride, pad=(0, 0), no_bias=True, workspace=workspace, name=name +\n '_sc', act_bit=ACT_BIT, weight_bit=bit)\n", (9390, 9561), True, 
'import mxnet as mx\n'), ((9604, 9706), 'mxnet.sym.BatchNorm', 'mx.sym.BatchNorm', ([], {'data': 'shortcut', 'fix_gamma': '(False)', 'eps': '(2e-05)', 'momentum': 'bn_mom', 'name': "(name + '_sc_bn')"}), "(data=shortcut, fix_gamma=False, eps=2e-05, momentum=bn_mom,\n name=name + '_sc_bn')\n", (9620, 9706), True, 'import mxnet as mx\n'), ((17386, 17419), 'mxnet.sym.concat', 'mx.sym.concat', (['y[0]', 'ybody'], {'dim': '(1)'}), '(y[0], ybody, dim=1)\n', (17399, 17419), True, 'import mxnet as mx\n'), ((17439, 17508), 'mxnet.sym.add_n', 'mx.sym.add_n', (['xbody', 'ybody'], {'name': "('%s_w%d_h%d_add' % (self.name, w, h))"}), "(xbody, ybody, name='%s_w%d_h%d_add' % (self.name, w, h))\n", (17451, 17508), True, 'import mxnet as mx\n')] |
'''
--- I M P O R T S T A T E M E N T S ---
'''
import coloredlogs, logging
coloredlogs.install()
import numpy as np
'''
=== S T A R T O F C L A S S E V A L M E T R I C ===
[About]
Object class for calculating average values.
[Init Args]
- name: String for the variable name to calculate average value for.
[Methods]
- __init__ : Class initialiser
- update : Function to be implemented by the children sub-classes.
- reset : Function for resetting the number of instances and the sum of the metric.
- get : Calculation of the average value based on the number of instances and the provided sum.
- get_name_value : Function for returning the name(s) and the value(s).
- check_label_shapes : Function responsible for type and shape checking.
'''
class EvalMetric(object):
def __init__(self, name, **kwargs):
self.name = str(name)
self.reset()
def update(self, preds, labels, losses, lr, batch_size):
raise NotImplementedError('Must be implemented in child classes!')
def reset(self):
self.num_inst = 0
self.sum_metric = 0.0
def get(self):
# case that instances are 0 -> return NaN
if self.num_inst == 0:
return (self.name, float('nan'))
# case that instances are 1 -> return their sum
if self.num_inst == 1:
return(self.name, self.sum_metric)
# case that instances are >1 -> return average
else:
return (self.name, self.sum_metric / self.num_inst)
def get_name_value(self):
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return list(zip(name, value))
def check_label_shapes(self, preds, labels):
# raise if the shape is inconsistent
if (type(labels) is list) and (type(preds) is list):
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape[0], preds.shape[0]
if label_shape != pred_shape:
raise NotImplementedError("")
'''
=== E N D O F C L A S S E V A L M E T R I C ===
'''
'''
=== S T A R T O F C L A S S M E T R I C L I S T ===
[About]
EvalMetric class for creating a list containing Evalmetric objects.
[Init Args]
- name: String for the variable name.
[Methods]
- __init__ : Class initialiser
- update : Function to update the list of EvalMetric objects.
- reset : Function for resetting the list.
- get : Function for getting each of the EvalMetric objects in the list.
- get_name_value : Function for getting the name of the list items.
'''
class MetricList(EvalMetric):
def __init__(self, *args, name="metric_list"):
assert all([issubclass(type(x), EvalMetric) for x in args]), \
"MetricList input is illegal: {}".format(args)
self.metrics = [metric for metric in args]
super(MetricList, self).__init__(name=name)
def update(self, preds, labels, losses=None, lr=None, batch_size=None):
preds = [preds] if type(preds) is not list else preds
labels = [labels] if type(labels) is not list else labels
losses = [losses] if type(losses) is not list else losses
lr = [lr] if type(lr) is not list else lr
batch_size = [batch_size] if type(batch_size) is not list else batch_size
for metric in self.metrics:
metric.update(preds, labels, losses, lr, batch_size)
def reset(self):
if hasattr(self, 'metrics'):
for metric in self.metrics:
metric.reset()
else:
logging.warning("No metric defined.")
def get(self):
ouputs = []
for metric in self.metrics:
ouputs.append(metric.get())
return ouputs
def get_name_value(self):
ouputs = []
for metric in self.metrics:
ouputs.append(metric.get_name_value())
return ouputs
'''
=== E N D O F C L A S S M E T R I C L I S T ===
'''
'''
=== S T A R T O F C L A S S A C C U R A C Y ===
[About]
EvalMetric class for creating an accuracy estimate.
[Init Args]
- name: String for the variable name. Defaults to `accuracy`.
- topk: Number of top predictions to be used of the score (top-1, top-5 etc.).
Defaults to 1.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
class Accuracy(EvalMetric):
def __init__(self, name='accuracy', topk=1):
super(Accuracy, self).__init__(name)
self.topk = topk
def update(self, preds, labels, losses, lr, batch_size):
preds = [preds] if type(preds) is not list else preds
labels = [labels] if type(labels) is not list else labels
self.check_label_shapes(preds, labels)
for pred, label in zip(preds, labels):
assert self.topk <= pred.shape[1], \
"topk({}) should no larger than the pred dim({})".format(self.topk, pred.shape[1])
_, pred_topk = pred.topk(self.topk, 1, True, True)
pred_topk = pred_topk.t()
correct = pred_topk.eq(label.view(1, -1).expand_as(pred_topk))
self.sum_metric += float(correct.reshape(-1).float().sum(0, keepdim=True).numpy())
self.num_inst += label.shape[0]
'''
=== E N D O F C L A S S A C C U R A C Y ===
'''
'''
=== S T A R T O F C L A S S L O S S ===
[About]
EvalMetric class for creating a loss score. The class acts a a `dummy estimate`
as no further calculations are required for the loss. Instead it is primarily
used to easily/directly print the loss.
[Init Args]
- name: String for the variable name. Defaults to `loss`.
[Methods]
- __init__ : Class initialiser
- update : Function to update scores.
'''
class Loss(EvalMetric):
def __init__(self, name='loss'):
super(Loss, self).__init__(name)
def update(self, preds, labels, losses, lr, batch_size):
assert losses is not None, "Loss undefined."
for loss in losses:
self.sum_metric += float(loss.numpy().sum())
self.num_inst += 1
'''
=== E N D O F C L A S S L O S S ===
'''
'''
=== S T A R T O F C L A S S L O S S ===
[About]
EvalMetric class for batch-size used. The class acts a a `dummy estimate`
as no further calculations are required for the size of the batch. Instead it is primarily
used to easily/directly print the batch size.
[Init Args]
- name: String for the variable name. Defaults to `batch-size`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
class BatchSize(EvalMetric):
def __init__(self, name='batch-size'):
super(BatchSize, self).__init__(name)
def update(self, preds, labels, losses, lrs, batch_sizes):
assert batch_sizes is not None, "Batch size undefined."
self.sum_metric = batch_sizes
self.num_inst = 1
'''
=== E N D O F C L A S S L O S S ===
'''
'''
=== S T A R T O F C L A S S L E A R N I N G R A T E ===
[About]
EvalMetric class for learning rate used. The class acts a a `dummy estimate`
as no further calculations are required for the size of the lr. Instead it is primarily
used to easily/directly print the learning rate.
[Init Args]
- name: String for the variable name. Defaults to `lr`.
[Methods]
- __init__ : Class initialiser
- update : Function used for updates.
'''
class LearningRate(EvalMetric):
def __init__(self, name='lr'):
super(LearningRate, self).__init__(name)
def update(self, preds, labels, losses, lrs, batch_sizes):
assert lrs is not None, "Learning rate undefined."
self.sum_metric = lrs[-1]
self.num_inst = 1
'''
=== E N D O F C L A S S L E A R N I N G R A T E ===
'''
if __name__ == "__main__":
import torch
# Test Accuracy
predicts = [torch.from_numpy(np.array([[0.7, 0.3], [0, 1.], [0.4, 0.6]]))]
labels = [torch.from_numpy(np.array([ 0, 1, 1 ]))]
losses = [torch.from_numpy(np.array([ 0.3, 0.4, 0.5 ]))]
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("input pred: {}".format(predicts))
logging.debug("input label: {}".format(labels))
logging.debug("input loss: {}".format(labels))
acc = Accuracy()
acc.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info(acc.get())
# Test MetricList
metrics = MetricList(Loss(name="ce-loss"),
Accuracy(topk=1, name="acc-top1"),
Accuracy(topk=2, name="acc-top2"),
)
metrics.update(preds=predicts, labels=labels, losses=losses, lr=0, batch_size=1)
logging.info("------------")
logging.info(metrics.get())
acc.get_name_value()
| [
"logging.getLogger",
"coloredlogs.install",
"logging.warning",
"numpy.array",
"logging.info"
] | [((79, 100), 'coloredlogs.install', 'coloredlogs.install', ([], {}), '()\n', (98, 100), False, 'import coloredlogs, logging\n'), ((9112, 9140), 'logging.info', 'logging.info', (['"""------------"""'], {}), "('------------')\n", (9124, 9140), False, 'import coloredlogs, logging\n'), ((3791, 3828), 'logging.warning', 'logging.warning', (['"""No metric defined."""'], {}), "('No metric defined.')\n", (3806, 3828), False, 'import coloredlogs, logging\n'), ((8262, 8306), 'numpy.array', 'np.array', (['[[0.7, 0.3], [0, 1.0], [0.4, 0.6]]'], {}), '([[0.7, 0.3], [0, 1.0], [0.4, 0.6]])\n', (8270, 8306), True, 'import numpy as np\n'), ((8341, 8360), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (8349, 8360), True, 'import numpy as np\n'), ((8420, 8445), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.5]'], {}), '([0.3, 0.4, 0.5])\n', (8428, 8445), True, 'import numpy as np\n'), ((8471, 8490), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (8488, 8490), False, 'import coloredlogs, logging\n')] |
import sys
from string import whitespace
from clint.textui import puts, indent, colored
from poly.common import *
from poly.node import *
def repl_main(args):
repl = Repl("repl")
repl.run()
class UndefinedCommandError(PolyError):
def __init__(self, command):
self.message = "Undefined command '{}'".format(command)
class Repl:
def __init__(self, name, in_prompt=None, out_prompt=None):
self.node = Node(name)
if in_prompt is None:
in_prompt = ">> "
self.in_prompt = in_prompt
if out_prompt is None:
out_prompt = "\n" + " " * len(in_prompt)
self.out_prompt = out_prompt
try:
self.node.load_module("prelude.poly", "")
except ModuleError as e:
self.print_error(e)
def run(self):
self.print_banner("Poly 0.0")
while True:
s, is_command = self.get_input()
if is_command:
try:
exit = self.handle_command(s)
except UndefinedCommandError as e:
self.print_error(e)
exit = False
if exit:
break
else:
continue
try:
expr = self.node.read(s)
self.eval_and_print(expr)
except PolyError as e:
self.print_error(e)
def eval_and_print(self, expr0):
expr1 = self.node.eval(expr0)
self.print_result(expr1)
self.node.env.table["$"] = expr1
def handle_command(self, cmd):
if cmd in ["q", "quit"]:
return True
elif cmd[0] == " ":
self.print_warning(cmd[1:])
else:
raise UndefinedCommandError(cmd)
return False
def get_input(self):
while True:
try:
prompt = self.in_prompt
puts(prompt, newline=False)
s = input().strip()
if empty_space(s):
continue
elif s[0] == ":":
return s[1:], True
else:
return s, False
except (EOFError, KeyboardInterrupt):
puts()
return "quit", True
def print_banner(self, s, width=72):
line = "-" * width
puts(line)
puts(s)
puts(line + "\n")
def print_result(self, expr):
prompt = colored.blue(self.out_prompt)
puts(prompt + str(expr) + "\n")
def print_str(self, s):
puts(s)
def print_warning(self, s):
sign = colored.yellow("Warning: ")
puts(sign + s + "\n")
def print_error(self, e):
sign = colored.red("Error: ")
puts(sign + e.message + "\n")
def empty_space(s):
if len(s) == 0:
return True
for c in s:
if s in whitespace:
return True
return False
if __name__ == "__main__":
repl_main(sys.argv[1:])
| [
"clint.textui.colored.blue",
"clint.textui.colored.red",
"clint.textui.puts",
"clint.textui.colored.yellow"
] | [((2381, 2391), 'clint.textui.puts', 'puts', (['line'], {}), '(line)\n', (2385, 2391), False, 'from clint.textui import puts, indent, colored\n'), ((2400, 2407), 'clint.textui.puts', 'puts', (['s'], {}), '(s)\n', (2404, 2407), False, 'from clint.textui import puts, indent, colored\n'), ((2416, 2433), 'clint.textui.puts', 'puts', (["(line + '\\n')"], {}), "(line + '\\n')\n", (2420, 2433), False, 'from clint.textui import puts, indent, colored\n'), ((2486, 2515), 'clint.textui.colored.blue', 'colored.blue', (['self.out_prompt'], {}), '(self.out_prompt)\n', (2498, 2515), False, 'from clint.textui import puts, indent, colored\n'), ((2593, 2600), 'clint.textui.puts', 'puts', (['s'], {}), '(s)\n', (2597, 2600), False, 'from clint.textui import puts, indent, colored\n'), ((2649, 2676), 'clint.textui.colored.yellow', 'colored.yellow', (['"""Warning: """'], {}), "('Warning: ')\n", (2663, 2676), False, 'from clint.textui import puts, indent, colored\n'), ((2685, 2706), 'clint.textui.puts', 'puts', (["(sign + s + '\\n')"], {}), "(sign + s + '\\n')\n", (2689, 2706), False, 'from clint.textui import puts, indent, colored\n'), ((2753, 2775), 'clint.textui.colored.red', 'colored.red', (['"""Error: """'], {}), "('Error: ')\n", (2764, 2775), False, 'from clint.textui import puts, indent, colored\n'), ((2784, 2813), 'clint.textui.puts', 'puts', (["(sign + e.message + '\\n')"], {}), "(sign + e.message + '\\n')\n", (2788, 2813), False, 'from clint.textui import puts, indent, colored\n'), ((1933, 1960), 'clint.textui.puts', 'puts', (['prompt'], {'newline': '(False)'}), '(prompt, newline=False)\n', (1937, 1960), False, 'from clint.textui import puts, indent, colored\n'), ((2260, 2266), 'clint.textui.puts', 'puts', ([], {}), '()\n', (2264, 2266), False, 'from clint.textui import puts, indent, colored\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from masked_cross_entropy import *
from preprocess import *
from parameter import *
import time
# # Training
def train(input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
batch_size = BATCH_SIZE
clip = CLIP
# Zero gradients of both optimizers
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
loss = 0 # Added onto for each word
# Run through encoder
encoder_outputs, encoder_hidden = encoder(
input_batches, input_lengths, None)
# Initialize decoder input
decoder_input = torch.LongTensor([SOS_index] * batch_size)
# Use last (forward) hidden state from encoder
# encoder_hidden size: num_layers * num_directions(=2), batch, hidden_size
# decoder_hidden size: num_layers, batch, hidden_size
decoder_hidden = encoder_hidden[:decoder.n_layers]
# Find the max length
max_target_length = max(target_lengths)
# Initialize decoder output
all_decoder_outputs = torch.zeros(
max_target_length, batch_size, decoder.output_size)
# Move new Variables to CUDA
if USE_CUDA:
decoder_input = decoder_input.cuda()
all_decoder_outputs = all_decoder_outputs.cuda()
# Run through decoder one time step at a time
for t in range(max_target_length):
decoder_output, decoder_hidden, decoder_attn = decoder(
decoder_input, decoder_hidden, encoder_outputs
)
all_decoder_outputs[t] = decoder_output
decoder_input = target_batches[t] # Next input is current target
# Loss calculation and backpropagation
# loss_cal = nn.BCELoss()
# loss = loss_cal(all_decoder_outputs, target_batches)
# print("target:", target_batches.size())
# print("output:", all_decoder_outputs.size())
loss = masked_cross_entropy(
all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq
target_batches.transpose(0, 1).contiguous(), # -> batch x seq
target_lengths
)
loss.backward()
# Clip gradient norms
ec = torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
dc = torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)
# Update parameters with optimizers
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item(), ec, dc
# # Evaluating the network
# def evaluate(input_seq, max_length=MAX_LENGTH):
def evaluate(input_batches, input_lengths, input_lang, output_lang, encoder, decoder, max_length=MAX_LENGTH):
# Set to not-training mode to disable dropout
encoder.train(False)
decoder.train(False)
# Run through encoder
encoder_outputs, encoder_hidden = encoder(
input_batches, input_lengths, None)
# Inference only, no back propagation
with torch.no_grad():
# Initialize decoder input
decoder_input = torch.LongTensor([SOS_index])
# Use last (forward) hidden state from encoder
decoder_hidden = encoder_hidden[:decoder.n_layers]
if USE_CUDA:
decoder_input = decoder_input.cuda()
# Store output words and attention states
output_sindices = []
decoder_attentions = torch.zeros(max_length + 1, max_length + 1)
# Run through decoder
for di in range(max_length):
decoder_output, decoder_hidden, decoder_attn = decoder(
decoder_input, decoder_hidden, encoder_outputs
)
decoder_attentions[di, :decoder_attn.size(
2)] += decoder_attn.squeeze(0).squeeze(0).cpu().data
# Choose top word from output
topv, topi = decoder_output.data.topk(1)
ni = topi[0][0]
# Extract number from pytorch variable
ni = ni.item()
output_sindices.append(ni)
if ni == EOS_index:
break
# Next input is chosen word
decoder_input = torch.LongTensor([ni])
if USE_CUDA:
decoder_input = decoder_input.cuda()
# Set back to training mode
encoder.train(True)
decoder.train(True)
return output_sindices, decoder_attentions[:di + 1, :len(encoder_outputs)]
def evaluate_and_show_attention(input_sentence, input_length, input_lang, output_lang,
target_batches, encoder, decoder, epoch):
sindices, attentions = evaluate(
input_sentence, input_length, input_lang, output_lang, encoder, decoder)
input_sentence = indices_to_sentence(input_lang, input_sentence)
output_sentence = indices_to_sentence(output_lang, sindices)
target_sentence = indices_to_sentence(output_lang, target_batches)
print_summary = 'Evaluation:'+'\n'
print_summary += ' in/src:' + input_sentence + '\n'
print_summary += ' out:' + output_sentence + '\n'
if target_sentence is not None:
print_summary += ' tgt:' + target_sentence + '\n'
show_attention(input_sentence, output_sentence, attentions, epoch)
return input_sentence, output_sentence, target_sentence
def show_attention(input_sentence, output_sentence, attentions, epoch):
# Set up figure with colorbar
# print(attentions)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.numpy(), cmap='bone')
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + input_sentence.split(' '), rotation=90)
ax.set_yticklabels([''] + output_sentence.split(' '))
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
fig.savefig(PLOT_PATH + '/epoch-%d.png' % epoch)
fig.savefig(PLOT_PATH + '/last.png')
# plt.show(block=True)
# plt.close()
| [
"matplotlib.ticker.MultipleLocator",
"torch.LongTensor",
"matplotlib.pyplot.figure",
"torch.no_grad",
"torch.zeros"
] | [((839, 881), 'torch.LongTensor', 'torch.LongTensor', (['([SOS_index] * batch_size)'], {}), '([SOS_index] * batch_size)\n', (855, 881), False, 'import torch\n'), ((1267, 1330), 'torch.zeros', 'torch.zeros', (['max_target_length', 'batch_size', 'decoder.output_size'], {}), '(max_target_length, batch_size, decoder.output_size)\n', (1278, 1330), False, 'import torch\n'), ((3498, 3541), 'torch.zeros', 'torch.zeros', (['(max_length + 1)', '(max_length + 1)'], {}), '(max_length + 1, max_length + 1)\n', (3509, 3541), False, 'import torch\n'), ((5483, 5495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5493, 5495), True, 'import matplotlib.pyplot as plt\n'), ((3107, 3122), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3120, 3122), False, 'import torch\n'), ((3185, 3214), 'torch.LongTensor', 'torch.LongTensor', (['[SOS_index]'], {}), '([SOS_index])\n', (3201, 3214), False, 'import torch\n'), ((4193, 4215), 'torch.LongTensor', 'torch.LongTensor', (['[ni]'], {}), '([ni])\n', (4209, 4215), False, 'import torch\n'), ((5822, 5847), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (5844, 5847), True, 'import matplotlib.ticker as ticker\n'), ((5881, 5906), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(1)'], {}), '(1)\n', (5903, 5906), True, 'import matplotlib.ticker as ticker\n')] |
import unittest
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_SimpleAnalysis(unittest.TestCase):
@classmethod
def tearDownClass(self):
plt.close('all')
@classmethod
def setUpClass(self):
self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
ma.a_tools.datadir = self.datadir
def test_1D_analysis_multi_file(self):
a = ma.Basic1DAnalysis(t_start='20170726_164507',
t_stop='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertTrue(len(a.timestamps) > 5)
def test_1D_analysis_single_file(self):
# giving only a single file
a = ma.Basic1DAnalysis(t_start='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertEqual(a.timestamps, ['20170726_164845'])
def test_2D_analysis_multi_file(self):
# N.B. by setting x2, x2_label and x2_unit in the options dict
# the values can be plotted versus the varied parameter between
# the linecuts
a = ma.Basic2DAnalysis(t_start='20170726_164521',
t_stop='20170726_164845',
options_dict={'scan_label': 'flipping'})
self.assertTrue(len(a.timestamps) > 5)
def test_2D_interpolated(self):
a=ma.Basic2DInterpolatedAnalysis(t_start='20180522_030206')
fig_keys = list(a.figs.keys())
exp_list_keys = ['Cost function value', 'Conditional phase',
'offset difference']
self.assertEqual(fig_keys, exp_list_keys)
@unittest.skip('FIXME: disabled, see PR #643')
def test_1D_binned_analysis(self):
a=ma.Basic1DBinnedAnalysis(label='120543_Single_qubit_GST_QL')
| [
"pycqed.analysis_v2.measurement_analysis.Basic1DAnalysis",
"pycqed.analysis_v2.measurement_analysis.Basic2DAnalysis",
"os.path.join",
"matplotlib.pyplot.close",
"pycqed.analysis_v2.measurement_analysis.Basic1DBinnedAnalysis",
"pycqed.analysis_v2.measurement_analysis.Basic2DInterpolatedAnalysis",
"unitte... | [((1725, 1770), 'unittest.skip', 'unittest.skip', (['"""FIXME: disabled, see PR #643"""'], {}), "('FIXME: disabled, see PR #643')\n", (1738, 1770), False, 'import unittest\n'), ((239, 255), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (248, 255), True, 'import matplotlib.pyplot as plt\n'), ((323, 373), 'os.path.join', 'os.path.join', (['pq.__path__[0]', '"""tests"""', '"""test_data"""'], {}), "(pq.__path__[0], 'tests', 'test_data')\n", (335, 373), False, 'import os\n'), ((472, 588), 'pycqed.analysis_v2.measurement_analysis.Basic1DAnalysis', 'ma.Basic1DAnalysis', ([], {'t_start': '"""20170726_164507"""', 't_stop': '"""20170726_164845"""', 'options_dict': "{'scan_label': 'flipping'}"}), "(t_start='20170726_164507', t_stop='20170726_164845',\n options_dict={'scan_label': 'flipping'})\n", (490, 588), True, 'from pycqed.analysis_v2 import measurement_analysis as ma\n'), ((787, 877), 'pycqed.analysis_v2.measurement_analysis.Basic1DAnalysis', 'ma.Basic1DAnalysis', ([], {'t_start': '"""20170726_164845"""', 'options_dict': "{'scan_label': 'flipping'}"}), "(t_start='20170726_164845', options_dict={'scan_label':\n 'flipping'})\n", (805, 877), True, 'from pycqed.analysis_v2 import measurement_analysis as ma\n'), ((1188, 1304), 'pycqed.analysis_v2.measurement_analysis.Basic2DAnalysis', 'ma.Basic2DAnalysis', ([], {'t_start': '"""20170726_164521"""', 't_stop': '"""20170726_164845"""', 'options_dict': "{'scan_label': 'flipping'}"}), "(t_start='20170726_164521', t_stop='20170726_164845',\n options_dict={'scan_label': 'flipping'})\n", (1206, 1304), True, 'from pycqed.analysis_v2 import measurement_analysis as ma\n'), ((1457, 1514), 'pycqed.analysis_v2.measurement_analysis.Basic2DInterpolatedAnalysis', 'ma.Basic2DInterpolatedAnalysis', ([], {'t_start': '"""20180522_030206"""'}), "(t_start='20180522_030206')\n", (1487, 1514), True, 'from pycqed.analysis_v2 import measurement_analysis as ma\n'), ((1820, 1880), 
'pycqed.analysis_v2.measurement_analysis.Basic1DBinnedAnalysis', 'ma.Basic1DBinnedAnalysis', ([], {'label': '"""120543_Single_qubit_GST_QL"""'}), "(label='120543_Single_qubit_GST_QL')\n", (1844, 1880), True, 'from pycqed.analysis_v2 import measurement_analysis as ma\n')] |
import turtle
'''http://www.algorithm.co.il/blogs/computer-science/fractals-in-10-minutes-no-6-turtle-snowflake/
This would be a good introduction to recursion. I don't see how students
would invent this on their own, but they could modify it and see what
other fractals they could generate.
'''
pen = turtle.Turtle()
pen.penup()
pen.goto(-200,0)
pen.pendown()
pen.speed(0)
def fractal(pen, length, depth):
#Base case
if depth == 0:
pen.forward(length)
#Recursive case
else:
fractal(pen, length/3, depth-1)
pen.right(60)
fractal(pen, length/3, depth-1)
pen.left(120)
fractal(pen, length/3, depth-1)
pen.right(60)
fractal(pen, length/3, depth-1)
#Draw the fractal
fractal(pen, 500, 4)
turtle.done()
| [
"turtle.done",
"turtle.Turtle"
] | [((303, 318), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (316, 318), False, 'import turtle\n'), ((767, 780), 'turtle.done', 'turtle.done', ([], {}), '()\n', (778, 780), False, 'import turtle\n')] |
from django.urls import re_path
from user_queries.views import UserQuerySaveView, UserQueryCollectView
urlpatterns = [
re_path(r"^/save/?$", UserQuerySaveView.as_view(), name="user-save-query"),
re_path(
r"^/collect/?$",
UserQueryCollectView.as_view(),
name="user-collect-queries",
),
]
| [
"user_queries.views.UserQuerySaveView.as_view",
"user_queries.views.UserQueryCollectView.as_view"
] | [((146, 173), 'user_queries.views.UserQuerySaveView.as_view', 'UserQuerySaveView.as_view', ([], {}), '()\n', (171, 173), False, 'from user_queries.views import UserQuerySaveView, UserQueryCollectView\n'), ((246, 276), 'user_queries.views.UserQueryCollectView.as_view', 'UserQueryCollectView.as_view', ([], {}), '()\n', (274, 276), False, 'from user_queries.views import UserQuerySaveView, UserQueryCollectView\n')] |
import pandas as pd
from math import log
class InfoGain():
    """Load a CSV dataset and compute entropy / information-gain statistics."""
    def __init__(self, path):
        # *path* is the CSV file path without the ".csv" extension.
        self._path=path
    def extractVariables(self):
        """Read the CSV into a DataFrame and cache its column names."""
        self._df = pd.read_csv(self._path + ".csv");
        # put the original column names in a python list
        '''if 'Unnamed: 0' in self._df.columns:
            self._df = self._df.drop(columns=['Unnamed: 0']);
        if 'Unnamed: 0.1' in self._df.columns:
            self._df = self._df.drop(columns=['Unnamed: 0.1']);
        '''
        self._categories=list(self._df.columns.values)
        print(self._categories)
        # NOTE(review): DataFrame.count() returns per-column non-null counts
        # (a Series), not a single row count — confirm that is intended.
        self._totalRows=self._df.count()
    def splitCategories(self):
        # Currently just aliases the raw frame; no actual split is performed.
        self._dfNormal=self._df
    # NOTE: entropy() and gain() take no ``self`` — they are used as plain
    # functions via ``InfoGain.entropy(...)`` / ``InfoGain.gain(...)``.
    def entropy(pi):
        '''
        pi is an array that contains class counts.
        Return the entropy of the probability distribution:
            entropy(p) = - SUM (Pi * log(Pi) )
        Definition:
            Entropy is a metric that measures the uncertainty of a probability
            distribution; it ranges between 0 and 1.
            Low entropy means the distribution varies (peaks and valleys).
            High entropy means the distribution is uniform.
        See:
            http://www.cs.csi.cuny.edu/~imberman/ai/Entropy%20and%20Information%20Gain.htm
        '''
        total = 0
        for p in pi:
            p = p / sum(pi)
            if p != 0:
                total += p * log(p, 2)
            else:
                total += 0
        total *= -1
        return total
    def gain(d, a):
        '''
        Return the information gain:
            gain(D, A) = entropy(D) - SUM ( |Di| / |D| * entropy(Di) )
        '''
        total = 0
        for v in a:
            total += sum(v) / sum(d) * InfoGain.entropy(v)
        gain = InfoGain.entropy(d) - total
        return gain
| [
"pandas.read_csv",
"math.log"
] | [((170, 202), 'pandas.read_csv', 'pd.read_csv', (["(self._path + '.csv')"], {}), "(self._path + '.csv')\n", (181, 202), True, 'import pandas as pd\n'), ((1438, 1447), 'math.log', 'log', (['p', '(2)'], {}), '(p, 2)\n', (1441, 1447), False, 'from math import log\n')] |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.impedance import Impedance # noqa: F401,E501
from swagger_server import util
class LinecodeRMatrix(Model):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, phase_r: Impedance=None, phase_s: Impedance=None, phase_t: Impedance=None):  # noqa: E501
        """LinecodeRMatrix - a model defined in Swagger
        :param phase_r: The phase_r of this LinecodeRMatrix.  # noqa: E501
        :type phase_r: Impedance
        :param phase_s: The phase_s of this LinecodeRMatrix.  # noqa: E501
        :type phase_s: Impedance
        :param phase_t: The phase_t of this LinecodeRMatrix.  # noqa: E501
        :type phase_t: Impedance
        """
        # Attribute name -> declared type; consumed by the swagger (de)serializer.
        self.swagger_types = {
            'phase_r': Impedance,
            'phase_s': Impedance,
            'phase_t': Impedance
        }
        # Python attribute name -> wire/JSON key.
        self.attribute_map = {
            'phase_r': 'phase_R',
            'phase_s': 'phase_S',
            'phase_t': 'phase_T'
        }
        self._phase_r = phase_r
        self._phase_s = phase_s
        self._phase_t = phase_t
    @classmethod
    def from_dict(cls, dikt) -> 'LinecodeRMatrix':
        """Returns the dict as a model
        :param dikt: A dict.
        :type: dict
        :return: The Linecode_R_Matrix of this LinecodeRMatrix.  # noqa: E501
        :rtype: LinecodeRMatrix
        """
        return util.deserialize_model(dikt, cls)
    @property
    def phase_r(self) -> Impedance:
        """Gets the phase_r of this LinecodeRMatrix.
        :return: The phase_r of this LinecodeRMatrix.
        :rtype: Impedance
        """
        return self._phase_r
    @phase_r.setter
    def phase_r(self, phase_r: Impedance):
        """Sets the phase_r of this LinecodeRMatrix.
        :param phase_r: The phase_r of this LinecodeRMatrix.
        :type phase_r: Impedance
        """
        self._phase_r = phase_r
    @property
    def phase_s(self) -> Impedance:
        """Gets the phase_s of this LinecodeRMatrix.
        :return: The phase_s of this LinecodeRMatrix.
        :rtype: Impedance
        """
        return self._phase_s
    @phase_s.setter
    def phase_s(self, phase_s: Impedance):
        """Sets the phase_s of this LinecodeRMatrix.
        :param phase_s: The phase_s of this LinecodeRMatrix.
        :type phase_s: Impedance
        """
        self._phase_s = phase_s
    @property
    def phase_t(self) -> Impedance:
        """Gets the phase_t of this LinecodeRMatrix.
        :return: The phase_t of this LinecodeRMatrix.
        :rtype: Impedance
        """
        return self._phase_t
    @phase_t.setter
    def phase_t(self, phase_t: Impedance):
        """Sets the phase_t of this LinecodeRMatrix.
        :param phase_t: The phase_t of this LinecodeRMatrix.
        :type phase_t: Impedance
        """
        self._phase_t = phase_t
| [
"swagger_server.util.deserialize_model"
] | [((1651, 1684), 'swagger_server.util.deserialize_model', 'util.deserialize_model', (['dikt', 'cls'], {}), '(dikt, cls)\n', (1673, 1684), False, 'from swagger_server import util\n')] |
# Created by Hansi at 3/16/2020
import os
from algo.data_process.data_preprocessor import data_cleaning_flow
from algo.utils.file_utils import delete_create_folder
def extract_gt_tokens(text):
    """
    Given a GT string formatted as Twitter-Event-Data-2019, extract GT labels.

    parameters
    -----------
    :param text: str
    :return: list
        List of GT labels corresponding to a single event.  Since a single
        event can have duplicate definitions (separated by '|'), the outer
        list holds one label list per duplicate definition.
    """
    def _split_label(chunk):
        # Strip the newline and closing bracket, then split into tokens.
        return chunk.replace("\n", "").replace("]", "").split(",")

    return [
        [_split_label(part) for part in piece.split("[") if part]
        for piece in text.split("|")
    ]
def load_gt(folder_path):
    """
    Read GT data into a dictionary formatted as {time-window: labels}.

    parameters
    -----------
    :param folder_path: str
        Path to folder which contains GT data files.
    :return: dict
        Mapping of file name (without extension) to the list of parsed events.
    """
    gt = dict()
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            file_name = os.path.splitext(file)[0]
            events = []
            # ``with`` guarantees the handle is closed even if parsing raises.
            with open(os.path.join(folder_path, file), 'r', encoding='utf-8') as f:
                for line in f:
                    events.append(extract_gt_tokens(line))
            gt[file_name] = events
    return gt
def generate_gt_string(tokens):
    """
    Given a list of GT labels corresponding to a single event, convert them to
    a string formatted according to the Twitter-Event-Data-2019 GT format.

    Each label becomes ``[tok1,tok2,...]``; duplicate definitions are
    separated by ``|``.

    parameters
    -----------
    :param tokens: list
    :return: str
    """
    # Fix: the original shadowed the builtin ``str`` and built the string
    # character by character; use an accumulator name and ``",".join`` instead.
    result = ""
    for duplicate in tokens:
        # Separate duplicate definitions that already emitted labels.
        if result and result.endswith("]"):
            result += "|"
        for label in duplicate:
            result += "[" + ",".join(label) + "]"
    return result
def get_combined_gt(gt):
    """
    Combine the GT labels of the multiple events available in each time frame
    into a single-event representation (cartesian concatenation of the
    duplicate definitions of every event).

    parameters
    -----------
    :param gt: dict
        Dictionary of GT returned by load_gt.
    :return: dict
        Dictionary of combined GT.
    """
    combined_gt = dict()
    for time_frame, gt_events in gt.items():
        merged = gt_events[0]
        for event in gt_events[1:]:
            # Concatenate every already-merged variant with every duplicate
            # definition of the next event.
            merged = [variant + duplicate
                      for duplicate in event
                      for variant in merged]
        # Wrapped in a list to stay consistent with general evaluation methods,
        # even though only one combined event remains.
        combined_gt[time_frame] = [merged]
    return combined_gt
def preprocess_gt(input_filepath, output_filepath):
    """
    Preprocess ground truth data in input_file and append it to the
    output_file.

    parameters
    -----------
    :param input_filepath: str (.txt file path)
        Ground truth file formatted as Twitter-Event-Data-2019.
    :param output_filepath: str (.txt file path)
    :return:
    """
    # Fix: the input file handle was never closed; use context managers for
    # both files.
    with open(input_filepath, 'r') as input_file:
        events = [extract_gt_tokens(line) for line in input_file]

    # Clean every token of every label of every duplicate definition.
    new_events = []
    for event in events:
        new_duplicates = []
        for duplicate in event:
            new_labels = []
            for label in duplicate:
                new_labels.append([data_cleaning_flow(element) for element in label])
            new_duplicates.append(new_labels)
        new_events.append(new_duplicates)

    # Append mode preserved: callers may accumulate into an existing file.
    with open(output_filepath, 'a', encoding='utf-8') as output_file:
        for event in new_events:
            output_file.write(generate_gt_string(event))
            output_file.write("\n")
def preprocess_gt_bulk(input_folder_path, output_folder_path):
    """
    Preprocess every GT file found under *input_folder_path* and write the
    cleaned versions (same file names) into *output_folder_path*.

    parameters
    -----------
    :param input_folder_path: str
        Path to folder which contains GT data files.
    :param output_folder_path: str
        Path to folder to save preprocessed GT data.
    :return:
    """
    # Start from a clean slate: remove any previous output folder first.
    delete_create_folder(output_folder_path)
    for _root, _dirs, filenames in os.walk(input_folder_path):
        for filename in filenames:
            preprocess_gt(os.path.join(input_folder_path, filename),
                          os.path.join(output_folder_path, filename))
"algo.data_process.data_preprocessor.data_cleaning_flow",
"algo.utils.file_utils.delete_create_folder",
"os.path.splitext",
"os.path.join",
"os.walk"
] | [((1333, 1353), 'os.walk', 'os.walk', (['folder_path'], {}), '(folder_path)\n', (1340, 1353), False, 'import os\n'), ((4983, 5023), 'algo.utils.file_utils.delete_create_folder', 'delete_create_folder', (['output_folder_path'], {}), '(output_folder_path)\n', (5003, 5023), False, 'from algo.utils.file_utils import delete_create_folder\n'), ((5054, 5080), 'os.walk', 'os.walk', (['input_folder_path'], {}), '(input_folder_path)\n', (5061, 5080), False, 'import os\n'), ((5138, 5175), 'os.path.join', 'os.path.join', (['input_folder_path', 'file'], {}), '(input_folder_path, file)\n', (5150, 5175), False, 'import os\n'), ((5206, 5244), 'os.path.join', 'os.path.join', (['output_folder_path', 'file'], {}), '(output_folder_path, file)\n', (5218, 5244), False, 'import os\n'), ((1406, 1428), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1422, 1428), False, 'import os\n'), ((1453, 1484), 'os.path.join', 'os.path.join', (['folder_path', 'file'], {}), '(folder_path, file)\n', (1465, 1484), False, 'import os\n'), ((4149, 4176), 'algo.data_process.data_preprocessor.data_cleaning_flow', 'data_cleaning_flow', (['element'], {}), '(element)\n', (4167, 4176), False, 'from algo.data_process.data_preprocessor import data_cleaning_flow\n')] |
import logging
from queue import Queue
from threading import Thread
from time import time
# Configure root logging once at import time; module-level logger for this file.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Worker(Thread):
    """Consumer thread: pulls ``(video, txnId)`` jobs from an input queue,
    generates the video part and pushes the result to an output queue."""

    def __init__(self, queue, out_que):
        Thread.__init__(self)
        self.queue = queue
        self.out_que = out_que

    def run(self):
        # Loop forever; the owner daemonizes the thread so it dies with main.
        while True:
            video, txnId = self.queue.get()
            try:
                self.out_que.put(video.generate_video_part(txnId))
            finally:
                # Always acknowledge the job so queue.join() cannot deadlock.
                self.queue.task_done()
def main(video_obj_arr, txnId, n):
    """Fan video-part generation out across worker threads and return a queue
    holding the generated parts.

    NOTE(review): only indices 1..n-1 are queued (video_obj_arr[0..n-2]), so
    the last element of a length-n array is never processed — confirm the
    off-by-one against the caller's expectations.
    """
    ts = time()
    # Create a queue to communicate with the worker threads
    queue = Queue()
    out_que = Queue()
    # Create 2 worker threads
    for x in range(2):
        worker = Worker(queue, out_que)
        # Setting daemon to True will let the main thread exit even though the workers are blocking
        worker.daemon = True
        worker.start()
    # Put the tasks into the queue as a tuple
    for i in range(1, n):
        logger.info('Queueing {}'.format(i))
        queue.put((video_obj_arr[i-1], txnId))
    # Causes the main thread to wait for the queue to finish processing all the tasks
    queue.join()
    # NOTE(review): this logs via the root logger, unlike the module
    # ``logger`` used above — presumably unintentional.
    logging.info('Took %s', time() - ts)
    return out_que
if __name__ == '__main__':
    # NOTE(review): main() requires (video_obj_arr, txnId, n); calling it with
    # no arguments raises TypeError — confirm the intended CLI entry point.
    main()
| [
"logging.basicConfig",
"threading.Thread.__init__",
"logging.getLogger",
"queue.Queue",
"time.time"
] | [((91, 198), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (110, 198), False, 'import logging\n'), ((204, 231), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (221, 231), False, 'import logging\n'), ((744, 750), 'time.time', 'time', ([], {}), '()\n', (748, 750), False, 'from time import time\n'), ((823, 830), 'queue.Queue', 'Queue', ([], {}), '()\n', (828, 830), False, 'from queue import Queue\n'), ((845, 852), 'queue.Queue', 'Queue', ([], {}), '()\n', (850, 852), False, 'from queue import Queue\n'), ((305, 326), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (320, 326), False, 'from threading import Thread\n'), ((1393, 1399), 'time.time', 'time', ([], {}), '()\n', (1397, 1399), False, 'from time import time\n')] |
#!/usr/bin/env python
import saml2
from saml2 import SamlBase
from saml2.xmldsig import KeyInfo
NAMESPACE = 'urn:net:eustix:names:tc:PEFIM:0.0:assertion'
class SPCertEncType_(SamlBase):
    """The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEncType element """
    c_tag = 'SPCertEncType'
    c_namespace = NAMESPACE
    # Copy the base-class schema tables so the edits below stay local here.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # Register the required ds:KeyInfo child and the VerifyDepth attribute.
    c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
                                                                 [KeyInfo])
    c_cardinality['key_info'] = {"min": 1}
    c_attributes['VerifyDepth'] = ('verify_depth', 'unsignedByte', False)
    c_child_order.extend(['key_info'])
    def __init__(self,
                 key_info=None,
                 x509_data=None,
                 verify_depth='1',
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        """Build the element; *x509_data* is a convenience alternative to a
        pre-built *key_info* child element."""
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        if key_info:
            self.key_info = key_info
        elif x509_data:
            # Wrap raw X509 data in a ds:KeyInfo element.
            self.key_info = KeyInfo(x509_data=x509_data)
        else:
            self.key_info = []
        self.verify_depth = verify_depth
        #self.x509_data = x509_data
def spcertenc_type__from_string(xml_string):
    """Parse *xml_string* into an SPCertEncType_ instance."""
    return saml2.create_class_from_xml_string(SPCertEncType_, xml_string)
class SPCertEnc(SPCertEncType_):
    """The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEnc element """
    c_tag = 'SPCertEnc'
    c_namespace = NAMESPACE
    # Inherit copies of the parent's schema tables unchanged.
    c_children = SPCertEncType_.c_children.copy()
    c_attributes = SPCertEncType_.c_attributes.copy()
    c_child_order = SPCertEncType_.c_child_order[:]
    c_cardinality = SPCertEncType_.c_cardinality.copy()
def spcertenc_from_string(xml_string):
    """Parse *xml_string* into an SPCertEnc instance."""
    return saml2.create_class_from_xml_string(SPCertEnc, xml_string)
# Registries mapping element tags to their parser functions / classes.
ELEMENT_FROM_STRING = {
    SPCertEnc.c_tag: spcertenc_from_string,
    SPCertEncType_.c_tag: spcertenc_type__from_string,
}
ELEMENT_BY_TAG = {
    'SPCertEnc': SPCertEnc,
    'SPCertEncType': SPCertEncType_,
}
def factory(tag, **kwargs):
    """Instantiate the element class registered under *tag* with *kwargs*."""
    element_cls = ELEMENT_BY_TAG[tag]
    return element_cls(**kwargs)
"saml2.SamlBase.c_children.copy",
"saml2.xmldsig.KeyInfo",
"saml2.SamlBase.__init__",
"saml2.SamlBase.c_cardinality.copy",
"saml2.SamlBase.c_attributes.copy",
"saml2.create_class_from_xml_string"
] | [((345, 371), 'saml2.SamlBase.c_children.copy', 'SamlBase.c_children.copy', ([], {}), '()\n', (369, 371), False, 'from saml2 import SamlBase\n'), ((391, 419), 'saml2.SamlBase.c_attributes.copy', 'SamlBase.c_attributes.copy', ([], {}), '()\n', (417, 419), False, 'from saml2 import SamlBase\n'), ((486, 515), 'saml2.SamlBase.c_cardinality.copy', 'SamlBase.c_cardinality.copy', ([], {}), '()\n', (513, 515), False, 'from saml2 import SamlBase\n'), ((1586, 1648), 'saml2.create_class_from_xml_string', 'saml2.create_class_from_xml_string', (['SPCertEncType_', 'xml_string'], {}), '(SPCertEncType_, xml_string)\n', (1620, 1648), False, 'import saml2\n'), ((2078, 2135), 'saml2.create_class_from_xml_string', 'saml2.create_class_from_xml_string', (['SPCertEnc', 'xml_string'], {}), '(SPCertEnc, xml_string)\n', (2112, 2135), False, 'import saml2\n'), ((1072, 1192), 'saml2.SamlBase.__init__', 'SamlBase.__init__', (['self'], {'text': 'text', 'extension_elements': 'extension_elements', 'extension_attributes': 'extension_attributes'}), '(self, text=text, extension_elements=extension_elements,\n extension_attributes=extension_attributes)\n', (1089, 1192), False, 'from saml2 import SamlBase\n'), ((1377, 1405), 'saml2.xmldsig.KeyInfo', 'KeyInfo', ([], {'x509_data': 'x509_data'}), '(x509_data=x509_data)\n', (1384, 1405), False, 'from saml2.xmldsig import KeyInfo\n')] |
# Generated by Django 2.2.4 on 2019-08-24 06:02
from django.db import connection as con, migrations
from psycopg2 import sql
def remove_old_migration_refs(apps, schema_editor):
    """Forward migration step: purge ``django_migrations`` rows belonging to
    apps that were merged into the single ``ontask`` app."""
    __sql_delete_migration_ref = 'DELETE FROM django_migrations WHERE app={0}'
    old_apps = [
        'action', 'core', 'dataops', 'logs', 'oauth', 'ontask_oauth',
        'profiles', 'scheduler', 'table', 'workflow']
    with con.cursor() as cursor:
        for app_name in old_apps:
            # psycopg2 sql composition keeps the app name safely quoted.
            cursor.execute(
                sql.SQL(__sql_delete_migration_ref).format(
                    sql.Literal(app_name)))
class Migration(migrations.Migration):
    """Clean up migration history left behind by the pre-merge app layout."""
    dependencies = [
        ('ontask', '0003_transfer_siteprefs'),
    ]
    operations = [
        migrations.RunPython(code=remove_old_migration_refs),
    ]
| [
"psycopg2.sql.Literal",
"django.db.connection.cursor",
"django.db.migrations.RunPython",
"psycopg2.sql.SQL"
] | [((411, 423), 'django.db.connection.cursor', 'con.cursor', ([], {}), '()\n', (421, 423), True, 'from django.db import connection as con, migrations\n'), ((745, 797), 'django.db.migrations.RunPython', 'migrations.RunPython', ([], {'code': 'remove_old_migration_refs'}), '(code=remove_old_migration_refs)\n', (765, 797), False, 'from django.db import connection as con, migrations\n'), ((577, 598), 'psycopg2.sql.Literal', 'sql.Literal', (['app_name'], {}), '(app_name)\n', (588, 598), False, 'from psycopg2 import sql\n'), ((513, 548), 'psycopg2.sql.SQL', 'sql.SQL', (['__sql_delete_migration_ref'], {}), '(__sql_delete_migration_ref)\n', (520, 548), False, 'from psycopg2 import sql\n')] |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
from . import utils
from flask_babel import lazy_gettext as _
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
    """
    A data ingestion extension for the bAbI dataset
    """

    def __init__(self, is_inference_db=False, **kwargs):
        """
        :param is_inference_db: True when this instance backs an inference DB.
        Remaining keyword arguments (form fields such as ``story_folder``,
        ``task_id``, ``pct_val``, ``snippet``) are stored by the base class.
        """
        super(DataIngestion, self).__init__(**kwargs)

        self.userdata['is_inference_db'] = is_inference_db

        if 'train_text_data' not in self.userdata:
            # get task ID; None selects all tasks
            try:
                task_id = int(self.task_id)
            except (AttributeError, TypeError, ValueError):
                # Fix: was a bare ``except`` — only conversion/lookup failures
                # should fall back to "all tasks".
                task_id = None
            self.userdata['task_id'] = task_id

            # get data - this doesn't scale well to huge datasets but this makes it
            # straightforward to create a mapping of words to indices and figure out max
            # dimensions of stories and sentences
            self.userdata['train_text_data'] = utils.parse_folder_phase(
                self.story_folder, task_id, train=True)
            self.userdata['stats'] = utils.get_stats(self.userdata['train_text_data'])

    @override
    def encode_entry(self, entry):
        """Encode one sample with the vocabulary/dimensions computed at init."""
        stats = self.userdata['stats']
        return utils.encode_sample(entry, stats['word_map'], stats['sentence_size'], stats['story_size'])

    @staticmethod
    @override
    def get_category():
        return "Text"

    @staticmethod
    @override
    def get_id():
        return "text-babi"

    @staticmethod
    @override
    def get_dataset_form():
        return DatasetForm()

    @staticmethod
    @override
    def get_dataset_template(form):
        """
        parameters:
        - form: form returned by get_dataset_form(). This may be populated
        with values if the job was cloned
        return:
        - (template, context) tuple
        - template is a Jinja template to use for rendering dataset creation
        options
        - context is a dictionary of context variables to use for rendering
        the form
        """
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        # Fix: close the template file instead of leaking the handle.
        with open(os.path.join(extension_dir, DATASET_TEMPLATE), "r") as template_file:
            template = template_file.read()
        context = {'form': form}
        return (template, context)

    @override
    def get_inference_form(self):
        return InferenceForm()

    @staticmethod
    @override
    def get_inference_template(form):
        """Return the (template, context) tuple for the inference form."""
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r") as template_file:
            template = template_file.read()
        context = {'form': form}
        return (template, context)

    @staticmethod
    @override
    def get_title():
        return _("bAbI")

    @override
    def itemize_entries(self, stage):
        """Return the list of raw samples to encode for the given DB *stage*."""
        entries = []
        if not self.userdata['is_inference_db']:
            data = self.userdata['train_text_data']
            # Carve the validation split off the front of the training data.
            n_val_entries = int(len(data)*self.pct_val/100)
            if stage == constants.TRAIN_DB:
                entries = data[n_val_entries:]
            elif stage == constants.VAL_DB:
                entries = data[:n_val_entries]
        elif stage == constants.TEST_DB:
            if not bool(self.snippet):
                raise ValueError("You must write a story and a question")
            entries = utils.parse_lines(str(self.snippet).splitlines())
        return entries
| [
"flask_babel.lazy_gettext",
"os.path.abspath",
"os.path.join"
] | [((2972, 2981), 'flask_babel.lazy_gettext', '_', (['"""bAbI"""'], {}), "('bAbI')\n", (2973, 2981), True, 'from flask_babel import lazy_gettext as _\n'), ((2354, 2379), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2369, 2379), False, 'import os\n'), ((2723, 2748), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2738, 2748), False, 'import os\n'), ((2405, 2450), 'os.path.join', 'os.path.join', (['extension_dir', 'DATASET_TEMPLATE'], {}), '(extension_dir, DATASET_TEMPLATE)\n', (2417, 2450), False, 'import os\n'), ((2774, 2821), 'os.path.join', 'os.path.join', (['extension_dir', 'INFERENCE_TEMPLATE'], {}), '(extension_dir, INFERENCE_TEMPLATE)\n', (2786, 2821), False, 'import os\n')] |
import os,sys
import pandas as pd
import numpy as np
import subprocess
from tqdm import tqdm
from ras_method import ras_method
import warnings
warnings.filterwarnings('ignore')
def est_trade_value(x, output_new, sector):
    """
    Estimate the trade value between two regions for a sector.

    :param x: pandas Series/row with fields ``reg1``, ``reg2`` and ``gdp``.
    :param output_new: DataFrame indexed by a (region, sector-row) MultiIndex;
        row sums give each region's sectoral output.
    :param sector: sector code, or 'other1'/'other2' which both map onto the
        import ('IMP') row.
    :return: *x* with ``gdp`` scaled by the smaller of the two regions'
        sector output (the binding side of the trade flow).
    """
    # Fix: ``sector is not 'other1'`` compared identity of string literals,
    # which depends on interning and raises SyntaxWarning on modern Python;
    # compare by value instead.
    if sector not in ('other1', 'other2'):
        sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == sector].reset_index()
    else:
        sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == 'IMP'].reset_index()

    x['gdp'] = x.gdp * min(sec_output.loc[sec_output.region == x.reg1].values[0][2],
                           sec_output.loc[sec_output.region == x.reg2].values[0][2])
    return x
def estimate(table='INDEC',year=2015,print_output=False,print_progress=True):
"""
Function to create a province-level MRIO table, based on a national IO table. The default is the INDEC table.
"""
data_path = os.path.join('..','data')
# load sector data
sectors = list(pd.read_excel(os.path.join(data_path,'other_sources',
'industry_high_level_classification.xlsx'))['SEC_CODE'].values)
# load provincial mappers
reg_mapper = pd.read_excel(os.path.join(data_path,'INDEC','sh_cou_06_16.xls'),sheet_name='reg_mapper',header=None).iloc[:,:2]
reg_mapper = dict(zip(reg_mapper[0],reg_mapper[1]))
# load provincial data
prov_data = pd.read_excel(os.path.join(data_path,'INDEC','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = [x.replace(' ','_') for x in ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']]
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
prov_data = prov_data.replace(0, 1)
### Create proxy data for first iteration
sectors+['other1','other2']
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join('..','mrio_downscaling','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
def change_name(x):
if x in sectors:
return 'sec'+x
elif x == 'other1':
return 'other11'
else:
return 'other21'
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
"""
Create first version of MRIO for Argentina, without trade
"""
### save basetable for disaggregation usin the specific source:
basetable = pd.read_csv(os.path.join(data_path,'national_tables','{}_{}.csv'.format(year,table)),index_col=[0])
basetable.to_csv(os.path.join('..','mrio_downscaling','basetable.csv'),header=False,index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_notrade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
### load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output1.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*1 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = MRIO.xs('VA', level=1, axis=0).sum(axis=0)
valueA.drop('FD', level=1,axis=0,inplace=True)
valueA.drop('EXP', level=1,axis=0,inplace=True)
imports = MRIO.xs('IMP', level=1, axis=0).sum(axis=0)
imports.drop('FD', level=1,axis=0,inplace=True)
imports.drop('EXP', level=1,axis=0,inplace=True)
FinalD = MRIO.xs('FD', level=1, axis=1).sum(axis=1)
FinalD.drop('VA', level=1,axis=0,inplace=True)
FinalD.drop('IMP', level=1,axis=0,inplace=True)
Export = MRIO.xs('EXP', level=1, axis=1).sum(axis=1)
Export.drop('VA', level=1,axis=0,inplace=True)
Export.drop('IMP', level=1,axis=0,inplace=True)
output_new = MRIO.copy()
"""
Balance first MRIO version
"""
# convert to numpy matrix
X0 = MRIO.as_matrix()
# get sum of rows and columns
u = X0.sum(axis=1)
v = X0.sum(axis=0)
# and only keep T
v[:(len(u)-2)] = u[:-2]
# apply RAS method to rebalance the table
X1 = ras_method(X0, u, v, eps=1e-5,print_out=print_output)
#translate to pandas dataframe
output_new = pd.DataFrame(X1)
output_new.index = index_mi
output_new.columns = column_mi
if print_progress:
print('NOTE : Balanced MRIO table without trade finished using {} data'.format(table))
"""
Create second version of MRIO for Argentina, with trade
"""
### Load OD matrix
od_matrix_total = pd.DataFrame(pd.read_excel(os.path.join(data_path,'OD_data','province_ods.xlsx'),
sheet_name='total',index_col=[0,1],usecols =[0,1,2,3,4,5,6,7])).unstack(1).fillna(0)
od_matrix_total.columns.set_levels(['A','G','C','D','B','I'],level=0,inplace=True)
od_matrix_total.index = od_matrix_total.index.map(reg_mapper)
od_matrix_total = od_matrix_total.stack(0)
od_matrix_total.columns = od_matrix_total.columns.map(reg_mapper)
od_matrix_total = od_matrix_total.swaplevel(i=-2, j=-1, axis=0)
od_matrix_total = od_matrix_total.loc[:, od_matrix_total.columns.notnull()]
### Create proxy data
# proxy level 14
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, region_names],
names=['sec1', 'reg1','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if sector in ['A','G','C','D','B','I']:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
elif (sector is not 'other1') & (sector is not 'other2') & (sector not in ['A','G','C','D','B','I']): # & (sector not in ['L','M','N','O','P']):
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
#proxy_trade[0].loc[(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.9
#proxy_trade[0].loc[~(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.1
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = sector+'1'
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_trade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
# load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output2.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD','EXP']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*2 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'VA'].sum(axis='index'))
valueA.columns = pd.MultiIndex.from_product([['Total'],['ValueA']],names=['region','row'])
IMP = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'IMP'].sum(axis='index'))
IMP.columns = pd.MultiIndex.from_product([['Total'],['IMP']],names=['region','row'])
output = pd.concat([MRIO.loc[~MRIO.index.get_level_values(1).isin(['FD','EXP'])]])
output = output.drop(['VA','IMP'], level=1)
output = pd.concat([output,valueA.T,IMP.T])
output = output.reindex(column_mi_reorder, axis='columns')
mrio_arg = ras_method(np.array(output).T,np.array(list(output.sum(axis=1))[:384]+list(output.sum(axis=0)[-48:])),
np.array(list(output.sum(axis=1))[:384]+[output.loc[('Total','ValueA'),:].sum(),output.loc[('Total','IMP'),:].sum()]),
eps=1e-3,print_out=print_output)
mrio_argentina = pd.DataFrame(mrio_arg.T,index=output.index,columns=output.columns)
mrio_argentina.to_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)))
if print_progress:
print('NOTE : Balanced MRIO table with trade finished using {} data'.format(table))
def prepare_table_mria(table='INDEC',year='2015',print_output=True):
    """
    Convert the balanced MRIO table to an Excel file in which all elements
    of the table are disaggregated into the sheets expected by the MRIA model.

    Reads ``MRIO_Argentina_{table}_{year}.csv`` (two-level row and column
    index) and writes ``mrio_argentina_disaggregated_{table}_{year}.xlsx``
    with paired data/label sheets: T (inter-industry block), FD (final
    demand), ExpROW (exports to the rest of the world) and VA (value
    added + imports).

    Parameters
    ----------
    table : str
        Source dataset that was used to build the MRIO (e.g. 'INDEC', 'GTAP').
    year : str
        Year of the source data; used in the input/output file names.
    print_output : bool
        If True, print a note once the workbook has been written.
    """
    data_path = os.path.join('..','data')
    # load the balanced MRIO table with its two-level (region, row/col) index
    MRIO = pd.read_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)),index_col=[0,1],header=[0,1])
    Xnew = MRIO.copy()
    # add a tiny epsilon so the downstream model never sees exact zeros
    Xnew = Xnew+1e-6
    # open the output workbook
    writer = pd.ExcelWriter(os.path.join(data_path,'MRIO', 'mrio_argentina_disaggregated_{}_{}.xlsx'.format(table,year)))
    # write T: the 384x384 inter-industry block
    # (384 rows -- presumably regions x sectors; TODO confirm against the builder)
    df_T = Xnew.iloc[:384, :384]
    df_T.columns = df_T.columns.droplevel()
    df_labels_T = pd.DataFrame(df_T.reset_index()[['region', 'row']])
    df_T.reset_index(inplace=True, drop=True)
    df_T.to_excel(writer, 'T', index=False, header=False)
    df_labels_T.to_excel(writer, 'labels_T', index=False, header=False)
    # write FD: final-demand columns only (second column level == 'FD')
    df_FD = Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='FD']
    df_labels_FD = pd.DataFrame(list(df_FD.columns))
    df_FD.columns = df_FD.columns.droplevel()
    df_FD.reset_index(inplace=True, drop=True)
    df_FD.to_excel(writer, 'FD', index=False, header=False)
    df_labels_FD.to_excel(writer, 'labels_FD', index=False, header=False)
    # write ExpROW: all 'EXP' columns summed into a single export column
    df_ExpROW = pd.DataFrame(Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='EXP'].sum(axis=1))
    df_labels_ExpROW = pd.DataFrame(['Export'])
    df_ExpROW.reset_index(inplace=True, drop=True)
    df_ExpROW.to_excel(writer, 'ExpROW', index=False, header=False)
    df_labels_ExpROW.reset_index(inplace=True, drop=True)
    df_labels_ExpROW.columns = ['Export']
    df_labels_ExpROW.to_excel(writer, 'labels_ExpROW', index=False, header=False)
    # write VA: value-added row plus the imports row, transposed to columns
    # NOTE(review): VA is sliced to :409 columns while imports uses all
    # columns -- presumably intentional, verify against the table layout
    df_VA = pd.DataFrame(Xnew.iloc[384:, :409].T[('Total', 'ValueA')])
    df_VA.columns = ['VA']
    df_VA['imports'] = pd.DataFrame(Xnew.iloc[384:, :].T[('Total', 'IMP')])
    df_VA.reset_index(inplace=True, drop=True)
    df_VA.to_excel(writer, 'VA', index=False, header=False)
    df_labels_VA = pd.DataFrame(['Import', 'VA']).T
    df_labels_VA.to_excel(writer, 'labels_VA', index=False, header=False)
    # flush the workbook to disk
    writer.save()
    if print_output:
        print('NOTE : MRIO table ready to use for MRIA model using {} data'.format(table))
if __name__ == "__main__":
    # Run the full pipeline with GTAP 2014 data when executed as a script.
    estimate(table='GTAP',year='2014',print_output=True)
prepare_table_mria(table='GTAP',year='2014',print_output=True) | [
"pandas.MultiIndex.from_product",
"pandas.MultiIndex.from_arrays",
"os.path.join",
"ras_method.ras_method",
"numpy.array",
"pandas.DataFrame",
"pandas.concat",
"warnings.filterwarnings"
] | [((144, 177), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (167, 177), False, 'import warnings\n'), ((986, 1012), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (998, 1012), False, 'import os, sys\n'), ((3994, 4163), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[sectors + ['other1', 'other2'], region_names, sectors + ['other1',\n 'other2'], region_names]"], {'names': "['sec1', 'reg1', 'sec2', 'reg2']"}), "([sectors + ['other1', 'other2'], region_names, \n sectors + ['other1', 'other2'], region_names], names=['sec1', 'reg1',\n 'sec2', 'reg2'])\n", (4020, 4163), True, 'import pandas as pd\n'), ((6657, 6734), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, rows]'], {'names': "('region', 'row')"}), "([region_names_list, rows], names=('region', 'row'))\n", (6682, 6734), True, 'import pandas as pd\n'), ((6751, 6828), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, cols]'], {'names': "('region', 'col')"}), "([region_names_list, cols], names=('region', 'col'))\n", (6776, 6828), True, 'import pandas as pd\n'), ((7359, 7452), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_col, sector_only + col_only]'], {'names': "('region', 'col')"}), "([region_col, sector_only + col_only], names=(\n 'region', 'col'))\n", (7384, 7452), True, 'import pandas as pd\n'), ((8445, 8500), 'ras_method.ras_method', 'ras_method', (['X0', 'u', 'v'], {'eps': '(1e-05)', 'print_out': 'print_output'}), '(X0, u, v, eps=1e-05, print_out=print_output)\n', (8455, 8500), False, 'from ras_method import ras_method\n'), ((8552, 8568), 'pandas.DataFrame', 'pd.DataFrame', (['X1'], {}), '(X1)\n', (8564, 8568), True, 'import pandas as pd\n'), ((9555, 9679), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[sectors + ['other1', 'other2'], region_names, region_names]"], {'names': "['sec1', 
'reg1', 'reg2']"}), "([sectors + ['other1', 'other2'], region_names,\n region_names], names=['sec1', 'reg1', 'reg2'])\n", (9581, 9679), True, 'import pandas as pd\n'), ((12232, 12401), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[sectors + ['other1', 'other2'], region_names, sectors + ['other1',\n 'other2'], region_names]"], {'names': "['sec1', 'reg1', 'sec2', 'reg2']"}), "([sectors + ['other1', 'other2'], region_names, \n sectors + ['other1', 'other2'], region_names], names=['sec1', 'reg1',\n 'sec2', 'reg2'])\n", (12258, 12401), True, 'import pandas as pd\n'), ((14857, 14934), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, rows]'], {'names': "('region', 'row')"}), "([region_names_list, rows], names=('region', 'row'))\n", (14882, 14934), True, 'import pandas as pd\n'), ((14951, 15028), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_names_list, cols]'], {'names': "('region', 'col')"}), "([region_names_list, cols], names=('region', 'col'))\n", (14976, 15028), True, 'import pandas as pd\n'), ((15565, 15658), 'pandas.MultiIndex.from_arrays', 'pd.MultiIndex.from_arrays', (['[region_col, sector_only + col_only]'], {'names': "('region', 'col')"}), "([region_col, sector_only + col_only], names=(\n 'region', 'col'))\n", (15590, 15658), True, 'import pandas as pd\n'), ((15802, 15878), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Total'], ['ValueA']]"], {'names': "['region', 'row']"}), "([['Total'], ['ValueA']], names=['region', 'row'])\n", (15828, 15878), True, 'import pandas as pd\n'), ((15987, 16060), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['Total'], ['IMP']]"], {'names': "['region', 'row']"}), "([['Total'], ['IMP']], names=['region', 'row'])\n", (16013, 16060), True, 'import pandas as pd\n'), ((16207, 16243), 'pandas.concat', 'pd.concat', (['[output, valueA.T, IMP.T]'], {}), '([output, valueA.T, IMP.T])\n', (16216, 16243), True, 
'import pandas as pd\n'), ((16648, 16716), 'pandas.DataFrame', 'pd.DataFrame', (['mrio_arg.T'], {'index': 'output.index', 'columns': 'output.columns'}), '(mrio_arg.T, index=output.index, columns=output.columns)\n', (16660, 16716), True, 'import pandas as pd\n'), ((17132, 17158), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""'], {}), "('..', 'data')\n", (17144, 17158), False, 'import os, sys\n'), ((18402, 18426), 'pandas.DataFrame', 'pd.DataFrame', (["['Export']"], {}), "(['Export'])\n", (18414, 18426), True, 'import pandas as pd\n'), ((18756, 18812), 'pandas.DataFrame', 'pd.DataFrame', (["Xnew.iloc[384:, :409].T['Total', 'ValueA']"], {}), "(Xnew.iloc[384:, :409].T['Total', 'ValueA'])\n", (18768, 18812), True, 'import pandas as pd\n'), ((18865, 18915), 'pandas.DataFrame', 'pd.DataFrame', (["Xnew.iloc[384:, :].T['Total', 'IMP']"], {}), "(Xnew.iloc[384:, :].T['Total', 'IMP'])\n", (18877, 18915), True, 'import pandas as pd\n'), ((1468, 1528), 'os.path.join', 'os.path.join', (['data_path', '"""INDEC"""', '"""PIB_provincial_06_17.xls"""'], {}), "(data_path, 'INDEC', 'PIB_provincial_06_17.xls')\n", (1480, 1528), False, 'import os, sys\n'), ((2623, 2682), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""proxy_reg_arg.csv"""'], {}), "('..', 'mrio_downscaling', 'proxy_reg_arg.csv')\n", (2635, 2682), False, 'import os, sys\n'), ((6076, 6131), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""basetable.csv"""'], {}), "('..', 'mrio_downscaling', 'basetable.csv')\n", (6088, 6131), False, 'import os, sys\n'), ((6853, 6906), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""output1.csv"""'], {}), "('..', 'mrio_downscaling', 'output1.csv')\n", (6865, 6906), False, 'import os, sys\n'), ((15053, 15106), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""', '"""output2.csv"""'], {}), "('..', 'mrio_downscaling', 'output2.csv')\n", (15065, 15106), False, 'import os, sys\n'), 
((19044, 19074), 'pandas.DataFrame', 'pd.DataFrame', (["['Import', 'VA']"], {}), "(['Import', 'VA'])\n", (19056, 19074), True, 'import pandas as pd\n'), ((6295, 6333), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""'], {}), "('..', 'mrio_downscaling')\n", (6307, 6333), False, 'import os, sys\n'), ((14497, 14535), 'os.path.join', 'os.path.join', (['""".."""', '"""mrio_downscaling"""'], {}), "('..', 'mrio_downscaling')\n", (14509, 14535), False, 'import os, sys\n'), ((16333, 16349), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (16341, 16349), True, 'import numpy as np\n'), ((1255, 1307), 'os.path.join', 'os.path.join', (['data_path', '"""INDEC"""', '"""sh_cou_06_16.xls"""'], {}), "(data_path, 'INDEC', 'sh_cou_06_16.xls')\n", (1267, 1307), False, 'import os, sys\n'), ((1069, 1156), 'os.path.join', 'os.path.join', (['data_path', '"""other_sources"""', '"""industry_high_level_classification.xlsx"""'], {}), "(data_path, 'other_sources',\n 'industry_high_level_classification.xlsx')\n", (1081, 1156), False, 'import os, sys\n'), ((4341, 4394), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (4353, 4394), True, 'import pandas as pd\n'), ((5084, 5137), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (5096, 5137), True, 'import pandas as pd\n'), ((12582, 12635), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (12594, 12635), True, 'import pandas as pd\n'), ((13497, 13550), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['year', 'gdp']", 'index': 'mi_index'}), "(columns=['year', 'gdp'], index=mi_index)\n", (13509, 13550), True, 'import pandas as pd\n'), ((8905, 8960), 'os.path.join', 'os.path.join', (['data_path', '"""OD_data"""', '"""province_ods.xlsx"""'], 
{}), "(data_path, 'OD_data', 'province_ods.xlsx')\n", (8917, 8960), False, 'import os, sys\n')] |
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import (
AuthenticationForm,
UserCreationForm,
UsernameField,
)
User = get_user_model()
class UserLoginForm(AuthenticationForm):
    """Login form that restyles the default auth fields with utility CSS classes."""

    username = UsernameField(
        widget=forms.TextInput(attrs={'class': 'bg-gray-100 rounded-lg p-2'})
    )
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'bg-gray-100 rounded-lg p-2'})
    )

    def __init__(self, *args, **kwargs):
        # No extra initialisation required; override kept for API parity.
        super().__init__(*args, **kwargs)
class UserSignUpForm(UserCreationForm):
    """Sign-up form whose widgets carry the shared utility CSS styling."""

    username = forms.CharField(
        widget=forms.TextInput(attrs={"class": "bg-gray-100 rounded-lg p-2"})
    )
    password1 = forms.CharField(
        widget=forms.PasswordInput(attrs={"class": "bg-gray-100 rounded-lg p-2"})
    )
    password2 = forms.CharField(
        widget=forms.PasswordInput(attrs={"class": "bg-gray-100 rounded-lg p-2"})
    )

    def __init__(self, *args, **kwargs):
        # Behaviour is inherited unchanged from UserCreationForm.
        super().__init__(*args, **kwargs)
| [
"django.contrib.auth.get_user_model",
"django.forms.PasswordInput",
"django.forms.TextInput"
] | [((187, 203), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (201, 203), False, 'from django.contrib.auth import get_user_model\n'), ((386, 448), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'bg-gray-100 rounded-lg p-2'}"}), "(attrs={'class': 'bg-gray-100 rounded-lg p-2'})\n", (401, 448), False, 'from django import forms\n'), ((497, 563), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'bg-gray-100 rounded-lg p-2'}"}), "(attrs={'class': 'bg-gray-100 rounded-lg p-2'})\n", (516, 563), False, 'from django import forms\n'), ((795, 857), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'class': 'bg-gray-100 rounded-lg p-2'}"}), "(attrs={'class': 'bg-gray-100 rounded-lg p-2'})\n", (810, 857), False, 'from django import forms\n'), ((912, 978), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'bg-gray-100 rounded-lg p-2'}"}), "(attrs={'class': 'bg-gray-100 rounded-lg p-2'})\n", (931, 978), False, 'from django import forms\n'), ((1086, 1152), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'class': 'bg-gray-100 rounded-lg p-2'}"}), "(attrs={'class': 'bg-gray-100 rounded-lg p-2'})\n", (1105, 1152), False, 'from django import forms\n')] |
from __future__ import annotations
from typing import Any, Dict, Optional
from boa3.model.method import Method
from boa3.model.property import Property
from boa3.model.type.classes.classarraytype import ClassArrayType
from boa3.model.variable import Variable
class OracleType(ClassArrayType):
    """
    Represents the Oracle interop class exposed to smart contracts.
    """

    def __init__(self):
        super().__init__('Oracle')
        self._variables: Dict[str, Variable] = {}
        self._class_methods: Dict[str, Method] = {}
        self._constructor: Method = None

    @property
    def instance_variables(self) -> Dict[str, Variable]:
        # hand out a copy so callers cannot mutate internal state
        return dict(self._variables)

    @property
    def class_variables(self) -> Dict[str, Variable]:
        return {}

    @property
    def properties(self) -> Dict[str, Property]:
        return {}

    @property
    def static_methods(self) -> Dict[str, Method]:
        return {}

    @property
    def class_methods(self) -> Dict[str, Method]:
        # imported locally to avoid a circular import
        from boa3.model.builtin.interop.oracle.oraclegetpricemethod import OracleGetPriceMethod
        from boa3.model.builtin.interop.oracle.oraclerequestmethod import OracleRequestMethod

        if not self._class_methods:
            self._class_methods = {
                'get_price': OracleGetPriceMethod(),
                'request': OracleRequestMethod(),
            }
        return self._class_methods

    @property
    def instance_methods(self) -> Dict[str, Method]:
        return {}

    def constructor_method(self) -> Optional[Method]:
        return self._constructor

    @classmethod
    def build(cls, value: Any = None) -> OracleType:
        # return the singleton for None or any OracleType value
        if value is None or cls._is_type_of(value):
            return _Oracle

    @classmethod
    def _is_type_of(cls, value: Any):
        return isinstance(value, OracleType)


_Oracle = OracleType()
| [
"boa3.model.builtin.interop.oracle.oraclegetpricemethod.OracleGetPriceMethod",
"boa3.model.builtin.interop.oracle.oraclerequestmethod.OracleRequestMethod"
] | [((1319, 1341), 'boa3.model.builtin.interop.oracle.oraclegetpricemethod.OracleGetPriceMethod', 'OracleGetPriceMethod', ([], {}), '()\n', (1339, 1341), False, 'from boa3.model.builtin.interop.oracle.oraclegetpricemethod import OracleGetPriceMethod\n'), ((1370, 1391), 'boa3.model.builtin.interop.oracle.oraclerequestmethod.OracleRequestMethod', 'OracleRequestMethod', ([], {}), '()\n', (1389, 1391), False, 'from boa3.model.builtin.interop.oracle.oraclerequestmethod import OracleRequestMethod\n')] |
import os, glob

# Ensure the output directory exists; narrow OSError instead of a bare
# except so real errors (and KeyboardInterrupt) are no longer swallowed.
try:
    os.mkdir("output")
except OSError:
    pass
wiiudir = "input/wiiu"
# First run: create the input tree and prompt the user to add textures.
# On later runs makedirs raises (dir exists) and we fall through to convert.
try:
    os.makedirs(wiiudir)
    print('The directories have been made.')
    input('Insert your textures in input/wiiu and then run the tool again to convert it.')
except OSError:
    pass
dir = 'input/temp'  # name kept for backward compatibility (shadows builtin dir())
try:
    os.makedirs(dir)
except OSError:
    pass
# Strip the 44-byte .ckd container header from every texture, leaving raw .gtx.
if os.path.isdir(wiiudir):
    for ckdtextures in os.listdir(wiiudir):
        with open(wiiudir + '/' + ckdtextures, 'rb') as f:
            f.read(44)  # skip the container header
            data = f.read()
        gtxname = ckdtextures.replace('.tga.ckd', '.gtx').replace('.png.ckd', '.gtx')
        # context manager guarantees the output handle is closed even on error
        with open(dir + '/' + gtxname, 'wb') as dds:
            dds.write(data)
# Convert every extracted .gtx to .dds via the external texconv2 tool.
for gtx in os.listdir(dir):
    print('making ' + gtx.replace(".gtx", "") + '...')
    os.system("texconv2 -i input/temp/" + gtx + " -o output/" + gtx.replace(".gtx", ".dds"))
# Clean up the temporary directory contents.
filelist = glob.glob(os.path.join(dir, "*"))
for f in filelist:
    os.remove(f)
os.rmdir(dir) | [
"os.listdir",
"os.makedirs",
"os.path.join",
"os.rmdir",
"os.mkdir",
"os.remove"
] | [((1008, 1021), 'os.rmdir', 'os.rmdir', (['dir'], {}), '(dir)\n', (1016, 1021), False, 'import os, glob\n'), ((29, 47), 'os.mkdir', 'os.mkdir', (['"""output"""'], {}), "('output')\n", (37, 47), False, 'import os, glob\n'), ((106, 126), 'os.makedirs', 'os.makedirs', (['wiiudir'], {}), '(wiiudir)\n', (117, 126), False, 'import os, glob\n'), ((329, 345), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (340, 345), False, 'import os, glob\n'), ((399, 418), 'os.listdir', 'os.listdir', (['wiiudir'], {}), '(wiiudir)\n', (409, 418), False, 'import os, glob\n'), ((735, 750), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (745, 750), False, 'import os, glob\n'), ((943, 965), 'os.path.join', 'os.path.join', (['dir', '"""*"""'], {}), "(dir, '*')\n", (955, 965), False, 'import os, glob\n'), ((992, 1004), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (1001, 1004), False, 'import os, glob\n')] |
import sys
# First stdin line: n, the number of ratio pairs (not used directly below).
n = int(sys.stdin.readline().rstrip())
# Remaining input: 2*n whitespace-separated integers a1 b1 a2 b2 ...
ab = map(int, sys.stdin.read().split())
# zip(it, it) on a single iterator pairs up consecutive values as (a, b).
ab = list(zip(ab, ab))
def main(pairs=None):
    """Return the minimal final total c_a + c_b.

    Starting from the first pair, the counts (c_a, c_b) may only grow and
    must match each subsequent ratio (a, b) exactly, i.e. become (a*n, b*n)
    for the smallest integer n with a*n >= c_a and b*n >= c_b.

    Parameters
    ----------
    pairs : list of (int, int), optional
        Ratio pairs; defaults to the module-level ``ab`` parsed from stdin.

    Returns
    -------
    int
        c_a + c_b after processing the last pair (works for a single pair
        too; the original raised NameError in that case).
    """
    if pairs is None:
        pairs = ab
    c_a, c_b = pairs[0]
    for a, b in pairs[1:]:
        # Exact ceiling division replaces the original float-ratio while
        # loop, which incremented one count at a time (O(answer)) and could
        # misbehave on floating-point inexactness.
        n = max(-(-c_a // a), -(-c_b // b))
        c_a, c_b = a * n, b * n
    return c_a + c_b
if __name__ == "__main__":
    # Solve from the stdin input parsed above and print the answer.
    ans = main()
    print(ans)
| [
"sys.stdin.readline",
"sys.stdin.read"
] | [((22, 42), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (40, 42), False, 'import sys\n'), ((68, 84), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (82, 84), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
# =================================================================
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Copyright (c) 2015 <NAME>
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import warnings
class Profile(object):
    ''' base Profile class

        A profile plugs an additional information model (typename, output
        schema, namespaces and repository mapping) into the core CSW service
        model. Subclasses implement the NotImplementedError stubs below.
    '''
    def __init__(self, name, version, title, url,
        namespace, typename, outputschema, prefixes, model, core_namespaces,
        added_namespaces,repository):
        ''' Initialize profile and register it with the service *model*.

            Mutates *model* in place: advertises the profile's typename and
            output schema on the DescribeRecord, GetRecords, GetRecordById
            and (if configured) Harvest operations, and attaches the
            repository under model['typenames']. Also merges
            *added_namespaces* into *core_namespaces*.
        '''
        self.name = name
        self.version = version
        self.title = title
        self.url = url
        self.namespace = namespace
        self.typename = typename
        self.outputschema = outputschema
        self.prefixes = prefixes
        self.repository = repository
        # DescribeRecord is optional in the configured operations
        if 'DescribeRecord' in model['operations']:
            model['operations']['DescribeRecord']['parameters']\
            ['typeName']['values'].append(self.typename)
        model['operations']['GetRecords']['parameters']['outputSchema']\
        ['values'].append(self.outputschema)
        model['operations']['GetRecords']['parameters']['typeNames']\
        ['values'].append(self.typename)
        model['operations']['GetRecordById']['parameters']['outputSchema']\
        ['values'].append(self.outputschema)
        # Harvest may be disabled (e.g. read-only deployments)
        if 'Harvest' in model['operations']:
            model['operations']['Harvest']['parameters']['ResourceType']\
            ['values'].append(self.outputschema)
        # namespaces: merge the profile's namespaces into the core set
        core_namespaces.update(added_namespaces)
        # repository: expose this profile's typename mapping
        model['typenames'][self.typename] = self.repository
    def extend_core(self, model, namespaces, config):
        ''' Extend config.model and config.namespaces '''
        raise NotImplementedError
    def check_parameters(self):
        ''' Perform extra parameters checking.
            Return dict with keys "locator", "code", "text" or None '''
        raise NotImplementedError
    def get_extendedcapabilities(self):
        ''' Return ExtendedCapabilities child as lxml.etree.Element '''
        raise NotImplementedError
    def get_schemacomponents(self):
        ''' Return schema components as lxml.etree.Element list '''
        raise NotImplementedError
    def check_getdomain(self, kvp):
        '''Perform extra profile specific checks in the GetDomain request'''
        raise NotImplementedError
    def write_record(self, result, esn, outputschema, queryables):
        ''' Return csw:SearchResults child as lxml.etree.Element '''
        raise NotImplementedError
    def transform2dcmappings(self, queryables):
        ''' Transform information model mappings into csw:Record mappings '''
        raise NotImplementedError
def load_profiles(path, cls, profiles):
    ''' load CSW profiles, return dict by class name '''

    core_profiles = ('fgdc', 'atom', 'dif', 'gm03')
    aps = {'plugins': {}, 'loaded': {}}

    def register_subclasses(modulename):
        # import the profile module and walk down to the leaf namespace
        top = __import__(modulename)
        namespace = top.__dict__
        for part in modulename.split('.')[1:]:
            namespace = namespace[part].__dict__
        # collect every subclass of *cls* defined in that namespace
        for attr_name, attr in namespace.items():
            if attr_name == cls.__name__:
                continue
            try:
                is_profile = issubclass(attr, cls)
            except TypeError:  # attr is not a class at all
                continue
            if is_profile:
                aps['plugins'][attr_name] = attr

    for prof in profiles.split(','):
        if prof in core_profiles:
            # these profiles moved into core; warn instead of importing
            # (deprecation: https://github.com/geopython/pycsw/issues/118)
            warnings.warn('%s is now a core module, and does not need to be'
                          ' specified explicitly. So you can remove %s from '
                          'server.profiles' % (prof, prof))
        else:
            register_subclasses('%s.%s.%s' % (path.replace(os.sep, '.'), prof, prof))

    return aps
| [
"warnings.warn"
] | [((4782, 4937), 'warnings.warn', 'warnings.warn', (["('%s is now a core module, and does not need to be specified explicitly. So you can remove %s from server.profiles'\n % (prof, prof))"], {}), "(\n '%s is now a core module, and does not need to be specified explicitly. So you can remove %s from server.profiles'\n % (prof, prof))\n", (4795, 4937), False, 'import warnings\n')] |
# source http://itasuke.hatenablog.com/entry/2018/01/08/133510
import winreg

# Create the key; the context manager closes the handle automatically.
with winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc'):
    pass
# Remove the key again.
winreg.DeleteKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc')
| [
"winreg.CreateKeyEx",
"winreg.DeleteKeyEx"
] | [((88, 166), 'winreg.CreateKeyEx', 'winreg.CreateKeyEx', (['winreg.HKEY_CURRENT_USER', '"""Software\\\\__javacommons__\\\\abc"""'], {}), "(winreg.HKEY_CURRENT_USER, 'Software\\\\__javacommons__\\\\abc')\n", (106, 166), False, 'import winreg\n'), ((183, 261), 'winreg.DeleteKeyEx', 'winreg.DeleteKeyEx', (['winreg.HKEY_CURRENT_USER', '"""Software\\\\__javacommons__\\\\abc"""'], {}), "(winreg.HKEY_CURRENT_USER, 'Software\\\\__javacommons__\\\\abc')\n", (201, 261), False, 'import winreg\n')] |
import inspect
import re
import textwrap
import pytest
import pkg_resources
from .test_resources import Metadata
def strip_comments(s):
    """Drop blank lines and ``#``-comment lines; join the rest with newlines."""
    kept = []
    for line in s.split('\n'):
        stripped = line.strip()
        if stripped and not stripped.startswith('#'):
            kept.append(line)
    return '\n'.join(kept)
def parse_distributions(s):
    '''
    Parse a series of distribution specs of the form:

        {project_name}-{version}
            [optional, indented requirements specification]

    Example:

        foo-0.2
        bar-1.0
            foo>=3.0
            [feature]
            baz

    yield 2 distributions:
        - project_name=foo, version=0.2
        - project_name=bar, version=1.0,
          requires=['foo>=3.0', 'baz; extra=="feature"']
    '''
    s = s.strip()
    # Raw string: the pattern previously used the invalid escape '\s' in a
    # plain literal (DeprecationWarning today, SyntaxError in the future).
    for spec in re.split(r'\n(?=[^\s])', s):
        if not spec:
            continue
        # split off the "name-version" header from the optional indented body
        fields = spec.split('\n', 1)
        assert 1 <= len(fields) <= 2
        name, version = fields.pop(0).split('-')
        if fields:
            requires = textwrap.dedent(fields.pop(0))
            metadata = Metadata(('requires.txt', requires))
        else:
            metadata = None
        dist = pkg_resources.Distribution(project_name=name,
                                           version=version,
                                           metadata=metadata)
        yield dist
class FakeInstaller(object):
    """Callable installer stub: yields the first installable dist matching *req*."""

    def __init__(self, installable_dists):
        self._installable_dists = installable_dists

    def __call__(self, req):
        for dist in self._installable_dists:
            if dist in req:
                return dist
        return None
def parametrize_test_working_set_resolve(*test_list):
    """Build a ``pytest.mark.parametrize`` decorator from spec strings.

    Each entry of *test_list* is one string with six blank-line-separated
    sections: test id, installed dists, installable dists, requirements,
    and the expected resolution without / with ``replace_conflicting``.
    An expected section is either distribution specs or the bare name of a
    ``pkg_resources`` exception class.
    """
    idlist = []
    argvalues = []
    for test in test_list:
        (
            name,
            installed_dists,
            installable_dists,
            requirements,
            expected1, expected2
        ) = [
            strip_comments(s.lstrip()) for s in
            textwrap.dedent(test).lstrip().split('\n\n', 5)
        ]
        installed_dists = list(parse_distributions(installed_dists))
        installable_dists = list(parse_distributions(installable_dists))
        requirements = list(pkg_resources.parse_requirements(requirements))
        # each spec yields two test cases: with and without replacement
        for id_, replace_conflicting, expected in (
            (name, False, expected1),
            (name + '_replace_conflicting', True, expected2),
        ):
            idlist.append(id_)
            expected = strip_comments(expected.strip())
            # A bare identifier names a pkg_resources exception class.
            # Raw string fixes the invalid '\w' escape in a plain literal.
            if re.match(r'\w+$', expected):
                expected = getattr(pkg_resources, expected)
                assert issubclass(expected, Exception)
            else:
                expected = list(parse_distributions(expected))
            argvalues.append(pytest.param(installed_dists, installable_dists,
                                          requirements, replace_conflicting,
                                          expected))
    return pytest.mark.parametrize('installed_dists,installable_dists,'
                                'requirements,replace_conflicting,'
                                'resolved_dists_or_exception',
                                argvalues, ids=idlist)
@parametrize_test_working_set_resolve(
'''
# id
noop
# installed
# installable
# wanted
# resolved
# resolved [replace conflicting]
''',
'''
# id
already_installed
# installed
foo-3.0
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
''',
'''
# id
installable_not_installed
# installed
# installable
foo-3.0
foo-4.0
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
''',
'''
# id
not_installable
# installed
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
no_matching_version
# installed
# installable
foo-3.1
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installable_with_installed_conflict
# installed
foo-3.1
# installable
foo-3.5
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
foo-3.5
''',
'''
# id
not_installable_with_installed_conflict
# installed
foo-3.1
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installed_with_installed_require
# installed
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installed_with_conflicting_installed_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installed_with_installable_conflicting_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-2.9
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
''',
'''
# id
installed_with_installable_require
# installed
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-3.9
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_installed_require
# installed
foo-3.9
# installable
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_installable_require
# installed
# installable
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_conflicting_installable_require
# installed
foo-5
# installable
foo-2.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
''',
'''
# id
conflicting_installables
# installed
# installable
foo-2.9
foo-5.0
# wanted
foo>=2.1,!=3.1,<4
foo>=4
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
'''
# id
installables_with_conflicting_requires
# installed
# installable
foo-2.9
dep==1.0
baz-5.0
dep==2.0
dep-1.0
dep-2.0
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
'''
# id
installables_with_conflicting_nested_requires
# installed
# installable
foo-2.9
dep1
dep1-1.0
subdep<1.0
baz-5.0
dep2
dep2-1.0
subdep>1.0
subdep-0.9
subdep-1.1
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
)
def test_working_set_resolve(installed_dists, installable_dists, requirements,
                             replace_conflicting, resolved_dists_or_exception):
    """Resolve *requirements* against a pre-populated working set.

    When the parametrized expectation is an exception class, the resolve
    call must raise it; otherwise the resolved distributions must match the
    expected list (order-independent).
    """
    ws = pkg_resources.WorkingSet([])
    # Plain loop instead of list(map(...)) executed only for side effects.
    for dist in installed_dists:
        ws.add(dist)

    def resolve_call():
        # Named function instead of a lambda assignment (PEP 8 E731);
        # deferred so pytest.raises can observe the exception.
        return ws.resolve(
            requirements, installer=FakeInstaller(installable_dists),
            replace_conflicting=replace_conflicting,
        )

    if inspect.isclass(resolved_dists_or_exception):
        with pytest.raises(resolved_dists_or_exception):
            resolve_call()
    else:
        assert sorted(resolve_call()) == sorted(resolved_dists_or_exception)
| [
"re.split",
"textwrap.dedent",
"pkg_resources.Distribution",
"re.match",
"pytest.param",
"pytest.mark.parametrize",
"pkg_resources.parse_requirements",
"pytest.raises",
"pkg_resources.WorkingSet",
"inspect.isclass"
] | [((730, 757), 're.split', 're.split', (['"""\n(?=[^\\\\s])"""', 's'], {}), "('\\n(?=[^\\\\s])', s)\n", (738, 757), False, 'import re\n'), ((2899, 3053), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""installed_dists,installable_dists,requirements,replace_conflicting,resolved_dists_or_exception"""', 'argvalues'], {'ids': 'idlist'}), "(\n 'installed_dists,installable_dists,requirements,replace_conflicting,resolved_dists_or_exception'\n , argvalues, ids=idlist)\n", (2922, 3053), False, 'import pytest\n'), ((7837, 7865), 'pkg_resources.WorkingSet', 'pkg_resources.WorkingSet', (['[]'], {}), '([])\n', (7861, 7865), False, 'import pkg_resources\n'), ((8072, 8116), 'inspect.isclass', 'inspect.isclass', (['resolved_dists_or_exception'], {}), '(resolved_dists_or_exception)\n', (8087, 8116), False, 'import inspect\n'), ((1111, 1197), 'pkg_resources.Distribution', 'pkg_resources.Distribution', ([], {'project_name': 'name', 'version': 'version', 'metadata': 'metadata'}), '(project_name=name, version=version, metadata=\n metadata)\n', (1137, 1197), False, 'import pkg_resources\n'), ((2143, 2189), 'pkg_resources.parse_requirements', 'pkg_resources.parse_requirements', (['requirements'], {}), '(requirements)\n', (2175, 2189), False, 'import pkg_resources\n'), ((2456, 2483), 're.match', 're.match', (['"""\\\\w+$"""', 'expected'], {}), "('\\\\w+$', expected)\n", (2464, 2483), False, 'import re\n'), ((8131, 8173), 'pytest.raises', 'pytest.raises', (['resolved_dists_or_exception'], {}), '(resolved_dists_or_exception)\n', (8144, 8173), False, 'import pytest\n'), ((2709, 2806), 'pytest.param', 'pytest.param', (['installed_dists', 'installable_dists', 'requirements', 'replace_conflicting', 'expected'], {}), '(installed_dists, installable_dists, requirements,\n replace_conflicting, expected)\n', (2721, 2806), False, 'import pytest\n'), ((1915, 1936), 'textwrap.dedent', 'textwrap.dedent', (['test'], {}), '(test)\n', (1930, 1936), False, 'import textwrap\n')] |
from setuptools import find_packages, setup
# Read the pinned dependency list; the context manager guarantees the file
# handle is closed (the original open() inside the comprehension leaked it
# until garbage collection).
with open('requirements.txt') as requirements_file:
    install_requires = [dep.strip() for dep in requirements_file]

setup(
    name='yolo_tf2',
    version='1.5',
    packages=find_packages(),
    url='https://github.com/schissmantics/yolo-tf2',
    license='MIT',
    author='schismantics',
    author_email='<EMAIL>',
    description='yolo(v3/v4) implementation in keras and tensorflow 2.5',
    setup_requires=['numpy==1.19.5'],
    install_requires=install_requires,
    python_requires='>=3.7',
    entry_points={
        'console_scripts': [
            'yolotf2=yolo_tf2.cli:execute',
        ],
    },
)
| [
"setuptools.find_packages"
] | [((175, 190), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (188, 190), False, 'from setuptools import find_packages, setup\n')] |
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def node_match(n1, n2):
    """Two graph nodes are equivalent when they carry the same 'op' label."""
    return n1['op'] == n2['op']
def edge_match(e1, e2):
    """Every edge pair is considered equivalent (edges carry no attributes)."""
    return True
def gen_graph(adj, ops):
    """Build a DiGraph with 'op'-labelled nodes from an adjacency matrix.

    Only the strict upper triangle of *adj* is consulted, so the result is
    acyclic for node ordering 0..len(ops)-1.
    """
    graph = nx.DiGraph()
    for idx, name in enumerate(ops):
        graph.add_node(idx, op=name)
    assert adj.shape[0] == adj.shape[1] == len(ops)
    size = len(ops)
    graph.add_edges_from(
        (src, dst)
        for src in range(size)
        for dst in range(src + 1, size)
        if adj[src, dst] > 0
    )
    return graph
def preprocess_adj_op(adj, op):
    """Strip trailing zero padding from an (adjacency, one-hot op) pair.

    Returns the trimmed adjacency matrix and the decoded list of operation
    names. The consistency assertion checks that the number of padded
    columns, padded one-hot rows and padded adjacency rows agree.
    """
    OP_NAMES = {0: 'input', 1: 'conv1x1-bn-relu', 2: 'conv3x3-bn-relu',
                3: 'maxpool3x3', 4: 'output'}

    def _n_trailing_false(flags):
        # Count consecutive False entries at the end of the sequence.
        total = 0
        for flag in list(flags)[::-1]:
            if flag:
                break
            total += 1
        return total

    adj = np.array(adj).astype(int)
    op = np.array(op).astype(int)
    assert op.shape[0] == adj.shape[0] == adj.shape[1]
    adj_zero_col = _n_trailing_false(adj.any(axis=0))
    adj_zero_row = _n_trailing_false(adj.any(axis=1))
    op_zero_row = _n_trailing_false(op.any(axis=1))
    assert adj_zero_col == op_zero_row == adj_zero_row - 1, 'Inconsistant result {}={}={}'.format(adj_zero_col, op_zero_row, adj_zero_row - 1)
    keep = op.shape[0] - adj_zero_col
    names = [OP_NAMES[idx] for idx in op[:keep].argmax(axis=1)]
    return adj[:keep, :keep], names
if __name__ == '__main__':
    # Demo: build a few small architecture DAGs and compare them with
    # graph edit distance using the op-aware node matcher defined above.
    adj1 = np.array([[0, 1, 1, 1, 0],
                     [0, 0, 1, 0, 0],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 0]])
    op1 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out']
    adj2 = np.array([[0, 1, 1, 1, 0],
                     [0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 0]])
    op2 = ['in', 'conv1x1', 'mp3x3', 'conv3x3', 'out']
    adj3 = np.array([[0, 1, 1, 1, 0, 0],
                     [0, 0, 1, 0, 0, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 0, 0]])
    op3 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out','out2']
    # adj4/op4 carry zero padding and a one-hot op matrix; preprocess_adj_op
    # strips the padding and decodes the one-hot rows before graph building.
    adj4 = np.array([[0, 1, 1, 1, 0, 0],
                     [0, 0, 1, 0, 0, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0]])
    op4 = np.array([[1, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0]])
    adj4, op4 = preprocess_adj_op(adj4, op4)
    G1 = gen_graph(adj1, op1)
    G2 = gen_graph(adj2, op2)
    G3 = gen_graph(adj3, op3)
    G4 = gen_graph(adj4, op4)
    # Draw the four graphs side by side in one figure.
    plt.subplot(141)
    nx.draw(G1, with_labels=True, font_weight='bold')
    plt.subplot(142)
    nx.draw(G2, with_labels=True, font_weight='bold')
    plt.subplot(143)
    nx.draw(G3, with_labels=True, font_weight='bold')
    plt.subplot(144)
    nx.draw(G4, with_labels=True, font_weight='bold')
    # NOTE(review): return values of the edit-distance calls are discarded;
    # presumably meant for interactive inspection.
    nx.graph_edit_distance(G1,G2, node_match=node_match, edge_match=edge_match)
nx.graph_edit_distance(G2,G3, node_match=node_match, edge_match=edge_match) | [
"networkx.DiGraph",
"numpy.array",
"networkx.graph_edit_distance",
"matplotlib.pyplot.subplot",
"networkx.draw"
] | [((253, 265), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (263, 265), True, 'import networkx as nx\n'), ((1623, 1723), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1\n ], [0, 0, 0, 0, 0]])\n', (1631, 1723), True, 'import numpy as np\n'), ((1870, 1970), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1\n ], [0, 0, 0, 0, 0]])\n', (1878, 1970), True, 'import numpy as np\n'), ((2118, 2252), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]])\n', (2126, 2252), True, 'import numpy as np\n'), ((2428, 2562), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 1,\n 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]])\n', (2436, 2562), True, 'import numpy as np\n'), ((2674, 2791), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1], [0, 0, 0, 0, 0]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])\n', (2682, 2791), True, 'import numpy as np\n'), ((3061, 3077), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (3072, 3077), True, 'import matplotlib.pyplot as plt\n'), ((3082, 3131), 'networkx.draw', 'nx.draw', (['G1'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G1, with_labels=True, font_weight='bold')\n", 
(3089, 3131), True, 'import networkx as nx\n'), ((3136, 3152), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (3147, 3152), True, 'import matplotlib.pyplot as plt\n'), ((3157, 3206), 'networkx.draw', 'nx.draw', (['G2'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G2, with_labels=True, font_weight='bold')\n", (3164, 3206), True, 'import networkx as nx\n'), ((3211, 3227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (3222, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3232, 3281), 'networkx.draw', 'nx.draw', (['G3'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G3, with_labels=True, font_weight='bold')\n", (3239, 3281), True, 'import networkx as nx\n'), ((3286, 3302), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(144)'], {}), '(144)\n', (3297, 3302), True, 'import matplotlib.pyplot as plt\n'), ((3307, 3356), 'networkx.draw', 'nx.draw', (['G4'], {'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(G4, with_labels=True, font_weight='bold')\n", (3314, 3356), True, 'import networkx as nx\n'), ((3362, 3438), 'networkx.graph_edit_distance', 'nx.graph_edit_distance', (['G1', 'G2'], {'node_match': 'node_match', 'edge_match': 'edge_match'}), '(G1, G2, node_match=node_match, edge_match=edge_match)\n', (3384, 3438), True, 'import networkx as nx\n'), ((3442, 3518), 'networkx.graph_edit_distance', 'nx.graph_edit_distance', (['G2', 'G3'], {'node_match': 'node_match', 'edge_match': 'edge_match'}), '(G2, G3, node_match=node_match, edge_match=edge_match)\n', (3464, 3518), True, 'import networkx as nx\n'), ((959, 972), 'numpy.array', 'np.array', (['adj'], {}), '(adj)\n', (967, 972), True, 'import numpy as np\n'), ((994, 1006), 'numpy.array', 'np.array', (['op'], {}), '(op)\n', (1002, 1006), True, 'import numpy as np\n')] |
import bitmath
class V2RegistryException(Exception):
    """Base error type for the Docker Registry HTTP API v2 endpoints.

    Carries the v2 error code string, an HTTP status code, optional
    repository/scope context for auth handling, and a machine-readable
    detail payload serialized via :meth:`as_dict`.
    """

    def __init__(self, error_code_str, message, detail, http_status_code=400,
                 repository=None, scopes=None, is_read_only=False):
        super(V2RegistryException, self).__init__(message)
        self.http_status_code = http_status_code
        self.repository = repository
        self.scopes = scopes
        self.is_read_only = is_read_only
        self._error_code_str = error_code_str
        self._detail = detail

    def as_dict(self):
        """Return the JSON-serializable error body for the response."""
        detail = self._detail
        payload = {
            "code": self._error_code_str,
            "message": str(self),
            "detail": {} if detail is None else detail,
        }
        # The read-only flag is only advertised when set.
        if self.is_read_only:
            payload["is_readonly"] = True
        return payload
return error_dict
# --- Concrete registry errors -------------------------------------------
# Each subclass fixes the v2 API error code, the human-readable message and
# (where it differs from the default 400) the HTTP status code; callers only
# supply the optional machine-readable `detail` payload.
class BlobUnknown(V2RegistryException):
    # 404: the referenced blob digest does not exist in this registry.
    def __init__(self, detail=None):
        super(BlobUnknown, self).__init__("BLOB_UNKNOWN", "blob unknown to registry", detail, 404)
class BlobUploadInvalid(V2RegistryException):
    def __init__(self, detail=None):
        super(BlobUploadInvalid, self).__init__(
            "BLOB_UPLOAD_INVALID", "blob upload invalid", detail
        )
class BlobUploadUnknown(V2RegistryException):
    # 404: the upload session id is not known (expired or never created).
    def __init__(self, detail=None):
        super(BlobUploadUnknown, self).__init__(
            "BLOB_UPLOAD_UNKNOWN", "blob upload unknown to registry", detail, 404
        )
class DigestInvalid(V2RegistryException):
    def __init__(self, detail=None):
        super(DigestInvalid, self).__init__(
            "DIGEST_INVALID", "provided digest did not match uploaded content", detail
        )
class ManifestBlobUnknown(V2RegistryException):
    def __init__(self, detail=None):
        super(ManifestBlobUnknown, self).__init__(
            "MANIFEST_BLOB_UNKNOWN", "manifest blob unknown to registry", detail
        )
class ManifestInvalid(V2RegistryException):
    # HTTP status code is overridable by the caller for this error.
    def __init__(self, detail=None, http_status_code=400):
        super(ManifestInvalid, self).__init__(
            "MANIFEST_INVALID", "manifest invalid", detail, http_status_code
        )
class ManifestUnknown(V2RegistryException):
    def __init__(self, detail=None):
        super(ManifestUnknown, self).__init__("MANIFEST_UNKNOWN", "manifest unknown", detail, 404)
class TagExpired(V2RegistryException):
    # Accepts a custom message so callers can explain when/why the tag expired.
    def __init__(self, message=None, detail=None):
        super(TagExpired, self).__init__("TAG_EXPIRED", message or "Tag has expired", detail, 404)
class ManifestUnverified(V2RegistryException):
    def __init__(self, detail=None):
        super(ManifestUnverified, self).__init__(
            "MANIFEST_UNVERIFIED", "manifest failed signature verification", detail
        )
class NameInvalid(V2RegistryException):
    def __init__(self, detail=None, message=None):
        super(NameInvalid, self).__init__(
            "NAME_INVALID", message or "invalid repository name", detail
        )
class NameUnknown(V2RegistryException):
    def __init__(self, detail=None):
        super(NameUnknown, self).__init__(
            "NAME_UNKNOWN", "repository name not known to registry", detail, 404
        )
class SizeInvalid(V2RegistryException):
    def __init__(self, detail=None):
        super(SizeInvalid, self).__init__(
            "SIZE_INVALID", "provided length did not match content length", detail
        )
class TagAlreadyExists(V2RegistryException):
    # 409: conflicts with an existing tag (overwrite is disallowed).
    def __init__(self, detail=None):
        super(TagAlreadyExists, self).__init__(
            "TAG_ALREADY_EXISTS", "tag was already pushed", detail, 409
        )
class TagInvalid(V2RegistryException):
    def __init__(self, detail=None):
        super(TagInvalid, self).__init__("TAG_INVALID", "manifest tag did not match URI", detail)
class LayerTooLarge(V2RegistryException):
    """Raised when an uploaded blob exceeds the registry's size limit.

    Args:
        uploaded: size in bytes of the rejected blob, if known.
        max_allowed: configured maximum blob size in bytes, if known.
    """

    def __init__(self, uploaded=None, max_allowed=None):
        detail = {}
        message = "Uploaded blob is larger than allowed by this registry"
        if uploaded is not None and max_allowed is not None:
            detail = {
                "reason": "%s is greater than maximum allowed size %s" % (uploaded, max_allowed),
                "max_allowed": max_allowed,
                "uploaded": uploaded,
            }
            # Human-friendly sizes (e.g. "12.00 MiB") for the message.
            up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
            max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
            message = "Uploaded blob of %s is larger than %s allowed by this registry" % (
                up_str,
                max_str,
            )
        # Bug fix: the original never invoked the base initializer, leaving
        # _error_code_str/_detail/http_status_code unset so as_dict() raised
        # AttributeError and the message/detail were silently dropped.
        # 413 (Payload Too Large) matches the condition.
        super(LayerTooLarge, self).__init__("BLOB_UPLOAD_INVALID", message, detail, 413)
class Unauthorized(V2RegistryException):
    # 401: carries the repository and scopes so the caller can build the
    # appropriate auth challenge for the response.
    def __init__(self, detail=None, repository=None, scopes=None):
        super(Unauthorized, self).__init__(
            "UNAUTHORIZED",
            "access to the requested resource is not authorized",
            detail,
            401,
            repository=repository,
            scopes=scopes,
        )
class Unsupported(V2RegistryException):
    # 405: the requested operation is not supported by this registry.
    def __init__(self, detail=None, message=None):
        super(Unsupported, self).__init__(
            "UNSUPPORTED", message or "The operation is unsupported.", detail, 405
        )
class InvalidLogin(V2RegistryException):
    # 401 with the UNAUTHORIZED code; detail is always an empty dict.
    def __init__(self, message=None):
        super(InvalidLogin, self).__init__(
            "UNAUTHORIZED", message or "Specified credentials are invalid", {}, 401
        )
class InvalidRequest(V2RegistryException):
    def __init__(self, message=None):
        super(InvalidRequest, self).__init__(
            "INVALID_REQUEST", message or "Invalid request", {}, 400
        )
class NamespaceDisabled(V2RegistryException):
    def __init__(self, message=None):
        message = message or "This namespace is disabled. Please contact your system administrator."
        super(NamespaceDisabled, self).__init__("DENIED", message, {}, 405)
class BlobDownloadGeoBlocked(V2RegistryException):
    # 403: the client's source region is geo-IP blocked for this namespace.
    def __init__(self, detail=None):
        message = (
            "The region from which you are pulling has been geo-ip blocked. "
            + "Please contact the namespace owner."
        )
        super(BlobDownloadGeoBlocked, self).__init__("DENIED", message, detail, 403)
class ReadOnlyMode(V2RegistryException):
    # 405 with is_read_only=True so as_dict() advertises the read-only state.
    def __init__(self, detail=None):
        message = (
            "System is currently read-only. Pulls will succeed but all write operations "
            + "are currently suspended."
        )
        super(ReadOnlyMode, self).__init__("DENIED", message, detail, 405, is_read_only=True)
| [
"bitmath.Byte"
] | [((4295, 4317), 'bitmath.Byte', 'bitmath.Byte', (['uploaded'], {}), '(uploaded)\n', (4307, 4317), False, 'import bitmath\n'), ((4383, 4408), 'bitmath.Byte', 'bitmath.Byte', (['max_allowed'], {}), '(max_allowed)\n', (4395, 4408), False, 'import bitmath\n')] |
'''
LICENSE: MIT license
This module can help us know about who can ask when
we have troubles in some buggy codes while solving problems.
'''
from asyncio import gather, get_event_loop
from pandas import DataFrame, set_option
from online_judge import Online_Judge
# Shared asyncio loop used by Scoreboard.update for concurrent crawling.
loop = get_event_loop()
# Disable pandas column-width truncation so full HTML cell markup survives
# to_html(). `None` is the documented "no limit" value; the original `-1`
# is deprecated and rejected by modern pandas.
set_option('display.max_colwidth', None)
class Scoreboard:
    '''Handles a dataframe to build up a scoreboard.
    Attributes:
        problems: (list) A list of problem id which we are tracking.
        scoreboard: (Dataframe) A pandas.Dataframe that saves user attempts.
        by student id.
        online_judge: (Online_Judge) An FOJ api wrapper.
    '''
    def __init__(self, token, problems, problem_name):
        # problem_name maps problem id (str) -> display title for tooltips.
        self.problems = problems
        self.problem_name = problem_name
        self.online_judge = Online_Judge(token)
        self.scoreboard = DataFrame()
    def update(self):
        '''Update scoreboard using web crawler.
        Since api return a json message, we can use it to update scoreboard.
        '''
        tasks = []
        # One crawl task per problem; requests run concurrently in the
        # default executor via the module-level event loop.
        async def crawl(problem_id):
            return await loop.run_in_executor(None, self.online_judge.get_submission, problem_id)
        for problem_id in self.problems:
            task = loop.create_task(crawl(problem_id))
            tasks.append(task)
        # Columns are problem ids, rows are student ids; cells are either
        # NaN (no attempt) or a dict with 'verdict' and 'penalty'.
        temp = dict(
            zip(self.problems, loop.run_until_complete(gather(*tasks))))
        self.scoreboard = DataFrame.from_dict(temp)
        self.scoreboard.index.name = 'Student_ID'
        # NaN-safe: `x == x` is False for NaN cells. Verdict 10 means AC.
        self.scoreboard['Total'] = self.scoreboard.applymap(
            lambda x: x == x and x['verdict'] == 10).sum(axis=1)
        self.scoreboard['Penalty'] = self.scoreboard.applymap(
            lambda x: x['penalty'] if isinstance(x, dict) and x['verdict'] == 10 else 0).sum(axis=1)
        # Rank by solved count (desc), then penalty (asc), then id (asc).
        self.scoreboard.sort_values(
            by=['Total', 'Penalty', 'Student_ID'], inplace=True, ascending=[False, True, True])
    def visualize(self):
        '''
        Make scoreboard table.
        Returns:
            (str) A html page to be rendered.
        '''
        def make_verdict_string(x):
            # Map FOJ verdict codes to short labels; NaN cells (x != x)
            # render as a gray "N/A" span.
            verdict = {4: 'CE', 5: 'RE', 6: 'MLE',
                       7: 'TLE', 8: 'OLE', 9: 'WA', 10: 'AC'}
            if x == x:
                return '<span class="{}" title="Attempted: {}">{}</span>'.format("right" if x['verdict'] == 10 else "wrong", x['penalty'], verdict[x['verdict']])
            else:
                return '<span class="none" title="Not Attempt">N/A</span>'
        css = """<style type="text/css">
                    html,body{
                        margin:0;
                        padding:0;
                        height:100%;
                        width:100%;
                    }
                    .row_heading {width:70px}
                    .wrong {background-color:red}
                    .right {background-color:green}
                    .none {background-color:gray}
                    span{
                        text-align:center;
                        display:block;
                        width:60px;
                    }
                    th, td{
                        text-align:center;
                        width:60px;
                    }
                    a{
                        text-decoration:none;
                        color:black;
                    }
                </style>
                """
        # Drop the aggregate columns and render each cell as a colored span;
        # row/column headers become links into the judge web UI.
        scoreboard = self.scoreboard.drop(columns=['Total', 'Penalty']).applymap(
            make_verdict_string)
        scoreboard.index.name = None
        scoreboard.index = scoreboard.index.map(
            '<a href="https://oj.nctu.me/groups/11/submissions/?name={0}" target="_blank">{0}</a>'.format)
        scoreboard.rename(lambda x: '<a href="https://oj.nctu.me/problems/{1}/" target="_blank" <span title="{0}">{1}</span></a>'.format(self.problem_name[str(x)], x),
                          axis='columns', inplace=True)
        return css + scoreboard.to_html(border=0, max_cols=None, max_rows=None, escape=False)
| [
"pandas.DataFrame.from_dict",
"pandas.set_option",
"asyncio.gather",
"pandas.DataFrame",
"asyncio.get_event_loop",
"online_judge.Online_Judge"
] | [((276, 292), 'asyncio.get_event_loop', 'get_event_loop', ([], {}), '()\n', (290, 292), False, 'from asyncio import gather, get_event_loop\n'), ((293, 331), 'pandas.set_option', 'set_option', (['"""display.max_colwidth"""', '(-1)'], {}), "('display.max_colwidth', -1)\n", (303, 331), False, 'from pandas import DataFrame, set_option\n'), ((826, 845), 'online_judge.Online_Judge', 'Online_Judge', (['token'], {}), '(token)\n', (838, 845), False, 'from online_judge import Online_Judge\n'), ((872, 883), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (881, 883), False, 'from pandas import DataFrame, set_option\n'), ((1450, 1475), 'pandas.DataFrame.from_dict', 'DataFrame.from_dict', (['temp'], {}), '(temp)\n', (1469, 1475), False, 'from pandas import DataFrame, set_option\n'), ((1405, 1419), 'asyncio.gather', 'gather', (['*tasks'], {}), '(*tasks)\n', (1411, 1419), False, 'from asyncio import gather, get_event_loop\n')] |
import os
import pickle
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import h5py
from transforms import Scale
class CLEVR(Dataset):
    """CLEVR question-answering dataset backed by pre-extracted features.

    Samples are read from `<root>/features/<split>.pkl` (annotations) and
    `<root>/features/<split>_features.hdf5` (image features) instead of
    decoding raw images.
    """

    def __init__(self, root, split='train', transform=None):
        # `transform` is kept for interface compatibility but unused: the
        # image pipeline is bypassed in favor of precomputed features.
        features_path = os.path.join(root, 'features')
        with open('{}/{}.pkl'.format(features_path, split), 'rb') as f:
            self.data = pickle.load(f)
        # self.transform = transform
        self.root = root
        self.split = split
        # HDF5 handle stays open for the dataset's lifetime; call close()
        # when finished.
        self.h = h5py.File('{}/{}_features.hdf5'.format(features_path, split), 'r')
        self.img = self.h['data']

    def close(self):
        # Release the underlying HDF5 file handle.
        self.h.close()

    def __getitem__(self, index):
        imgfile, question, answer, family = self.data[index]
        # img = Image.open(os.path.join(self.root, 'images',
        #                self.split, imgfile)).convert('RGB')
        # img = self.transform(img)
        # Feature row index parsed from the filename; assumes names end in
        # '_<id>.<3-char extension>' — TODO confirm against the pickle data.
        id = int(imgfile.rsplit('_', 1)[1][:-4])
        img = torch.from_numpy(self.img[id])
        return img, question, len(question), answer, family, index

    def __len__(self):
        return len(self.data)
# Image preprocessing pipeline: resize -> pad -> random crop -> tensor ->
# normalize from [0, 1] to [-1, 1]. (Unused while CLEVR serves precomputed
# features, but kept for the raw-image code path.)
transform = transforms.Compose([
    Scale([224, 224]),
    transforms.Pad(4),
    transforms.RandomCrop([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5],
                         std=[0.5, 0.5, 0.5])
])
def collate_data(batch):
    """Collate CLEVR samples into padded batch tensors.

    Samples are reordered by descending question length (the layout packed
    RNN consumers expect) and questions are zero-padded to the longest
    question in the batch. Returns (images, questions, lengths, answers,
    families, indices).
    """
    longest = max(len(sample[1]) for sample in batch)
    questions = np.zeros((len(batch), longest), dtype=np.int64)
    ordered = sorted(batch, key=lambda sample: len(sample[1]), reverse=True)

    images, lengths, answers, families, idxs = [], [], [], [], []
    for row, (image, question, _, answer, family, idx) in enumerate(ordered):
        images.append(image)
        questions[row, :len(question)] = question
        lengths.append(len(question))
        answers.append(answer)
        families.append(family)
        idxs.append(idx)

    return torch.stack(images), torch.from_numpy(questions), \
        lengths, torch.LongTensor(answers), families, idxs
| [
"transforms.Scale",
"torch.LongTensor",
"torch.stack",
"os.path.join",
"pickle.load",
"torch.from_numpy",
"torchvision.transforms.RandomCrop",
"numpy.zeros",
"torchvision.transforms.Normalize",
"torchvision.transforms.Pad",
"torchvision.transforms.ToTensor"
] | [((1611, 1658), 'numpy.zeros', 'np.zeros', (['(batch_size, max_len)'], {'dtype': 'np.int64'}), '((batch_size, max_len), dtype=np.int64)\n', (1619, 1658), True, 'import numpy as np\n'), ((301, 331), 'os.path.join', 'os.path.join', (['root', '"""features"""'], {}), "(root, 'features')\n", (313, 331), False, 'import os\n'), ((1028, 1058), 'torch.from_numpy', 'torch.from_numpy', (['self.img[id]'], {}), '(self.img[id])\n', (1044, 1058), False, 'import torch\n'), ((1219, 1236), 'transforms.Scale', 'Scale', (['[224, 224]'], {}), '([224, 224])\n', (1224, 1236), False, 'from transforms import Scale\n'), ((1242, 1259), 'torchvision.transforms.Pad', 'transforms.Pad', (['(4)'], {}), '(4)\n', (1256, 1259), False, 'from torchvision import transforms\n'), ((1265, 1298), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['[224, 224]'], {}), '([224, 224])\n', (1286, 1298), False, 'from torchvision import transforms\n'), ((1304, 1325), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1323, 1325), False, 'from torchvision import transforms\n'), ((1331, 1394), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (1351, 1394), False, 'from torchvision import transforms\n'), ((2060, 2079), 'torch.stack', 'torch.stack', (['images'], {}), '(images)\n', (2071, 2079), False, 'import torch\n'), ((2081, 2108), 'torch.from_numpy', 'torch.from_numpy', (['questions'], {}), '(questions)\n', (2097, 2108), False, 'import torch\n'), ((2129, 2154), 'torch.LongTensor', 'torch.LongTensor', (['answers'], {}), '(answers)\n', (2145, 2154), False, 'import torch\n'), ((428, 442), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (439, 442), False, 'import pickle\n')] |
import pytest
import numpy as np
import pandas as pd
from xgboost_distribution.distributions import LogNormal
@pytest.fixture
def lognormal():
    """Provide a fresh LogNormal distribution instance per test."""
    return LogNormal()
def test_target_validation(lognormal):
    """Strictly positive targets pass validation without raising."""
    valid_target = np.array([0.5, 1, 4, 5, 10])
    lognormal.check_target(valid_target)
@pytest.mark.parametrize(
    "invalid_target",
    # Zero and negative values lie outside the log-normal support (y > 0).
    [np.array([0, 1.2]), pd.Series([-1.1, 0.4, 2.3])],
)
def test_target_validation_raises(lognormal, invalid_target):
    """Non-positive targets must be rejected with ValueError."""
    with pytest.raises(ValueError):
        lognormal.check_target(invalid_target)
@pytest.mark.parametrize(
    "y, params, natural_gradient, expected_grad",
    [
        # Same inputs in both cases; only the natural-gradient flag differs,
        # which changes the second (scale) gradient component: 0.5 vs 1.
        (
            np.array([1, 1]),
            np.array([[np.log(1), 2], [1, 0]]),
            True,
            np.array([[0, 0.5], [1, 0]]),
        ),
        (
            np.array([1, 1]),
            np.array([[np.log(1), 2], [1, 0]]),
            False,
            np.array([[0, 1], [1, 0]]),
        ),
    ],
)
def test_gradient_calculation(lognormal, y, params, natural_gradient, expected_grad):
    """Gradient of the log-normal objective matches hand-computed values."""
    grad, hess = lognormal.gradient_and_hessian(
        y, params, natural_gradient=natural_gradient
    )
    np.testing.assert_array_equal(grad, expected_grad)
def test_loss(lognormal):
    """Loss at y=0 (outside the log-normal support) evaluates to infinity."""
    loss_name, loss_value = lognormal.loss(
        # fmt: off
        y=np.array([0, ]),
        params=np.array([[1, 0], ]),
    )
    assert loss_name == "LogNormalError"
    assert loss_value == np.inf
| [
"pandas.Series",
"numpy.log",
"xgboost_distribution.distributions.LogNormal",
"numpy.array",
"pytest.raises",
"numpy.testing.assert_array_equal"
] | [((158, 169), 'xgboost_distribution.distributions.LogNormal', 'LogNormal', ([], {}), '()\n', (167, 169), False, 'from xgboost_distribution.distributions import LogNormal\n'), ((230, 258), 'numpy.array', 'np.array', (['[0.5, 1, 4, 5, 10]'], {}), '([0.5, 1, 4, 5, 10])\n', (238, 258), True, 'import numpy as np\n'), ((1160, 1210), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['grad', 'expected_grad'], {}), '(grad, expected_grad)\n', (1189, 1210), True, 'import numpy as np\n'), ((478, 503), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (491, 503), False, 'import pytest\n'), ((355, 373), 'numpy.array', 'np.array', (['[0, 1.2]'], {}), '([0, 1.2])\n', (363, 373), True, 'import numpy as np\n'), ((375, 402), 'pandas.Series', 'pd.Series', (['[-1.1, 0.4, 2.3]'], {}), '([-1.1, 0.4, 2.3])\n', (384, 402), True, 'import pandas as pd\n'), ((658, 674), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (666, 674), True, 'import numpy as np\n'), ((754, 782), 'numpy.array', 'np.array', (['[[0, 0.5], [1, 0]]'], {}), '([[0, 0.5], [1, 0]])\n', (762, 782), True, 'import numpy as np\n'), ((817, 833), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (825, 833), True, 'import numpy as np\n'), ((914, 940), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (922, 940), True, 'import numpy as np\n'), ((1312, 1325), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1320, 1325), True, 'import numpy as np\n'), ((1344, 1362), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (1352, 1362), True, 'import numpy as np\n'), ((699, 708), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (705, 708), True, 'import numpy as np\n'), ((858, 867), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (864, 867), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Main script for workload forecasting.
Example usage:
- Generate data (runs OLTP benchmark on the built database) and perform training, and save the trained model
./forecaster --gen_data --models=LSTM --model_save_path=model.pickle
- Use the trained models (LSTM) to generate predictions.
./forecaster --model_load_path=model.pickle --test_file=test_query.csv --test_model=LSTM
TODO:
- Better metrics for training and prediction (currently not focusing on models' accuracy yet)
- Multiple models (currently only simple-one-layer-untuned LSTM used)
- API and interaction with Pilot
"""
import argparse
import json
import pickle
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ..testing.self_driving.constants import (DEFAULT_ITER_NUM,
DEFAULT_QUERY_TRACE_FILE,
DEFAULT_TPCC_WEIGHTS,
DEFAULT_WORKLOAD_PATTERN)
from ..testing.self_driving.forecast import gen_oltp_trace
from ..testing.util.constants import LOG
from .cluster import QueryCluster
from .data_loader import DataLoader
from .models import ForecastModel, get_models
# Interval duration for aggregation in microseconds
INTERVAL_MICRO_SEC = 500000
# Number of Microseconds per second
MICRO_SEC_PER_SEC = 1000000
# Number of data points in a sequence
SEQ_LEN = 10 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for the horizon
HORIZON_LEN = 30 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points for testing set
EVAL_DATA_SIZE = 2 * SEQ_LEN + HORIZON_LEN
argp = argparse.ArgumentParser(description="Query Load Forecaster")
# Generation stage related options
argp.add_argument(
"--gen_data",
default=False,
action="store_true",
help="If specified, OLTP benchmark would be downloaded and built to generate the query trace data")
argp.add_argument(
"--tpcc_weight",
type=str,
default=DEFAULT_TPCC_WEIGHTS,
help="Workload weights for the TPCC")
argp.add_argument(
"--tpcc_rates",
nargs="+",
default=DEFAULT_WORKLOAD_PATTERN,
help="Rate array for the TPCC workload")
argp.add_argument(
"--pattern_iter",
type=int,
default=DEFAULT_ITER_NUM,
help="Number of iterations the DEFAULT_WORKLOAD_PATTERN should be run")
argp.add_argument("--trace_file", default=DEFAULT_QUERY_TRACE_FILE,
help="Path to the query trace file", metavar="FILE")
# Model specific
argp.add_argument("--models", nargs='+', type=str, help="Models to use")
argp.add_argument("--models_config", type=str, metavar="FILE",
help="Models and init arguments JSON config file")
argp.add_argument("--seq_len", type=int, default=SEQ_LEN,
help="Length of one sequence in number of data points")
argp.add_argument(
"--horizon_len",
type=int,
default=HORIZON_LEN,
help="Length of the horizon in number of data points, "
"aka, how many further in the a sequence is used for prediction"
)
# Training stage related options
argp.add_argument("--model_save_path", metavar="FILE",
help="Where the model trained will be stored")
argp.add_argument(
"--eval_size",
type=int,
default=EVAL_DATA_SIZE,
help="Length of the evaluation data set length in number of data points")
argp.add_argument("--lr", type=float, default=0.001, help="Learning rate")
argp.add_argument("--epochs", type=int, default=10,
help="Number of epochs for training")
# Testing stage related options
argp.add_argument(
"--model_load_path",
default="model.pickle",
metavar="FILE",
help="Where the model should be loaded from")
argp.add_argument(
"--test_file",
help="Path to the test query trace file",
metavar="FILE")
argp.add_argument(
"--test_model",
type=str,
help="Model to be used for forecasting"
)
class Forecaster:
"""
A wrapper around various ForecastModels, that prepares training and evaluation data.
"""
TRAIN_DATA_IDX = 0
TEST_DATA_IDX = 1
def __init__(
self,
trace_file: str,
interval_us: int = INTERVAL_MICRO_SEC,
test_mode: bool = False,
eval_size: int = EVAL_DATA_SIZE,
seq_len: int = SEQ_LEN,
horizon_len: int = HORIZON_LEN) -> None:
"""
Initializer
:param trace_file: trace file for the forecaster
:param interval_us: number of microseconds for the time-series interval
:param test_mode: True If the Loader is for testing
:param eval_size: Number of data points used for evaluation(testing)
:param seq_len: Length of a sequence
:param horizon_len: Horizon length
"""
self._seq_len = seq_len
self._horizon_len = horizon_len
self._test_mode = test_mode
self._eval_data_size = eval_size
self._data_loader = DataLoader(
query_trace_file=trace_file,
interval_us=interval_us)
self._make_clusters()
def _make_clusters(self) -> None:
"""
Extract data from the DataLoader and put them into different clusters.
:return: None
"""
# FIXME:
# Assuming all the queries in the current trace file are from
# the same cluster for now. A future TODO would have a clustering
# process that separates traces into multiple clusters
self._clusters = [QueryCluster(self._data_loader.get_ts_data())]
self._cluster_data = []
for cluster in self._clusters:
# Aggregated time-series from the cluster
data = cluster.get_timeseries()
train_raw_data, test_raw_data = self._split_data(data)
self._cluster_data.append((train_raw_data, test_raw_data))
def _split_data(self, data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Split the raw data into a training set, and a testing(evaluation) set.
:param data: All the raw data
:return: traing, test raw data set
"""
if self._test_mode:
self._test_set_size = len(data)
else:
self._test_set_size = self._eval_data_size
if self._test_set_size > len(data):
raise ValueError(
"Eval data size is too small. Not enough data points.")
split_idx = len(data) - self._test_set_size
# First part as the training set
train_raw_data = data[:split_idx]
# Last part as the testing set
test_raw_data = data[split_idx:]
return train_raw_data, test_raw_data
def _make_seqs(self,
input_data: np.ndarray,
start: int,
end: int,
with_label: bool = False) -> List[Union[Tuple[np.ndarray,
np.ndarray],
np.ndarray]]:
"""
Create time-series sequences of fixed sequence length from a continuous range of time-series.
:param input_data: Input time-series
:param start: Start index (inclusive) of the first sequence to be made
:param end: End index (exclusive) of the last sequence to be made
:param with_label: True if label in a certain horizon is added
:return: Sequences of fixed length if with_label is False,
or List of fixed length sequence and label if with_label is True
"""
seq_len = self._seq_len
horizon = self._horizon_len
seq_start = start
if with_label:
# Reserve space for horizon
seq_end = end - seq_len - horizon
else:
# Use all data for prediction
seq_end = end - seq_len
if seq_end <= seq_start:
raise IndexError(f"Not enough data points to make sequences")
seqs = []
for i in range(seq_start, seq_end):
seq = input_data[i:i + seq_len].reshape(-1, 1)
# Look beyond the horizon to get the label
if with_label:
label_i = i + seq_len + horizon
label = input_data[label_i: label_i + 1].reshape(1, -1)
seqs.append((seq, label))
else:
seqs.append(seq)
return seqs
@lru_cache(maxsize=32)
def _cluster_seqs(self,
cluster_id: int,
test_mode: bool = False,
with_label: bool = False) -> List[Union[Tuple[np.ndarray,
np.ndarray],
np.ndarray]]:
"""
Create time-series sequences of fixed sequence length from a continuous range of time-series. A cached wrapper
over _make_seqs with different options.
:param cluster_id: Cluster id
:param test_mode: True if using test dataset, otherwise use the training dataset
:param with_label: True if label (time-series data in a horizon from the sequence) is also added.
:return: Sequences of fixed length if with_label is False,
or List of fixed length sequence and label if with_label is True
"""
if test_mode:
input_data = self._cluster_data[cluster_id][self.TEST_DATA_IDX]
else:
input_data = self._cluster_data[cluster_id][self.TRAIN_DATA_IDX]
seqs = self._make_seqs(
input_data,
0,
len(input_data),
with_label=with_label)
return seqs
def train(self, models_kwargs: Dict) -> List[List[ForecastModel]]:
"""
:param models_kwargs: A dictionary of models' init arguments
:return: List of models(a list of models) for each cluster.
"""
models = []
for cid in range(len(self._cluster_data)):
cluster_models = get_models(models_kwargs)
train_seqs = self._cluster_seqs(
cid, test_mode=False, with_label=True)
for model_name, model in cluster_models.items():
# Fit the model
model.fit(train_seqs)
self.eval(cid, model)
models.append(cluster_models)
return models
def eval(self, cid: int, model: ForecastModel) -> None:
"""
Evaluate a fitted model on the test dataset.
:param cid: Cluster id
:param model: Model to use
"""
eval_seqs = self._cluster_seqs(cid, test_mode=True, with_label=True)
preds = []
gts = []
for seq, label in eval_seqs:
pred = model.predict(seq)
preds.append(pred)
gts.append(label.item())
# FIXME:
# simple L2 norm for comparing the prediction and results
l2norm = np.linalg.norm(np.array(preds) - np.array(gts))
LOG.info(
f"[{model.name}] has L2 norm(prediction, ground truth) = {l2norm}")
def predict(self, cid: int, model: ForecastModel) -> Dict:
"""
Output prediction on the test dataset, and segregate the predicted cluster time-series into individual queries
:param cid: Cluser id
:param model: Model to use
:return: Dict of {query_id -> time-series}
"""
test_seqs = self._cluster_seqs(cid, test_mode=True, with_label=False)
preds = list([model.predict(seq) for seq in test_seqs])
query_preds = self._clusters[cid].segregate(preds)
return query_preds
def parse_model_config(model_names: Optional[List[str]],
models_config: Optional[str]) -> Dict:
"""
Load models from
:param model_names: List of model names
:param models_config: JSON model config file
:return: Merged model config Dict
"""
model_kwargs = dict([(model_name, {}) for model_name in model_names])
if models_config is not None:
with open(models_config, 'r') as f:
custom_config = json.load(f)
# Simple and non-recursive merging of options
model_kwargs.update(custom_config)
if len(model_kwargs) < 1:
raise ValueError("At least 1 model needs to be used.")
return model_kwargs
if __name__ == "__main__":
args = argp.parse_args()
if args.test_file is None:
# Parse models arguments
models_kwargs = parse_model_config(args.models, args.models_config)
# Generate OLTP trace file
if args.gen_data:
gen_oltp_trace(
tpcc_weight=args.tpcc_weight,
tpcc_rates=args.tpcc_rates,
pattern_iter=args.pattern_iter)
trace_file = DEFAULT_QUERY_TRACE_FILE
else:
trace_file = args.trace_file
forecaster = Forecaster(
trace_file=trace_file,
interval_us=INTERVAL_MICRO_SEC,
seq_len=args.seq_len,
eval_size=args.eval_size,
horizon_len=args.horizon_len)
models = forecaster.train(models_kwargs)
# Save the model
if args.model_save_path:
with open(args.model_save_path, "wb") as f:
pickle.dump(models, f)
else:
# Do inference on a trained model
with open(args.model_load_path, "rb") as f:
models = pickle.load(f)
forecaster = Forecaster(
trace_file=args.test_file,
test_mode=True,
interval_us=INTERVAL_MICRO_SEC,
seq_len=args.seq_len,
eval_size=args.eval_size,
horizon_len=args.horizon_len)
# FIXME:
# Assuming all the queries in the current trace file are from
# the same cluster for now
query_pred = forecaster.predict(0, models[0][args.test_model])
# TODO:
# How are we consuming predictions?
for qid, ts in query_pred.items():
LOG.info(f"[Query: {qid}] pred={ts[:10]}")
| [
"pickle.dump",
"argparse.ArgumentParser",
"pickle.load",
"numpy.array",
"json.load",
"functools.lru_cache"
] | [((1707, 1767), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query Load Forecaster"""'}), "(description='Query Load Forecaster')\n", (1730, 1767), False, 'import argparse\n'), ((8504, 8525), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(32)'}), '(maxsize=32)\n', (8513, 8525), False, 'from functools import lru_cache\n'), ((12216, 12228), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12225, 12228), False, 'import json\n'), ((13542, 13556), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (13553, 13556), False, 'import pickle\n'), ((11063, 11078), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (11071, 11078), True, 'import numpy as np\n'), ((11081, 11094), 'numpy.array', 'np.array', (['gts'], {}), '(gts)\n', (11089, 11094), True, 'import numpy as np\n'), ((13394, 13416), 'pickle.dump', 'pickle.dump', (['models', 'f'], {}), '(models, f)\n', (13405, 13416), False, 'import pickle\n')] |
from copy import copy
from hashlib import md5
from pickle import Pickler, MARK, DICT
from types import DictionaryType
from .lib import StringIO
class CanonicalizingPickler(Pickler):
dispatch = copy(Pickler.dispatch)
def save_set(self, obj):
rv = obj.__reduce_ex__(0)
rv = (rv[0], (sorted(rv[1][0]),), rv[2])
self.save_reduce(obj=obj, *rv)
dispatch[set] = save_set
def save_dict(self, obj):
write = self.write
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(sorted(obj.iteritems()))
dispatch[DictionaryType] = save_dict
def pickle(obj):
file = StringIO()
CanonicalizingPickler(file, 0).dump(obj)
return md5(file.getvalue()).hexdigest()
| [
"copy.copy"
] | [((199, 221), 'copy.copy', 'copy', (['Pickler.dispatch'], {}), '(Pickler.dispatch)\n', (203, 221), False, 'from copy import copy\n')] |
'''Meeus: Astronomical Algorithms (2nd ed.), chapter 25'''
import math
from nutation_ecliptic import ecliptic
from constants import AU
def coordinates(jd):
'''equatorial coordinates of Sun'''
lon=math.radians(longitude(jd))
eps=math.radians(ecliptic(jd))
ra=math.degrees(math.atan2(math.cos(eps)*math.sin(lon),math.cos(lon)))
dec=math.degrees(math.asin(math.sin(eps)*math.sin(lon)))
return ra,dec
def longitude(jd):
'''longitude of Sun'''
T=(jd-2451545)/36525.
L=math.radians(280.46646+36000.76983*T+0.0003032*T**2)
M=math.radians(357.52911+35999.05029*T-0.0001537*T**2)
C=math.radians((1.914602-0.004817*T-0.000014*T**2)*math.sin(M)+(0.019993-0.000101*T)*math.sin(2*M)+0.000289*math.sin(3*M))
lon=L+C
return math.degrees(lon)
def distance(jd,km=True):
'''Earth-Sun distance in km'''
T=(jd-2451545)/36525.
e=0.016708634-0.000042037*T-0.0000001267*T**2
M=math.radians(357.52911+35999.05029*T-0.0001537*T**2)
C=math.radians((1.914602-0.004817*T-0.000014*T**2)*math.sin(M)+(0.019993-0.000101*T)*math.sin(2*M)+0.000289*math.sin(3*M))
nu=M+C
R=1.000001018*(1-e**2)/(1+e*math.cos(nu))
if km: R*=AU
return R
| [
"math.degrees",
"math.radians",
"math.cos",
"math.sin",
"nutation_ecliptic.ecliptic"
] | [((535, 597), 'math.radians', 'math.radians', (['(280.46646 + 36000.76983 * T + 0.0003032 * T ** 2)'], {}), '(280.46646 + 36000.76983 * T + 0.0003032 * T ** 2)\n', (547, 597), False, 'import math\n'), ((594, 656), 'math.radians', 'math.radians', (['(357.52911 + 35999.05029 * T - 0.0001537 * T ** 2)'], {}), '(357.52911 + 35999.05029 * T - 0.0001537 * T ** 2)\n', (606, 656), False, 'import math\n'), ((812, 829), 'math.degrees', 'math.degrees', (['lon'], {}), '(lon)\n', (824, 829), False, 'import math\n'), ((975, 1037), 'math.radians', 'math.radians', (['(357.52911 + 35999.05029 * T - 0.0001537 * T ** 2)'], {}), '(357.52911 + 35999.05029 * T - 0.0001537 * T ** 2)\n', (987, 1037), False, 'import math\n'), ((264, 276), 'nutation_ecliptic.ecliptic', 'ecliptic', (['jd'], {}), '(jd)\n', (272, 276), False, 'from nutation_ecliptic import ecliptic\n'), ((342, 355), 'math.cos', 'math.cos', (['lon'], {}), '(lon)\n', (350, 355), False, 'import math\n'), ((314, 327), 'math.cos', 'math.cos', (['eps'], {}), '(eps)\n', (322, 327), False, 'import math\n'), ((328, 341), 'math.sin', 'math.sin', (['lon'], {}), '(lon)\n', (336, 341), False, 'import math\n'), ((389, 402), 'math.sin', 'math.sin', (['eps'], {}), '(eps)\n', (397, 402), False, 'import math\n'), ((403, 416), 'math.sin', 'math.sin', (['lon'], {}), '(lon)\n', (411, 416), False, 'import math\n'), ((764, 779), 'math.sin', 'math.sin', (['(3 * M)'], {}), '(3 * M)\n', (772, 779), False, 'import math\n'), ((1145, 1160), 'math.sin', 'math.sin', (['(3 * M)'], {}), '(3 * M)\n', (1153, 1160), False, 'import math\n'), ((1208, 1220), 'math.cos', 'math.cos', (['nu'], {}), '(nu)\n', (1216, 1220), False, 'import math\n'), ((707, 718), 'math.sin', 'math.sin', (['M'], {}), '(M)\n', (715, 718), False, 'import math\n'), ((741, 756), 'math.sin', 'math.sin', (['(2 * M)'], {}), '(2 * M)\n', (749, 756), False, 'import math\n'), ((1088, 1099), 'math.sin', 'math.sin', (['M'], {}), '(M)\n', (1096, 1099), False, 'import math\n'), ((1122, 1137), 
'math.sin', 'math.sin', (['(2 * M)'], {}), '(2 * M)\n', (1130, 1137), False, 'import math\n')] |
from django.contrib import admin
# Register your models here.
from apps.weapons.models import Weapon
admin.site.register(Weapon)
| [
"django.contrib.admin.site.register"
] | [((103, 130), 'django.contrib.admin.site.register', 'admin.site.register', (['Weapon'], {}), '(Weapon)\n', (122, 130), False, 'from django.contrib import admin\n')] |
from django.db import models
class Nominee(models.Model):
name = models.TextField()
picture_url = models.ImageField(upload_to="nominees/")
description = models.TextField(max_length=350)
class Meta:
verbose_name_plural = "nominees"
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=40)
url_field = models.CharField(max_length=40)
class Meta:
verbose_name_plural = "categories"
def __str__(self):
return self.name
class Indication(models.Model):
nominated = models.ForeignKey(Nominee, on_delete=models.CASCADE)
category = models.ForeignKey(
Category, on_delete=models.CASCADE, related_name="indications"
)
year = models.IntegerField()
annotation = models.TextField(blank=True)
is_winner = models.BooleanField(default=False)
def __str__(self):
return f'"{self.nominated.name}" on "{self.category.name}"'
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.BooleanField",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((71, 89), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (87, 89), False, 'from django.db import models\n'), ((108, 148), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""nominees/"""'}), "(upload_to='nominees/')\n", (125, 148), False, 'from django.db import models\n'), ((167, 199), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(350)'}), '(max_length=350)\n', (183, 199), False, 'from django.db import models\n'), ((350, 381), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (366, 381), False, 'from django.db import models\n'), ((398, 429), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)'}), '(max_length=40)\n', (414, 429), False, 'from django.db import models\n'), ((589, 641), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Nominee'], {'on_delete': 'models.CASCADE'}), '(Nominee, on_delete=models.CASCADE)\n', (606, 641), False, 'from django.db import models\n'), ((657, 743), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category'], {'on_delete': 'models.CASCADE', 'related_name': '"""indications"""'}), "(Category, on_delete=models.CASCADE, related_name=\n 'indications')\n", (674, 743), False, 'from django.db import models\n'), ((764, 785), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (783, 785), False, 'from django.db import models\n'), ((803, 831), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (819, 831), False, 'from django.db import models\n'), ((848, 882), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (867, 882), False, 'from django.db import models\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from scholarly_citation_finder import config
from scholarly_citation_finder.apps.parser.Parser import Parser
from scholarly_citation_finder.apps.core.models import PublicationUrl
from scholarly_citation_finder.tools.extractor.grobid.GrobidExtractor import GrobidExtractor
from scholarly_citation_finder.lib.file import download_file_pdf, DownloadFailedException, UnexpectedContentTypeException
from scholarly_citation_finder.lib.process import ProcessException
from scholarly_citation_finder.apps.parser.Exceptions import ParserRollbackError
from scholarly_citation_finder.lib.string import normalize_string
from scholarly_citation_finder.tools.extractor.grobid.TeiParser import TeiParserNoDocumentTitle,\
TeiParserNoReferences
from scholarly_citation_finder.tools.nameparser.StringMatching import nearly_match
logger = logging.getLogger(__name__)
class PublicationDocumentExtractor:
'''
Class to extract a document.
'''
NUM_MINIMUM_REFERENCES = 3
def __init__(self, database='default'):
'''
Create object.
:param database: Database name
'''
self.extractor = GrobidExtractor() # used to extract documents
self.parser = Parser(database=database) # used to store results
def extract_and_store(self, publication, url):
'''
Extract the publication from the given URL and store the result.
:param publication:
:param url:
:raise ExtractorNotAvaiableException:
'''
try:
document_meta, references = self.extract(publication.title, publication.id, url=url) # raises ExtractorNotAvaiableException
if document_meta and references:
self.__store_document_meta(publication=publication, document_meta=document_meta)
self.__store_references(publication=publication, url=url, references=references)
return True
# Download failed
except(DownloadFailedException, UnexpectedContentTypeException) as e:
logger.info('{}: {}'.format(type(e).__name__, str(e)))
# Extractor failed
except(ProcessException) as e:
logger.info('{}: {}'.format(type(e).__name__, str(e)))
# Storage failed
except(ParserRollbackError) as e:
logger.warn(e, exc_info=True)
return False
def extract(self, publication_title, publication_id, url):
'''
Try to download the document from the given URL and extract it.
:param publication_title: Title of the publication to check, if it's the correct document
:param publication_id: ID of the publication. Used for the filename of the temporary stored document
:param url: Document URL
:return: Document meta object, references array
False, False if (a) it failed to download the document (b) or the document has no title or references
:raise ProcessException: Extractor failed
:raise ExtractorNotAvaiableException: Extractor is not available
:raise DownloadFailedException: Download failed
:raise UnexpectedContentTypeException: File for given URL has the wrong content type
'''
try:
filename = download_file_pdf(url, path=config.DOWNLOAD_TMP_DIR, name='{}_tmp.pdf'.format(publication_id))
document_meta, references = self.extractor.extract_file(filename, completely=True)
# Check title
document_meta_title = document_meta['publication']['title'].lower().strip()
if not nearly_match(document_meta_title, publication_title):
logger.info('Wrong title! Is "%s", should "%s"' % (document_meta_title, publication_title) )
return False, False
# Check number of references
if len(references) < self.NUM_MINIMUM_REFERENCES:
logger.info('Not enough references')
return False, False
return document_meta, references
# Tei failed (invalid document)
except(TeiParserNoDocumentTitle, TeiParserNoReferences) as e:
logger.info('{}: {}'.format(type(e).__name__, str(e)))
return False, False
def __store_references(self, publication, references, url):
'''
Store the URL and the references.
:param publication: Publication that was extracted
:param references: References list, extracted from the document
:param url: URL of the document that was extracted
:raise ParserRollbackError: Storage (database commit) of the references failed
'''
publication_url = publication.publicationurl_set.create(url=url[:200],
type=PublicationUrl.MIME_TYPE_PDF,
extraction_date=datetime.now())
for reference in references:
# TODO: check if paper already exists (!)
reference['reference']['publication_id'] = publication.id
reference['reference']['source_id'] = publication_url.id
reference['publication']['source'] = '{}:{}'.format(reference['publication']['source'], publication_url.id)
self.parser.parse(**reference)
self.parser.commit() # raises ParserRollbackError
def __store_document_meta(self, publication, document_meta):
'''
Store the extracted head meta data.
:param publication: Publication object
:param document_meta: Extracted head meta data
'''
if 'keywords' in document_meta:
for keyword in document_meta['keywords']:
keyword = normalize_string(keyword)
if len(keyword) <= 100:
publication.publicationkeyword_set.get_or_create(name=keyword)
else:
logger.info('keyword "%s" is too long' % keyword)
| [
"logging.getLogger",
"scholarly_citation_finder.tools.extractor.grobid.GrobidExtractor.GrobidExtractor",
"scholarly_citation_finder.apps.parser.Parser.Parser",
"datetime.datetime.now",
"scholarly_citation_finder.tools.nameparser.StringMatching.nearly_match",
"scholarly_citation_finder.lib.string.normalize... | [((913, 940), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (930, 940), False, 'import logging\n'), ((1229, 1246), 'scholarly_citation_finder.tools.extractor.grobid.GrobidExtractor.GrobidExtractor', 'GrobidExtractor', ([], {}), '()\n', (1244, 1246), False, 'from scholarly_citation_finder.tools.extractor.grobid.GrobidExtractor import GrobidExtractor\n'), ((1297, 1322), 'scholarly_citation_finder.apps.parser.Parser.Parser', 'Parser', ([], {'database': 'database'}), '(database=database)\n', (1303, 1322), False, 'from scholarly_citation_finder.apps.parser.Parser import Parser\n'), ((3684, 3736), 'scholarly_citation_finder.tools.nameparser.StringMatching.nearly_match', 'nearly_match', (['document_meta_title', 'publication_title'], {}), '(document_meta_title, publication_title)\n', (3696, 3736), False, 'from scholarly_citation_finder.tools.nameparser.StringMatching import nearly_match\n'), ((5031, 5045), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5043, 5045), False, 'from datetime import datetime\n'), ((5872, 5897), 'scholarly_citation_finder.lib.string.normalize_string', 'normalize_string', (['keyword'], {}), '(keyword)\n', (5888, 5897), False, 'from scholarly_citation_finder.lib.string import normalize_string\n')] |
import sys
import os
import json
import urllib
from PIL import Image
from flask import Flask, request, redirect, url_for
from flask import send_from_directory, render_template
from werkzeug.utils import secure_filename
from datetime import datetime
from caption_service import CaptionService
from translation_service import TranslationService
sys.path.append(os.curdir) # カレントファイルをインポートするための設定
UPLOAD_FOLDER = '/tmp/uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__, static_url_path='/static', static_folder='assets/static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
cs = CaptionService()
ts = TranslationService()
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/flask/uploader', methods=['POST'])
def upload_file():
# check if the post request has the file part
# create a special subfolder for the files uploaded this time
# to avoid overwrite
subdir = datetime.now().strftime('%Y%m%d_%H%M%S')
current_files_dir = os.path.join(UPLOAD_FOLDER, subdir)
os.makedirs(current_files_dir, exist_ok=True)
upload_files = request.files.getlist('file[]')
ret = []
for file in upload_files:
image = {}
print('filename is', file.filename)
filename = secure_filename(file.filename)
image['filename'] = filename
filepath = os.path.join(current_files_dir, filename)
print('file saving to ', filepath)
file.save(filepath)
image['url'] = '/flask/uploads/{}/{}'.format(
subdir, urllib.parse.quote_plus(filename))
print('begin predict', filepath)
caption_en, caption_ja = get_caption(filepath)
image['result'] = caption_ja
ret.append(image)
return json.dumps(ret)
@app.route('/flask/uploads/<path:filepath>')
def uploaded_file(filepath):
print("filepath is {}".format(filepath))
filename = os.path.basename(filepath)
if not filename:
return ""
path = os.path.dirname(filepath)
print("path is {}, filename is {}".format(path, filename))
image_folder = os.path.join(UPLOAD_FOLDER, path)
return send_from_directory(image_folder,
urllib.parse.unquote_plus(filename))
@app.route('/')
def serve_index():
return send_from_directory('assets', 'index.html')
@app.route('/<filename>', defaults={'filename': 'index.html'})
def serve_assets(filename):
return send_from_directory('assets', filename)
def get_caption(filepath):
print('getting caption', filepath)
caption_en = cs.get_caption(filepath)
caption_ja = ts.get_translation(caption_en)
return caption_en, caption_ja
if __name__ == '__main__':
port = os.environ.get('PORT', 5000)
app.run(host='0.0.0.0', port=port)
| [
"translation_service.TranslationService",
"flask.send_from_directory",
"caption_service.CaptionService",
"flask.request.files.getlist",
"os.makedirs",
"flask.Flask",
"json.dumps",
"os.path.join",
"os.environ.get",
"os.path.dirname",
"datetime.datetime.now",
"os.path.basename",
"werkzeug.util... | [((344, 370), 'sys.path.append', 'sys.path.append', (['os.curdir'], {}), '(os.curdir)\n', (359, 370), False, 'import sys\n'), ((428, 469), 'os.makedirs', 'os.makedirs', (['UPLOAD_FOLDER'], {'exist_ok': '(True)'}), '(UPLOAD_FOLDER, exist_ok=True)\n', (439, 469), False, 'import os\n'), ((533, 606), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '"""/static"""', 'static_folder': '"""assets/static"""'}), "(__name__, static_url_path='/static', static_folder='assets/static')\n", (538, 606), False, 'from flask import Flask, request, redirect, url_for\n'), ((657, 673), 'caption_service.CaptionService', 'CaptionService', ([], {}), '()\n', (671, 673), False, 'from caption_service import CaptionService\n'), ((679, 699), 'translation_service.TranslationService', 'TranslationService', ([], {}), '()\n', (697, 699), False, 'from translation_service import TranslationService\n'), ((1112, 1147), 'os.path.join', 'os.path.join', (['UPLOAD_FOLDER', 'subdir'], {}), '(UPLOAD_FOLDER, subdir)\n', (1124, 1147), False, 'import os\n'), ((1152, 1197), 'os.makedirs', 'os.makedirs', (['current_files_dir'], {'exist_ok': '(True)'}), '(current_files_dir, exist_ok=True)\n', (1163, 1197), False, 'import os\n'), ((1218, 1249), 'flask.request.files.getlist', 'request.files.getlist', (['"""file[]"""'], {}), "('file[]')\n", (1239, 1249), False, 'from flask import Flask, request, redirect, url_for\n'), ((1860, 1875), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (1870, 1875), False, 'import json\n'), ((2013, 2039), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (2029, 2039), False, 'import os\n'), ((2092, 2117), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (2107, 2117), False, 'import os\n'), ((2201, 2234), 'os.path.join', 'os.path.join', (['UPLOAD_FOLDER', 'path'], {}), '(UPLOAD_FOLDER, path)\n', (2213, 2234), False, 'import os\n'), ((2396, 2439), 'flask.send_from_directory', 'send_from_directory', 
(['"""assets"""', '"""index.html"""'], {}), "('assets', 'index.html')\n", (2415, 2439), False, 'from flask import send_from_directory, render_template\n'), ((2544, 2583), 'flask.send_from_directory', 'send_from_directory', (['"""assets"""', 'filename'], {}), "('assets', filename)\n", (2563, 2583), False, 'from flask import send_from_directory, render_template\n'), ((2816, 2844), 'os.environ.get', 'os.environ.get', (['"""PORT"""', '(5000)'], {}), "('PORT', 5000)\n", (2830, 2844), False, 'import os\n'), ((1376, 1406), 'werkzeug.utils.secure_filename', 'secure_filename', (['file.filename'], {}), '(file.filename)\n', (1391, 1406), False, 'from werkzeug.utils import secure_filename\n'), ((1464, 1505), 'os.path.join', 'os.path.join', (['current_files_dir', 'filename'], {}), '(current_files_dir, filename)\n', (1476, 1505), False, 'import os\n'), ((2311, 2346), 'urllib.parse.unquote_plus', 'urllib.parse.unquote_plus', (['filename'], {}), '(filename)\n', (2336, 2346), False, 'import urllib\n'), ((1047, 1061), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1059, 1061), False, 'from datetime import datetime\n'), ((1652, 1685), 'urllib.parse.quote_plus', 'urllib.parse.quote_plus', (['filename'], {}), '(filename)\n', (1675, 1685), False, 'import urllib\n')] |
import geopandas as gpd
from shapely.geometry import LineString, Polygon,MultiLineString
import os.path
from map2loop import m2l_utils
import warnings
import numpy as np
import pandas as pd
#explodes polylines and modifies objectid for exploded parts
def explode_polylines(indf,c_l,dst_crs):
#indf = gpd.GeoDataFrame.from_file(indata)
outdf = gpd.GeoDataFrame(columns=indf.columns, crs=dst_crs)
for idx, row in indf.iterrows():
if type(row.geometry) == LineString:
outdf = outdf.append(row,ignore_index=True)
if type(row.geometry) == MultiLineString:
multdf = gpd.GeoDataFrame(columns=indf.columns, crs=dst_crs)
recs = len(row.geometry)
multdf = multdf.append([row]*recs,ignore_index=True)
i=0
for geom in range(recs):
multdf.loc[geom,'geometry'] = row.geometry[geom]
multdf.loc[geom,c_l['o']]=str(multdf.loc[geom,c_l['o']])+'_'+str(i)
print('map2loop warning: Fault_'+multdf.loc[geom,c_l['o']],'is one of a set of duplicates, so renumbering')
i=i+1
outdf = outdf.append(multdf,ignore_index=True)
return outdf
def check_map(structure_file,geology_file,fault_file,mindep_file,fold_file,tmp_path,bbox,c_l,dst_crs,local_paths,drift_prefix,polygo):
#y_point_list = [bbox[1], bbox[1], bbox[3], bbox[3], bbox[1]]
#x_point_list = [bbox[0], bbox[2], bbox[2], bbox[0], bbox[0]]
#bbox_geom = Polygon(zip(x_point_list, y_point_list))
#polygo = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
m2l_errors=[]
m2l_warnings=[]
if(local_paths):
for file_name in (structure_file,geology_file,fault_file,mindep_file,fold_file):
if not os.path.isfile(file_name):
m2l_errors.append('file '+file_name+' not found')
# Process orientation points
if (os.path.isfile(structure_file) or not local_paths):
orientations2 = gpd.read_file(structure_file,bbox=bbox)
if(c_l['sf']==c_l['ds']):
new_code='NEW_'+c_l['sf']
new_code=new_code[:10]
orientations=orientations2.rename(columns={c_l['sf']:new_code}, errors="raise")
m2l_warnings.append('To avoid conflict with geology field of same name, orientation field named "'+str(c_l['sf'])+'" renamed to "'+new_code+'"')
c_l['sf']=new_code
else:
new_code=''
orientations=orientations2.copy()
if(c_l['bo']==c_l['ds'] and not new_code==''):
c_l['bo']=new_code
if(len(orientations)<2):
m2l_errors.append('not enough orientations to complete calculations (need at least 2)')
orientations = orientations.replace(r'^\s+$', np.nan, regex=True)
orientations = orientations[orientations[c_l['d']]!=-999]
for code in ('sf','d','dd','gi'):
if not c_l[code] in orientations.columns:
if(code=='sf'):
orientations[c_l[code]]='Bed'
m2l_warnings.append('field named "'+str(c_l[code])+'" added with default value "Bed"')
elif(not code=='gi'):
m2l_errors.append('"'+c_l[code]+'" field needed')
else:
m2l_warnings.append('field named "'+str(c_l[code])+'" added with default value')
orientations[c_l[code]] = np.arange(len(orientations))
else:
nans=orientations[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of orientations file, replacing with 0')
orientations[c_l[code]].fillna("0", inplace = True)
unique_o=set(orientations[c_l['gi']])
if(not len(unique_o) == len(orientations)):
m2l_warnings.append('duplicate orientation point unique IDs')
show_metadata(orientations,"orientations layer")
# Process geology polygons
if (os.path.isfile(geology_file) or not local_paths):
geology = gpd.read_file(geology_file,bbox=bbox)
if not c_l['o'] in geology.columns:
geology = geology.reset_index()
geology[c_l['o']]=geology.index
unique_g=set(geology[c_l['o']])
if(not len(unique_g) == len(geology)):
m2l_warnings.append('duplicate geology polygon unique IDs')
nans=geology[c_l['c']].isnull().sum()
if(nans>0):
m2l_errors.append(''+str(nans)+' NaN/blank found in column "'+str(c_l['c'])+'" of geology file, please fix')
if(c_l['g']=='No_col' or not c_l['g'] in geology.columns):
m2l_warnings.append('No secondary strat coding for geology polygons')
c_l['g']='group'
geology[c_l['g']]="Top"
geology = geology.replace(r'^\s+$', np.nan, regex=True)
geology[c_l['g']].fillna(geology[c_l['g2']], inplace=True)
geology[c_l['g']].fillna(geology[c_l['c']], inplace=True)
if(c_l['r1']=='No_col' or not c_l['r1'] in geology.columns):
m2l_warnings.append('No extra litho for geology polygons')
c_l['r1']='r1'
geology[c_l['r1']]='Nope'
if(c_l['r2']=='No_col' or not c_l['r2'] in geology.columns):
m2l_warnings.append('No more extra litho for geology polygons')
c_l['r2']='r2'
geology[c_l['r2']]='Nope'
if(c_l['min']=='No_col' or not c_l['min'] in geology.columns):
m2l_warnings.append('No min age for geology polygons')
c_l['min']='min'
geology[c_l['min']]=0
if(c_l['max']=='No_col' or not c_l['max'] in geology.columns):
m2l_warnings.append('No max age for geology polygons')
c_l['max']='max'
geology[c_l['max']]=100
if(c_l['c']=='No_col' or not c_l['c'] in geology.columns):
m2l_errors.append('Must have primary strat coding field for geology polygons')
for code in ('c','g','g2','ds','u','r1'):
if(c_l[code] in geology.columns):
geology[c_l[code]].str.replace(","," ")
if(code == 'c' or code =='g' or code=='g2'):
geology[c_l[code]].str.replace(" ","_")
geology[c_l[code]].str.replace("-","_")
geology[c_l[code]].str.replace(",","_")
nans=geology[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of geology file, replacing with 0')
geology[c_l[code]].fillna("0", inplace = True)
for drift in drift_prefix:
geology=geology[~geology[c_l['u']].str.startswith(drift)]
show_metadata(geology,"geology layer")
# Process fold polylines
if (os.path.isfile(fold_file) or not local_paths):
folds = gpd.read_file(fold_file,bbox=bbox)
if(len(folds)>0):
if not c_l['o'] in folds.columns:
folds = folds.reset_index()
folds[c_l['o']]=folds.index
unique_g=set(folds[c_l['o']])
if(not len(unique_g) == len(folds)):
m2l_warnings.append('duplicate fold polyline unique IDs')
folds = folds.replace(r'^\s+$', np.nan, regex=True)
for code in ('ff','t'):
if(c_l['ff']=='No_col' or not c_l['ff'] in folds.columns):
m2l_warnings.append('No fold code for fold polylines')
c_l['ff']='ff'
folds[c_l['ff']]=c_l['fold']
if(c_l['t']=='No_col' or not c_l['t'] in folds.columns):
m2l_warnings.append('No fold polarity for fold polylines')
c_l['t']='t'
folds[c_l['t']]='None'
if(c_l[code] in folds.columns):
folds[c_l[code]].str.replace(","," ")
nans=folds[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of folds file, replacing with 0')
folds[c_l[code]].fillna("0", inplace = True)
folds_clip=m2l_utils.clip_shp(folds,polygo)
if(len(folds_clip) > 0):
folds_explode = explode_polylines(folds_clip, c_l, dst_crs)
if(len(folds_explode) > len(folds_clip)):
m2l_warnings.append(
'some folds are MultiPolyLines, and have been split')
folds_explode.crs = dst_crs
show_metadata(folds_clip,"fold layer")
else:
print('No folds in area')
# Process fault polylines
if (os.path.isfile(fault_file) or not local_paths):
faults_folds = gpd.read_file(fault_file,bbox=bbox)
faults = faults_folds[faults_folds[c_l['f']].str.contains(c_l['fault'])]
faults = faults.replace(r'^\s+$', np.nan, regex=True)
if not c_l['o'] in faults.columns:
m2l_warnings.append('field named "'+str(c_l['o'])+'" added with default value')
faults[c_l['o']] = np.arange(len(faults))
for code in ('f','o','fdip','fdipdir','fdipest'):
if(c_l['f']=='No_col' or not c_l['f'] in faults.columns ):
m2l_warnings.append('No fault type for fault polylines')
c_l['f']='ftype'
faults[c_l['f']]=c_l['fault']
if(c_l['fdip']=='No_col' or not c_l['fdip'] in faults.columns ):
m2l_warnings.append('No fault dip for fault polylines')
c_l['fdip']='fdip'
faults[c_l['fdip']]=c_l['fdipnull']
if(c_l['fdipdir']=='No_col' or not c_l['fdipdir'] in faults.columns ):
m2l_warnings.append('No fault dip direction for fault polylines')
c_l['fdipdir']='fdipdir'
faults[c_l['fdipdir']]=0
if(c_l['fdipest']=='No_col' or not c_l['fdipest'] in faults.columns ):
m2l_warnings.append('No fault dip estimate for fault polylines')
c_l['fdipest']='fdipest'
faults[c_l['fdipest']]='None'
if(c_l['fdipest_vals']=='No_col' or not c_l['fdipest_vals'] in faults.columns ):
m2l_warnings.append('No fault dip estimate text for fault polylines')
c_l['fdipest_vals']='fdipest_vals'
faults[c_l['fdipest_vals']]='None'
if(c_l['n']=='No_col' or not c_l['n'] in faults.columns ):
m2l_warnings.append('No fault name for fault polylines')
c_l['n']='fname'
faults[c_l['n']]='None'
if not c_l[code] in faults.columns:
m2l_errors.append('field named "'+str(c_l[code])+'" not found in fault/fold file')
if(c_l[code] in faults.columns):
nans=faults[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of fault file, replacing with -999')
faults[c_l[code]].fillna("-999", inplace = True)
unique_f=set(faults[c_l['o']])
if(not len(unique_f) == len(faults)):
m2l_errors.append('duplicate fault/fold polyline unique IDs')
faults = faults.replace(r'^\s+$', np.nan, regex=True)
faults_clip=m2l_utils.clip_shp(faults,polygo)
if(len(faults_clip)>0):
faults_explode=explode_polylines(faults_clip,c_l,dst_crs)
if(len(faults_explode)>len(faults_clip)):
m2l_warnings.append('some faults are MultiPolyLines, and have been split')
faults_explode.crs = dst_crs
show_metadata(faults_explode,"fault layer")
else:
#fault_file='None'
print('No faults in area')
# Process mindep points
if (os.path.isfile(mindep_file) or not local_paths):
mindeps = gpd.read_file(mindep_file,bbox=bbox)
if(len(mindeps)==0):
m2l_warnings.append('no mindeps for analysis')
else:
mindeps = mindeps.replace(r'^\s+$', np.nan, regex=True)
for code in ('msc','msn','mst','mtc','mscm','mcom'):
if(c_l[code]=='No_col'):
mindeps[c_l[code]]='No_col'
if not c_l[code] in mindeps.columns:
m2l_errors.append('field named "'+str(c_l[code])+'" not found in mineral deposits file')
else:
nans=mindeps[c_l[code]].isnull().sum()
if(nans>0):
m2l_warnings.append(str(nans)+' NaN/blank found in column '+str(c_l[code])+' of mindep file, replacing with 0')
mindeps[c_l[code]].fillna("0", inplace = True)
show_metadata(mindeps,"mindeps layer")
# explode fault/fold multipolylines
# sometimes faults go off map and come back in again which after clipping creates multipolylines
if(len(m2l_warnings)>0):
print("\nWarnings:")
warnings.warn('The warnings listed above were issued')
for w in m2l_warnings:
print(" ",w)
if(len(m2l_errors)>0):
print("\nErrors:")
warnings.warn('The errors listed above must be fixed prior to rerunning map2loop')
for e in m2l_errors:
print(" ",e)
raise NameError('map2loop error: Fix errors before running again')
if(len(m2l_errors)==0):
if(len(folds_clip)>0):
fold_file=tmp_path+'folds_clip.shp'
folds_explode=folds_explode.dropna(subset=['geometry'])
folds_explode.to_file(fold_file)
else:
fold_file=tmp_path+'fold_clip.shp'
print("\nFold layer metadata\n--------------------")
print("No folds found")
if(len(faults_clip)>0):
fault_file=tmp_path+'faults_clip.shp'
faults_explode.crs=dst_crs
faults_explode=faults_explode.dropna(subset=['geometry'])
faults_explode.to_file(fault_file)
else:
fault_file=tmp_path+'faults_clip.shp'
print("\nFault layer metadata\n--------------------")
print("No faults found")
geol_clip=gpd.overlay(geology, polygo, how='intersection')
if(len(geol_clip)>0):
geol_clip.crs=dst_crs
geol_file=tmp_path+'geol_clip.shp'
geol_clip.to_file(geol_file)
if(len(orientations)>0):
structure_file=tmp_path+'structure_clip.shp'
orientations.crs=dst_crs
orientations[c_l['dd']] = pd.to_numeric(orientations[c_l['dd']])
orientations[c_l['d']] = pd.to_numeric(orientations[c_l['d']])
orientations.to_file(structure_file)
if(len(mindeps)>0):
mindep_file=tmp_path+'mindeps_clip.shp'
mindeps.crs=dst_crs
mindeps.to_file(mindep_file)
print('\nNo errors found, clipped and updated files saved to tmp')
return(structure_file,geol_file,fault_file,mindep_file,fold_file,c_l)
def show_metadata(gdf,name):
if(len(gdf)>0):
print("\n",name," metadata\n--------------------")
print(" bbox",gdf.total_bounds)
print(" CRS",gdf.crs)
print(" # items",len(gdf))
types=[]
for i,g in gdf.iterrows():
if(not g.geometry.type in types):
types.append(g.geometry.type)
print(" Data types",types)
else:
print("\n",name," metadata\n--------------------")
print(" empty file, check contents") | [
"map2loop.m2l_utils.clip_shp",
"geopandas.read_file",
"pandas.to_numeric",
"geopandas.overlay",
"warnings.warn",
"geopandas.GeoDataFrame"
] | [((420, 471), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'columns': 'indf.columns', 'crs': 'dst_crs'}), '(columns=indf.columns, crs=dst_crs)\n', (436, 471), True, 'import geopandas as gpd\n'), ((2255, 2295), 'geopandas.read_file', 'gpd.read_file', (['structure_file'], {'bbox': 'bbox'}), '(structure_file, bbox=bbox)\n', (2268, 2295), True, 'import geopandas as gpd\n'), ((4408, 4446), 'geopandas.read_file', 'gpd.read_file', (['geology_file'], {'bbox': 'bbox'}), '(geology_file, bbox=bbox)\n', (4421, 4446), True, 'import geopandas as gpd\n'), ((7461, 7496), 'geopandas.read_file', 'gpd.read_file', (['fold_file'], {'bbox': 'bbox'}), '(fold_file, bbox=bbox)\n', (7474, 7496), True, 'import geopandas as gpd\n'), ((9497, 9533), 'geopandas.read_file', 'gpd.read_file', (['fault_file'], {'bbox': 'bbox'}), '(fault_file, bbox=bbox)\n', (9510, 9533), True, 'import geopandas as gpd\n'), ((12228, 12262), 'map2loop.m2l_utils.clip_shp', 'm2l_utils.clip_shp', (['faults', 'polygo'], {}), '(faults, polygo)\n', (12246, 12262), False, 'from map2loop import m2l_utils\n'), ((12836, 12873), 'geopandas.read_file', 'gpd.read_file', (['mindep_file'], {'bbox': 'bbox'}), '(mindep_file, bbox=bbox)\n', (12849, 12873), True, 'import geopandas as gpd\n'), ((13966, 14020), 'warnings.warn', 'warnings.warn', (['"""The warnings listed above were issued"""'], {}), "('The warnings listed above were issued')\n", (13979, 14020), False, 'import warnings\n'), ((14143, 14230), 'warnings.warn', 'warnings.warn', (['"""The errors listed above must be fixed prior to rerunning map2loop"""'], {}), "(\n 'The errors listed above must be fixed prior to rerunning map2loop')\n", (14156, 14230), False, 'import warnings\n'), ((15285, 15333), 'geopandas.overlay', 'gpd.overlay', (['geology', 'polygo'], {'how': '"""intersection"""'}), "(geology, polygo, how='intersection')\n", (15296, 15333), True, 'import geopandas as gpd\n'), ((767, 818), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', ([], {'columns': 
'indf.columns', 'crs': 'dst_crs'}), '(columns=indf.columns, crs=dst_crs)\n', (783, 818), True, 'import geopandas as gpd\n'), ((8898, 8931), 'map2loop.m2l_utils.clip_shp', 'm2l_utils.clip_shp', (['folds', 'polygo'], {}), '(folds, polygo)\n', (8916, 8931), False, 'from map2loop import m2l_utils\n'), ((15669, 15707), 'pandas.to_numeric', 'pd.to_numeric', (["orientations[c_l['dd']]"], {}), "(orientations[c_l['dd']])\n", (15682, 15707), True, 'import pandas as pd\n'), ((15745, 15782), 'pandas.to_numeric', 'pd.to_numeric', (["orientations[c_l['d']]"], {}), "(orientations[c_l['d']])\n", (15758, 15782), True, 'import pandas as pd\n')] |
import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
x1 = tf.constant(5)
x2 = tf.constant(6)
result = tf.multiply(x1, x2)
print(result)
sess = tf.Session()
with tf.Session() as sess:
output = sess.run(result)
print(output)
| [
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.multiply"
] | [((83, 97), 'tensorflow.constant', 'tf.constant', (['(5)'], {}), '(5)\n', (94, 97), True, 'import tensorflow as tf\n'), ((103, 117), 'tensorflow.constant', 'tf.constant', (['(6)'], {}), '(6)\n', (114, 117), True, 'import tensorflow as tf\n'), ((128, 147), 'tensorflow.multiply', 'tf.multiply', (['x1', 'x2'], {}), '(x1, x2)\n', (139, 147), True, 'import tensorflow as tf\n'), ((170, 182), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (180, 182), True, 'import tensorflow as tf\n'), ((189, 201), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (199, 201), True, 'import tensorflow as tf\n')] |
import auth_key
import tweepy
import time
auth = tweepy.OAuthHandler(auth_key.API_key, auth_key.API_secret_key)
auth.set_access_token(auth_key.Access_token, auth_key.Access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
user = api.me()
indId = 2282863
india_trend = api.trends_place(indId)
tweetNo = 5
a =[]
trndInd = api.trends_place(indId)
for trend in trndInd[0]['trends']:
a.append(trend['name'])
for item in a:
print(item)
for tweet in tweepy.Cursor(api.search, item).items(tweetNo):
try:
print("tweet liked & retweeted")
tweet.favorite()
tweet.retweet()
time.sleep(10)
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
| [
"tweepy.Cursor",
"tweepy.API",
"time.sleep",
"tweepy.OAuthHandler"
] | [((54, 116), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['auth_key.API_key', 'auth_key.API_secret_key'], {}), '(auth_key.API_key, auth_key.API_secret_key)\n', (73, 116), False, 'import tweepy\n'), ((202, 275), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)', 'wait_on_rate_limit_notify': '(True)'}), '(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n', (212, 275), False, 'import tweepy\n'), ((530, 561), 'tweepy.Cursor', 'tweepy.Cursor', (['api.search', 'item'], {}), '(api.search, item)\n', (543, 561), False, 'import tweepy\n'), ((710, 724), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (720, 724), False, 'import time\n')] |
from ds_discovery import Controller
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
__author__ = '<NAME>'
def domain_controller():
# Controller
uri_pm_repo = os.environ.get('HADRON_PM_REPO', None)
controller = Controller.from_env(uri_pm_repo=uri_pm_repo, default_save=False, has_contract=True)
run_book = os.environ.get('HADRON_CONTROLLER_RUNBOOK', None)
repeat = os.environ.get('HADRON_CONTROLLER_REPEAT', None)
sleep = os.environ.get('HADRON_CONTROLLER_SLEEP', None)
controller.run_controller(run_book=run_book, repeat=repeat, sleep=sleep)
if __name__ == '__main__':
domain_controller()
| [
"warnings.simplefilter",
"ds_discovery.Controller.from_env",
"os.environ.get"
] | [((63, 125), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (84, 125), False, 'import warnings\n'), ((126, 193), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'DeprecationWarning'}), "(action='ignore', category=DeprecationWarning)\n", (147, 193), False, 'import warnings\n'), ((279, 317), 'os.environ.get', 'os.environ.get', (['"""HADRON_PM_REPO"""', 'None'], {}), "('HADRON_PM_REPO', None)\n", (293, 317), False, 'import os\n'), ((335, 422), 'ds_discovery.Controller.from_env', 'Controller.from_env', ([], {'uri_pm_repo': 'uri_pm_repo', 'default_save': '(False)', 'has_contract': '(True)'}), '(uri_pm_repo=uri_pm_repo, default_save=False,\n has_contract=True)\n', (354, 422), False, 'from ds_discovery import Controller\n'), ((434, 483), 'os.environ.get', 'os.environ.get', (['"""HADRON_CONTROLLER_RUNBOOK"""', 'None'], {}), "('HADRON_CONTROLLER_RUNBOOK', None)\n", (448, 483), False, 'import os\n'), ((497, 545), 'os.environ.get', 'os.environ.get', (['"""HADRON_CONTROLLER_REPEAT"""', 'None'], {}), "('HADRON_CONTROLLER_REPEAT', None)\n", (511, 545), False, 'import os\n'), ((558, 605), 'os.environ.get', 'os.environ.get', (['"""HADRON_CONTROLLER_SLEEP"""', 'None'], {}), "('HADRON_CONTROLLER_SLEEP', None)\n", (572, 605), False, 'import os\n')] |
import json
import unittest
from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator
class UtilsTest(unittest.TestCase):
def test_uuid(self):
print(generate_uuid())
self.assertEqual(len(generate_uuid()), 32)
def test_valiate(self):
form = dict(
a=1,
b=2,
c=3
)
v = Validator().rule("a").rule("b").rule("c").rule("d", False, 4)
_a, _b, _c, _d = v.validate_form(form)
self.assertEqual(_a, 1)
self.assertEqual(_b, 2)
self.assertEqual(_c, 3)
self.assertEqual(_d, 4)
def test_validate_none_form(self):
v = Validator().rule("page", False, 1).rule("per_page", False, 10)
page, per_page = v.validate_form(None)
self.assertEqual(page, 1)
self.assertEqual(per_page, 10)
def test_validate_none_form_required(self):
v = Validator().rule("page")
try:
v.validate_form(None)
except ValidationError as e:
print(e)
try:
v.validate_form(dict(size=2))
except ValidationError as e:
print(e)
def test_extend(self):
try:
[].extend(None)
except TypeError as e:
print(e)
def test_paging(self):
p = Paging(101, 1, 10)
print(json.dumps(p.__dict__))
def test_json_encode(self):
p = Paging(101, 1, 10)
print(CustomEncoder().encode(p))
| [
"utils.generate_uuid",
"json.dumps",
"utils.Validator",
"utils.CustomEncoder",
"utils.Paging"
] | [((1317, 1335), 'utils.Paging', 'Paging', (['(101)', '(1)', '(10)'], {}), '(101, 1, 10)\n', (1323, 1335), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n'), ((1423, 1441), 'utils.Paging', 'Paging', (['(101)', '(1)', '(10)'], {}), '(101, 1, 10)\n', (1429, 1441), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n'), ((190, 205), 'utils.generate_uuid', 'generate_uuid', ([], {}), '()\n', (203, 205), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n'), ((1350, 1372), 'json.dumps', 'json.dumps', (['p.__dict__'], {}), '(p.__dict__)\n', (1360, 1372), False, 'import json\n'), ((236, 251), 'utils.generate_uuid', 'generate_uuid', ([], {}), '()\n', (249, 251), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n'), ((913, 924), 'utils.Validator', 'Validator', ([], {}), '()\n', (922, 924), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n'), ((1456, 1471), 'utils.CustomEncoder', 'CustomEncoder', ([], {}), '()\n', (1469, 1471), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n'), ((669, 680), 'utils.Validator', 'Validator', ([], {}), '()\n', (678, 680), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n'), ((380, 391), 'utils.Validator', 'Validator', ([], {}), '()\n', (389, 391), False, 'from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
from . import entries, meta
logger = logging.getLogger(__name__)
def build_parser():
prog = os.path.basename(sys.argv[0])
if prog not in ("pyclean", "pyclean.py"):
prog = "pyclean"
parser = argparse.ArgumentParser(prog=prog)
parser.add_argument(
"entries", nargs="+", metavar="DIR_OR_FILE",
)
parser.add_argument(
"-v", "--verbose", dest="verbose",
action="store_true", help="be verbose",
)
parser.add_argument(
"--version", action="version",
version="%(prog)s, version {}".format(meta.__version__),
)
return parser
def parse_args(argv):
parser = build_parser()
options = parser.parse_args(argv)
return options
def setup_logging(options):
if options.verbose:
logging.root.setLevel(logging.DEBUG)
form = "%(levelname).1s: %(module)s:%(lineno)d: %(message)s"
else:
logging.root.setLevel(logging.INFO)
form = "%(message)s"
logging.basicConfig(format=form)
def main(argv=None):
options = parse_args(argv)
setup_logging(options)
if options.verbose:
logger.debug("options: %s", options.__dict__)
entries.clean(options.entries)
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"logging.basicConfig",
"argparse.ArgumentParser",
"logging.root.setLevel",
"os.path.basename"
] | [((139, 166), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (156, 166), False, 'import logging\n'), ((200, 229), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (216, 229), False, 'import os\n'), ((314, 348), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': 'prog'}), '(prog=prog)\n', (337, 348), False, 'import argparse\n'), ((1075, 1107), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'form'}), '(format=form)\n', (1094, 1107), False, 'import logging\n'), ((882, 918), 'logging.root.setLevel', 'logging.root.setLevel', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (903, 918), False, 'import logging\n'), ((1006, 1041), 'logging.root.setLevel', 'logging.root.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (1027, 1041), False, 'import logging\n')] |
from flask import Flask, render_template, url_for, redirect, request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from dateutil.relativedelta import relativedelta
from demail import demail
__author__ = '<NAME>'
__doc__ = 'Never Forget online remainder'
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///list.db'
# Remember, every time you make changes to the column (such as adding one col or removing one col, change the value),
# you have to do the following: open terminal from pycharm, python3.7, from app import db, db.create_all() and exit.
db = SQLAlchemy(app)
db.create_all()
datetime_format = '%b-%d-%Y %H:%M'
'''
This part requires your email information in order to receive email notifications. (This is left blank intentionally)
'''
email_account = ''
email_password = ''
# TODO send email warning if the due time is so soon and still incomplete,
class TODO(db.Model):
id = db.Column(db.Integer, primary_key=True)
content = db.Column(db.String(500), nullable=False)
time_created_str = datetime.now().strftime("%B-%d-%Y %H:%M:%S")
time_created = db.Column(db.String, default=time_created_str)
time_due = db.Column(db.String(500), nullable=False)
# By default, the email warning is disabled
email_warning = db.Column(db.Integer, default=0)
def __repr__(self):
return self.id
def __str__(self):
return self.__repr__()
def get_time_color(self):
time_dif = self.get_time_difference()
if time_dif['days'] < 0 or time_dif['seconds'] < 0:
return 'black'
elif time_dif['days'] > 30:
return "#0000ff"
elif time_dif['days'] > 7:
return "#0080ff"
elif time_dif['days'] > 2:
return '#00ff00'
elif time_dif['days'] >= 1:
return '#bfff00'
# >Half day
elif time_dif['seconds'] >= 43200:
return "#ffff00"
# >3h
elif time_dif['seconds'] >= 10800:
send_email(self)
return "#ffbf00"
# >1h
elif time_dif['seconds'] >= 3600:
send_email(self)
return "#ff8000"
else:
send_email(self)
return "#ff0000"
def get_time_difference(self):
return get_time_difference(datetime.strptime(self.time_due.__str__(), datetime_format))
'''
This will return a new date & time that after adding the values in time dictionaries
'''
def get_time(**time):
# TODO could I optimize those statements using comprehension for?
for item in ['hour', 'minute', 'day', 'month', 'year']:
if item not in time:
time[item] = 0
time_now = datetime.now() + relativedelta(hours=time['hour'], minutes=time['minute'], days=time['day'],
months=time['month'], years=time['year'])
return time_now.strftime(datetime_format)
def get_time_difference(time):
time_now = datetime.now().replace(microsecond=0)
diff = time - time_now
return {'days': diff.days, 'seconds': diff.seconds}
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'POST':
return redirect('issues/404.html')
elif request.method == 'GET':
tasks = TODO.query.order_by(TODO.time_created).all()
time_now = datetime.now().strftime(datetime_format)
return render_template("index.html", tasks=tasks, mintime=time_now, maxtime=get_time(year=100),
display_time=get_time(hour=3))
else:
return "Invalid method: " + request.method
@app.route('/addTask/<content>/<due_date>', methods=['POST'])
def addTask(content, due_date):
if request.method == 'POST':
# content = request.form['content']
try:
datetime.strptime(due_date, datetime_format)
except:
print("The time is not in correct format")
task = TODO(content=content, time_due=due_date)
# Add to database
try:
db.session.add(task)
db.session.commit()
return redirect('/')
except:
print("Unable to add the task")
else:
return render_template('issues/unable_to.html', issue="method not applicable")
@app.route('/editTask/<int:tid>/<content>/<due_date>/<email_warning>', methods=['POST'])
def editTask(tid, content, due_date, email_warning):
task = TODO.query.get_or_404(tid)
# Accessing through form in edit
task.content = content
task.time_due = due_date
task.email_warning = email_warning
try:
db.session.commit()
return redirect('/')
except:
print("Unable to edit the task")
@app.route('/editTask/<int:tid>', methods=['GET'])
def edit_task_jump(tid):
return render_template('edit.html', task=TODO.query.get_or_404(tid), maxtime=get_time(year=100))
@app.route('/cmTask/<int:tid>', methods=['GET'])
def cmTask(tid):
if request.method == 'GET':
task = TODO.query.get_or_404(tid)
try:
db.session.delete(task)
db.session.commit()
return redirect('/')
except:
return render_template('issues/unable_to.html', issue='complete the task')
else:
return render_template('issues/unable_to.html', issue="method not applicable")
@app.route('/setting/<email_add>', methods=['POST'])
def setting(email_add):
write_file('email.cfg', email_add)
return ''
@app.route('/setting/', methods=['GET'])
def setting_redirect():
email = '' + read_file('email.cfg')
return render_template('setting.html', email=email)
def read_file(filename):
try:
with open(filename) as f:
return f.readline()
except IOError:
print("IO ERROR Raised. Reading file failed,")
f = open(filename, "w")
f.write('<EMAIL>')
f.close()
return 'content'
def write_file(filename, file_content):
try:
with open(filename, 'w') as f:
f.write(file_content)
except IOError:
print("IO ERROR Raised. Writing file failed,")
return ''
def send_email(todo_object):
pass
# THIS FUNCTION MUST BE ENABLED MANUALLY
# THIS FUNCTION MUST BE ENABLED MANUALLY
# THIS FUNCTION MUST BE ENABLED MANUALLY
# assert isinstance(todo_object, TODO)
# sendto = read_file('email.cfg')
# email_obj = demail(email_account, email_password, sendto)
# email_content = f'''
# Subject: Your task is about to due
# Hello, this is automatic remainder that reminds you your task {todo_object.content} will due soon''' + '''
# ({todo_object.get_time_difference()['days']}days and {todo_object.get_time_difference()['seconds']} seconds) '''
# email_obj.send(email_content)
# return ''
if __name__ == '__main__':
app.run(debug=False)
| [
"flask.render_template",
"dateutil.relativedelta.relativedelta",
"flask.Flask",
"datetime.datetime.strptime",
"flask.redirect",
"datetime.datetime.now",
"flask_sqlalchemy.SQLAlchemy"
] | [((286, 301), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (291, 301), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((602, 617), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (612, 617), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((5602, 5646), 'flask.render_template', 'render_template', (['"""setting.html"""'], {'email': 'email'}), "('setting.html', email=email)\n", (5617, 5646), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((2706, 2720), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2718, 2720), False, 'from datetime import datetime\n'), ((2723, 2844), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'hours': "time['hour']", 'minutes': "time['minute']", 'days': "time['day']", 'months': "time['month']", 'years': "time['year']"}), "(hours=time['hour'], minutes=time['minute'], days=time['day'],\n months=time['month'], years=time['year'])\n", (2736, 2844), False, 'from dateutil.relativedelta import relativedelta\n'), ((3206, 3233), 'flask.redirect', 'redirect', (['"""issues/404.html"""'], {}), "('issues/404.html')\n", (3214, 3233), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((4209, 4280), 'flask.render_template', 'render_template', (['"""issues/unable_to.html"""'], {'issue': '"""method not applicable"""'}), "('issues/unable_to.html', issue='method not applicable')\n", (4224, 4280), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((4649, 4662), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (4657, 4662), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((5280, 5351), 'flask.render_template', 'render_template', (['"""issues/unable_to.html"""'], {'issue': '"""method not applicable"""'}), "('issues/unable_to.html', issue='method not applicable')\n", (5295, 5351), False, 'from flask import Flask, 
render_template, url_for, redirect, request\n'), ((1065, 1079), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1077, 1079), False, 'from datetime import datetime\n'), ((2981, 2995), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2993, 2995), False, 'from datetime import datetime\n'), ((3814, 3858), 'datetime.datetime.strptime', 'datetime.strptime', (['due_date', 'datetime_format'], {}), '(due_date, datetime_format)\n', (3831, 3858), False, 'from datetime import datetime\n'), ((4110, 4123), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (4118, 4123), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((5138, 5151), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (5146, 5151), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((5187, 5254), 'flask.render_template', 'render_template', (['"""issues/unable_to.html"""'], {'issue': '"""complete the task"""'}), "('issues/unable_to.html', issue='complete the task')\n", (5202, 5254), False, 'from flask import Flask, render_template, url_for, redirect, request\n'), ((3348, 3362), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3360, 3362), False, 'from datetime import datetime\n')] |
"""The solaredge integration."""
from __future__ import annotations
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up a solaredge config entry.

    Forwards the entry to the sensor platform; the forward runs as a
    background task so this coroutine returns immediately.
    """
    # Schedule platform setup without awaiting it.
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "sensor")
    )
    return True
| [
"homeassistant.helpers.config_validation.deprecated"
] | [((264, 285), 'homeassistant.helpers.config_validation.deprecated', 'cv.deprecated', (['DOMAIN'], {}), '(DOMAIN)\n', (277, 285), True, 'import homeassistant.helpers.config_validation as cv\n')] |
import os, sys
from distutils.util import strtobool
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.python.util import nest, tf_inspect
from tensorflow.python.eager import tape
# from tensorflow.python.ops.custom_gradient import graph_mode_decorator
# 是否使用重计算
do_recompute = strtobool(os.environ.get('RECOMPUTE', '0'))
# 知乎:https://zhuanlan.zhihu.com/p/349492378
# 论文:https://arxiv.53yu.com/pdf/1606.08415.pdf
def gelu_erf(x):
    """Exact GELU activation computed from the Gauss error function.

    gelu(x) = x * Phi(x), where Phi is the standard normal CDF.
    Reference: https://arxiv.org/pdf/1606.08415.pdf
    """
    # np.sqrt runs in float64 (higher precision than tf's default float32).
    cdf = 0.5 * (1.0 + tf.math.erf(x / np.sqrt(2.0)))
    return x * cdf
def gelu_tanh(x):
    """Tanh-based approximation of the GELU activation.

    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    """
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3))
    return x * (0.5 * (1 + K.tanh(inner)))
def set_gelu(version):
    """Select which GELU implementation the registered name 'gelu' uses.

    Args:
        version: 'erf' or 'tanh' (case-insensitive).
    """
    version = version.lower()
    assert version in ['erf', 'tanh'], 'gelu version must in erf or tanh'
    # Dispatch table instead of an if/elif chain.
    implementations = {'erf': gelu_erf, 'tanh': gelu_tanh}
    tf.keras.utils.get_custom_objects()['gelu'] = implementations[version]
def align(tensor, axes, ndim=None):
    """Re-align `tensor` (a batched expand_dims; similar in spirit to transpose).

    axes: the original i-th dimension is placed at dimension axes[i]
        of the new tensor;
    ndim: rank of the new tensor (defaults to max(axes) when omitted)
    Example:
    >>> tensor = tf.constant(np.arange(12).reshape(3,4), dtype=tf.float32)
    >>> print(tensor)
    tf.Tensor(
    [[ 0.  1.  2.  3.]
     [ 4.  5.  6.  7.]
     [ 8.  9. 10. 11.]], shape=(3, 4), dtype=float32)
    >>> same_dim = align(tensor, [0, -1], 2)
    >>> print(same_dim)
    tf.Tensor(
    [[ 0.  1.  2.  3.]
     [ 4.  5.  6.  7.]
     [ 8.  9. 10. 11.]], shape=(3, 4), dtype=float32)
    >>> more_dim = align(tensor, [0, -1], 3)
    >>> print(more_dim)
    tf.Tensor(
    [[[ 0.  1.  2.  3.]]
    <BLANKLINE>
     [[ 4.  5.  6.  7.]]
    <BLANKLINE>
     [[ 8.  9. 10. 11.]]], shape=(3, 1, 4), dtype=float32)
    """
    # Every existing axis must be given a target position.
    assert len(axes) == K.ndim(tensor)
    # None entries insert new length-1 axes; slice(None) keeps an axis intact.
    indices = [None] * (ndim or max(axes))
    for i in axes:
        indices[i] = slice(None)
    return tensor[indices]
def sequence_masking(x, mask, value=0, axis=None):
    """Apply a conditional mask along one axis of a sequence tensor.

    parameters:
    -----------
    x: tensor
        input tensor
    mask: tensor
        0/1 matrix shaped (batch_size, seq_len)
    value: float or str
        value written into the masked positions; the strings 'inf' and
        '-inf' select +/- the module-wide "infinity" constant
    axis: int
        axis the sequence lives on, defaults to 1
    """
    if mask is None:
        return x
    # Make sure x supports arithmetic (bool tensors do not).
    x_type = K.dtype(x)
    if x_type == 'bool':
        x = K.cast(x, 'int32')
    # The mask must share x's dtype for the blend below.
    if K.dtype(mask) != K.dtype(x):
        mask = K.cast(mask, K.dtype(x))
    # BUGFIX: K.infinity is a *function* (assigned at module level), so it
    # must be called; the old code negated/assigned the function object
    # itself, which raised a TypeError at runtime.
    if value == '-inf':
        value = -K.infinity()
    elif value == 'inf':
        value = K.infinity()
    value = K.cast(value, K.dtype(x))
    # Normalise the axis to a positive index (axis 0 is the batch axis).
    if axis is None:
        axis = 1
    if axis < 0:
        axis = K.ndim(x) + axis
    assert axis > 0, 'axis must be greater than 0'
    # Broadcast the mask up to x's rank: leading dims before `axis`...
    for _ in range(axis - 1):  # only runs when axis > 1
        mask = K.expand_dims(mask, 1)  # leave dim 0 for batch_size
    # ...then trailing dims after it.
    for _ in range(K.ndim(x) - K.ndim(mask)):
        mask = K.expand_dims(mask, K.ndim(mask))
    x = x * mask + value * (1 - mask)
    # Restore the caller's original dtype.
    if x_type == 'bool':
        x = K.cast(x, x_type)
    return x
return x
def recompute_grad(call):
    """Gradient-checkpointing ("recompute") decorator for a keras layer's
    call function.

    Trades extra forward computation for lower peak GPU memory:
    activations are not recorded in the forward pass and are recomputed
    on demand during backprop.
    Paper: https://arxiv.org/abs/1604.06174
    """
    if not do_recompute:
        return call
    def inner(self, inputs, **kwargs):
        # tf 2.x's nest.flatten leaves numpy arrays and tf.Tensors intact.
        flat_inputs = nest.flatten(inputs)
        # Drop kwargs the wrapped call() does not accept.
        call_args = tf_inspect.getfullargspec(call).args
        for key in ['mask', 'training']:
            if key not in call_args and key in kwargs:
                del kwargs[key]
        def kernel_call():
            """Forward computation.
            """
            return call(self, inputs, **kwargs)
        def call_and_grad(*inputs):
            """Forward computation plus its custom backward pass.
            """
            # Run forward without recording, so activations are discarded.
            with tape.stop_recording():
                outputs = kernel_call()
                outputs = tf.identity(outputs)
            def grad_fn(doutputs, variables=None):
                watches = list(inputs)
                if variables is not None:
                    watches += list(variables)
                # Re-run the forward pass under a fresh tape so the
                # activations needed for gradients are rebuilt here.
                with tf.GradientTape() as t:
                    t.watch(watches)
                    with tf.control_dependencies([doutputs]):
                        outputs = kernel_call()
                grads = t.gradient(
                    outputs, watches, output_gradients=[doutputs]
                )
                del t
                # Split gradients back into (inputs, variables).
                return grads[:len(inputs)], grads[len(inputs):]
            return outputs, grad_fn
        outputs, grad_fn = call_and_grad(*flat_inputs)
        flat_outputs = nest.flatten(outputs)
        def actual_grad_fn(*doutputs):
            grads = grad_fn(*doutputs, variables=self.trainable_weights)
            return grads[0] + grads[1]
        watches = flat_inputs + self.trainable_weights
        watches = [tf.convert_to_tensor(x) for x in watches]
        # Manually record the op so TF routes gradients through grad_fn.
        tape.record_operation(
            call.__name__, flat_outputs, watches, actual_grad_fn
        )
        return outputs
    return inner
def infinity():
    """Return the value currently registered to represent infinity."""
    registry = tf.keras.utils.get_custom_objects()
    return registry.get('infinity', 1e12)
def set_infinity(value):
    """Register a new value to stand in for infinity."""
    registry = tf.keras.utils.get_custom_objects()
    registry['infinity'] = value
# Attach to keras.backend so callers can use K.infinity() / K.set_infinity()
# the same way they use K.epsilon().
K.infinity = infinity
K.set_infinity = set_infinity
sys.modules['tensorflow.keras.backend'] = K
# Register the activations with keras so the name 'gelu' resolves in model
# configs; the exact erf variant is the default.
custom_objects = {
    'gelu_erf': gelu_erf,
    'gelu_tanh': gelu_tanh,
    'gelu': gelu_erf,
}
tf.keras.utils.get_custom_objects().update(custom_objects)
if __name__ == '__main__':
    # Run the doctest embedded in align()'s docstring.
    import doctest
    doctest.testmod()
| [
"tensorflow.python.eager.tape.stop_recording",
"numpy.sqrt",
"os.environ.get",
"tensorflow.python.util.nest.flatten",
"tensorflow.keras.backend.ndim",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.GradientTape",
"tensorflow.keras.backend.pow",
"tensorflow.keras.backend.dtype",
"t... | [((348, 380), 'os.environ.get', 'os.environ.get', (['"""RECOMPUTE"""', '"""0"""'], {}), "('RECOMPUTE', '0')\n", (362, 380), False, 'import os, sys\n'), ((2470, 2480), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2477, 2480), True, 'import tensorflow.keras.backend as K\n'), ((6013, 6030), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (6028, 6030), False, 'import doctest\n'), ((1947, 1961), 'tensorflow.keras.backend.ndim', 'K.ndim', (['tensor'], {}), '(tensor)\n', (1953, 1961), True, 'import tensorflow.keras.backend as K\n'), ((2520, 2538), 'tensorflow.keras.backend.cast', 'K.cast', (['x', '"""int32"""'], {}), "(x, 'int32')\n", (2526, 2538), True, 'import tensorflow.keras.backend as K\n'), ((2569, 2582), 'tensorflow.keras.backend.dtype', 'K.dtype', (['mask'], {}), '(mask)\n', (2576, 2582), True, 'import tensorflow.keras.backend as K\n'), ((2586, 2596), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2593, 2596), True, 'import tensorflow.keras.backend as K\n'), ((2817, 2827), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2824, 2827), True, 'import tensorflow.keras.backend as K\n'), ((3058, 3080), 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['mask', '(1)'], {}), '(mask, 1)\n', (3071, 3080), True, 'import tensorflow.keras.backend as K\n'), ((3293, 3310), 'tensorflow.keras.backend.cast', 'K.cast', (['x', 'x_type'], {}), '(x, x_type)\n', (3299, 3310), True, 'import tensorflow.keras.backend as K\n'), ((3693, 3713), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), '(inputs)\n', (3705, 3713), False, 'from tensorflow.python.util import nest, tf_inspect\n'), ((4949, 4970), 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['outputs'], {}), '(outputs)\n', (4961, 4970), False, 'from tensorflow.python.util import nest, tf_inspect\n'), ((5256, 5331), 'tensorflow.python.eager.tape.record_operation', 'tape.record_operation', (['call.__name__', 
'flat_outputs', 'watches', 'actual_grad_fn'], {}), '(call.__name__, flat_outputs, watches, actual_grad_fn)\n', (5277, 5331), False, 'from tensorflow.python.eager import tape\n'), ((5589, 5624), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (5622, 5624), True, 'import tensorflow as tf\n'), ((5899, 5934), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (5932, 5934), True, 'import tensorflow as tf\n'), ((951, 986), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (984, 986), True, 'import tensorflow as tf\n'), ((2627, 2637), 'tensorflow.keras.backend.dtype', 'K.dtype', (['x'], {}), '(x)\n', (2634, 2637), True, 'import tensorflow.keras.backend as K\n'), ((2917, 2926), 'tensorflow.keras.backend.ndim', 'K.ndim', (['x'], {}), '(x)\n', (2923, 2926), True, 'import tensorflow.keras.backend as K\n'), ((3121, 3130), 'tensorflow.keras.backend.ndim', 'K.ndim', (['x'], {}), '(x)\n', (3127, 3130), True, 'import tensorflow.keras.backend as K\n'), ((3133, 3145), 'tensorflow.keras.backend.ndim', 'K.ndim', (['mask'], {}), '(mask)\n', (3139, 3145), True, 'import tensorflow.keras.backend as K\n'), ((3184, 3196), 'tensorflow.keras.backend.ndim', 'K.ndim', (['mask'], {}), '(mask)\n', (3190, 3196), True, 'import tensorflow.keras.backend as K\n'), ((3735, 3766), 'tensorflow.python.util.tf_inspect.getfullargspec', 'tf_inspect.getfullargspec', (['call'], {}), '(call)\n', (3760, 3766), False, 'from tensorflow.python.util import nest, tf_inspect\n'), ((5205, 5228), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {}), '(x)\n', (5225, 5228), True, 'import tensorflow as tf\n'), ((5466, 5501), 'tensorflow.keras.utils.get_custom_objects', 'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (5499, 5501), True, 'import tensorflow as tf\n'), ((1044, 1079), 'tensorflow.keras.utils.get_custom_objects', 
'tf.keras.utils.get_custom_objects', ([], {}), '()\n', (1077, 1079), True, 'import tensorflow as tf\n'), ((4124, 4145), 'tensorflow.python.eager.tape.stop_recording', 'tape.stop_recording', ([], {}), '()\n', (4143, 4145), False, 'from tensorflow.python.eager import tape\n'), ((4215, 4235), 'tensorflow.identity', 'tf.identity', (['outputs'], {}), '(outputs)\n', (4226, 4235), True, 'import tensorflow as tf\n'), ((602, 614), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (609, 614), True, 'import numpy as np\n'), ((679, 697), 'numpy.sqrt', 'np.sqrt', (['(2 / np.pi)'], {}), '(2 / np.pi)\n', (686, 697), True, 'import numpy as np\n'), ((4443, 4460), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4458, 4460), True, 'import tensorflow as tf\n'), ((4531, 4566), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[doutputs]'], {}), '([doutputs])\n', (4554, 4566), True, 'import tensorflow as tf\n'), ((716, 727), 'tensorflow.keras.backend.pow', 'K.pow', (['x', '(3)'], {}), '(x, 3)\n', (721, 727), True, 'import tensorflow.keras.backend as K\n')] |
import datetime
import os
import requests
import tweepy
from PIL import Image
# Get your own keys from developer.twitter.com
# You can find a detailed tutorial about authenticating accounts from github.com/gultugaydemir/Twitter_OAuth1.0a
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# You can get your own API key from api.nasa.gov. However simply writing "DEMO_KEY" works too, as it can be seen on the website.
# Fetch today's Astronomy Picture of the Day metadata from the NASA API.
response = requests.get("https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY") #This link contains the data we needed about the photo of the day.
data = response.json() # Converts the data to JSON format so that we can retrieve data from it.
description = data["title"] # Getting the title of the photo.
date = datetime.datetime.now().strftime("%y%m%d") # We need the {yymmdd} format for the source link.
source = "https://apod.nasa.gov/apod/ap{date}.html".format(date=date) # Creating the source link for the posted photo.
message = '"' + description + '" \n' + source # The status format for the image tweets.
message_video = '"' + description + '" \n' # The status format for the YouTube tweets.
try:
    image = data["hdurl"] # The image URL from API.
except KeyError: # The API response has no "hdurl" key when a video is posted instead of an image.
    image = data["url"]
    # Turn the embed URL into a normal watch URL before tweeting.
    image = image.replace("embed/", "watch?v=")
    api.update_status(status = message_video+ source + ' \n'+ image) # Bot only tweets the YouTube link and not a picture.
    print("Video tweeted successfully.")
    quit()
# Tweepy's "update_with_media" function only allows us to tweet an image from the local directory.
# Since posting the picture from a URL would be more practical, I'm using a function that will complete this step for me automatically.
def tweet_image(url, message):
    """Download the image at *url* to a local file and tweet it with *message*.

    Twitter rejects media above its size limit; on a TweepError the image
    is shrunk by 10% per attempt and the upload is retried until it succeeds.
    """
    tweeted = False
    photo = 'photo.jpg'
    request = requests.get(url, stream=True)
    if request.status_code == 200:
        # tweepy's update_with_media only accepts local paths, so stream
        # the picture to disk first.
        with open(photo, 'wb') as media:
            for chunk in request:
                media.write(chunk)
        while not tweeted:
            try:
                api.update_with_media(photo, status=message)
                print("Image tweeted successfully.")
                tweeted = True
            except tweepy.error.TweepError:
                # BUGFIX: the old shrink factor (0.99999999999) trimmed at
                # most one pixel per attempt, re-encoding the file thousands
                # of times for a large image. Shrink by 10% per retry instead.
                print("Resizing image...")
                im = Image.open(photo)
                width, height = im.size
                im_resize = im.resize((int(width * 0.9), int(height * 0.9)), Image.ANTIALIAS)
                im_resize.save(photo)
tweet_image(image, message) # Tweeting the picture with the status. Image URL and the status message are used as parameters.
| [
"PIL.Image.open",
"requests.get",
"datetime.datetime.now",
"tweepy.API",
"tweepy.OAuthHandler"
] | [((330, 380), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (349, 380), False, 'import tweepy\n'), ((444, 460), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (454, 460), False, 'import tweepy\n'), ((604, 672), 'requests.get', 'requests.get', (['"""https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY"""'], {}), "('https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY')\n", (616, 672), False, 'import requests\n'), ((2042, 2072), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (2054, 2072), False, 'import requests\n'), ((907, 930), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (928, 930), False, 'import datetime\n'), ((2268, 2285), 'PIL.Image.open', 'Image.open', (['photo'], {}), '(photo)\n', (2278, 2285), False, 'from PIL import Image\n'), ((2584, 2601), 'PIL.Image.open', 'Image.open', (['photo'], {}), '(photo)\n', (2594, 2601), False, 'from PIL import Image\n')] |
import pickle
import os
import tensorflow as tf
from glob import glob
import utils.DataLoaderUtils as dlu
from utils.AnnotationUtils import write_dad_masks
# Static Dataset Config Options
TAG_NAMES = {'highlights',
'urls_to_supplementary',
'abbreviation',
'abstract',
'additional_file',
'affiliation',
'appendice',
'author_bio',
'author_contribution',
'author_name',
'availability_of_data',
'caption',
'conflict_int',
'contact_info',
'copyright',
'core_text',
'date',
'doi',
'figure',
'funding_info',
'index',
'keywords',
'list',
'math_formula',
'note',
'publisher_note',
'reference',
'section_heading',
'subheading',
'table',
'title',
'nomenclature',
'code',
'publisher',
'journal',
'corresponding_author',
'editor',
'ethics',
'consent_publication',
'MSC',
'article_history',
'acknowledgment',
'background'}
TAG_MAPPING = {'abbreviation': 'background',
'acknowledgment': 'background',
'additional_file': 'background',
'affiliation': 'background',
'article_history': 'background',
'author_contribution': 'background',
'availability_of_data': 'background',
'code': 'background',
'conflict_int': 'background',
'consent_publication': 'background',
'corresponding_author': 'background',
'date': 'background',
'ethics': 'background',
'index': 'background',
'journal': 'background',
'nomenclature': 'background',
'publisher_note': 'background',
'urls_to_supplementary': 'background',
'msc': 'background',
'MSC': 'background',
'highlights': 'background',
'subheading': 'section_heading'}
SAVED_PKL_FILE = 'saved_dad_paths.pkl'
BUFFER_SIZE = 500
MASKS_DIR = "masks"
DOCUMENTS_DIR = "documents"
ANNOTATIONS_DIR = "annotations"
def write_masks(dataset_dir, border_buffer=6):
    """Generate segmentation masks for every annotation JSON under *dataset_dir*.

    Results are memoised in SAVED_PKL_FILE because full mask generation is
    slow; delete that file to force regeneration.

    Args:
        dataset_dir: root directory containing the ANNOTATIONS_DIR tree.
        border_buffer: pixel buffer passed to write_dad_masks.

    Returns:
        (all_used_tags, class_mapping) as produced by write_dad_masks.
    """
    anno_dir = os.path.join(dataset_dir, ANNOTATIONS_DIR)
    anno_paths = glob(anno_dir + "/*/*json")
    if os.path.exists(SAVED_PKL_FILE):
        # BUGFIX: the old code leaked the handle from pickle.load(open(...));
        # use context managers so the files are always closed.
        with open(SAVED_PKL_FILE, 'rb') as cache_file:
            all_used_tags, class_mapping = pickle.load(cache_file)
    else:
        print("Running full mask generation, this may take a bit.")
        all_used_tags = {}
        for anno_json in anno_paths:
            _, class_mapping, used_tags = write_dad_masks(
                anno_json,
                ANNOTATIONS_DIR,
                DOCUMENTS_DIR,
                MASKS_DIR,
                tag_names=TAG_NAMES,
                tag_mapping=TAG_MAPPING,
                buffer_size=border_buffer,
                force=True)
            all_used_tags.update(used_tags)
        with open(SAVED_PKL_FILE, 'wb') as cache_file:
            pickle.dump((all_used_tags, class_mapping), cache_file)
    return all_used_tags, class_mapping
def build_dad_dataset(dataset_dir, img_size, batch_size, seed, debug=False):
    """Build (train, valid, test) tf.data pipelines for the DAD dataset.

    Pages are stratified 90/5/5 (train / test / valid) on their tag
    distribution, decoded and padded-batched to `img_size`.

    Returns:
        (train, valid, test, class_mapping)
    """
    all_used_tags, class_mapping = write_masks(dataset_dir)
    # Filter out any pages that have no classes (this is helpful when messing around with active classes)
    filtered_used_tags = {}
    for path, used_tags in all_used_tags.items():
        if len(used_tags) != 0:
            filtered_used_tags[path] = used_tags
    # Split the paths with stratified sampling, to maintain class distribution
    train_paths, test_paths = dlu.stratify_train_test_split(filtered_used_tags, 0.10, seed=seed, debug=debug)
    # Further split the held-out 10% into equal test and validation sets.
    test_used_tags = {}
    for path, used_tags in filtered_used_tags.items():
        if path in test_paths:
            test_used_tags[path] = used_tags
    test_paths, valid_paths = dlu.stratify_train_test_split(test_used_tags, 0.50, seed=seed, debug=debug)
    # Decode each image/mask pair in parallel.
    train_dataset = tf.data.Dataset.from_tensor_slices(train_paths)
    train_dataset = train_dataset.map(lambda x: dlu.parse_image(x, 0, MASKS_DIR), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    valid_dataset = tf.data.Dataset.from_tensor_slices(valid_paths)
    valid_dataset = valid_dataset.map(lambda x: dlu.parse_image(x, 0, MASKS_DIR), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test_dataset = tf.data.Dataset.from_tensor_slices(test_paths)
    test_dataset = test_dataset.map(lambda x: dlu.parse_image(x, 0, MASKS_DIR), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Train pipeline gets augmentation (load_image_train) and shuffling.
    train = train_dataset.map(lambda x: dlu.load_image_train(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    train = train.shuffle(buffer_size=BUFFER_SIZE, seed=seed, reshuffle_each_iteration=True)
    train = train.padded_batch(batch_size, drop_remainder=True, padded_shapes=([img_size, img_size, 3], [img_size, img_size, 1], [None, 4]))
    train = train.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    valid = valid_dataset.map(lambda x: dlu.load_image_test(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    valid = valid.padded_batch(batch_size, drop_remainder=True, padded_shapes=([img_size, img_size, 3], [img_size, img_size, 1], [None, 4]))
    valid = valid.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    test = test_dataset.map(lambda x: dlu.load_image_test(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    test = test.padded_batch(batch_size, drop_remainder=True, padded_shapes=([img_size, img_size, 3], [img_size, img_size, 1], [None, 4]))
    test = test.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return train, valid, test, class_mapping
| [
"os.path.exists",
"utils.DataLoaderUtils.parse_image",
"utils.DataLoaderUtils.load_image_test",
"tensorflow.data.Dataset.from_tensor_slices",
"os.path.join",
"utils.DataLoaderUtils.stratify_train_test_split",
"utils.DataLoaderUtils.load_image_train",
"utils.AnnotationUtils.write_dad_masks",
"glob.gl... | [((2610, 2652), 'os.path.join', 'os.path.join', (['dataset_dir', 'ANNOTATIONS_DIR'], {}), '(dataset_dir, ANNOTATIONS_DIR)\n', (2622, 2652), False, 'import os\n'), ((2670, 2697), 'glob.glob', 'glob', (["(anno_dir + '/*/*json')"], {}), "(anno_dir + '/*/*json')\n", (2674, 2697), False, 'from glob import glob\n'), ((2705, 2735), 'os.path.exists', 'os.path.exists', (['SAVED_PKL_FILE'], {}), '(SAVED_PKL_FILE)\n', (2719, 2735), False, 'import os\n'), ((4250, 4328), 'utils.DataLoaderUtils.stratify_train_test_split', 'dlu.stratify_train_test_split', (['filtered_used_tags', '(0.1)'], {'seed': 'seed', 'debug': 'debug'}), '(filtered_used_tags, 0.1, seed=seed, debug=debug)\n', (4279, 4328), True, 'import utils.DataLoaderUtils as dlu\n'), ((4587, 4661), 'utils.DataLoaderUtils.stratify_train_test_split', 'dlu.stratify_train_test_split', (['test_used_tags', '(0.5)'], {'seed': 'seed', 'debug': 'debug'}), '(test_used_tags, 0.5, seed=seed, debug=debug)\n', (4616, 4661), True, 'import utils.DataLoaderUtils as dlu\n'), ((4688, 4735), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['train_paths'], {}), '(train_paths)\n', (4722, 4735), True, 'import tensorflow as tf\n'), ((4889, 4936), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['valid_paths'], {}), '(valid_paths)\n', (4923, 4936), True, 'import tensorflow as tf\n'), ((5093, 5139), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['test_paths'], {}), '(test_paths)\n', (5127, 5139), True, 'import tensorflow as tf\n'), ((3000, 3162), 'utils.AnnotationUtils.write_dad_masks', 'write_dad_masks', (['anno_json', 'ANNOTATIONS_DIR', 'DOCUMENTS_DIR', 'MASKS_DIR'], {'tag_names': 'TAG_NAMES', 'tag_mapping': 'TAG_MAPPING', 'buffer_size': 'border_buffer', 'force': '(True)'}), '(anno_json, ANNOTATIONS_DIR, DOCUMENTS_DIR, MASKS_DIR,\n tag_names=TAG_NAMES, tag_mapping=TAG_MAPPING, buffer_size=border_buffer,\n force=True)\n', 
(3015, 3162), False, 'from utils.AnnotationUtils import write_dad_masks\n'), ((4784, 4816), 'utils.DataLoaderUtils.parse_image', 'dlu.parse_image', (['x', '(0)', 'MASKS_DIR'], {}), '(x, 0, MASKS_DIR)\n', (4799, 4816), True, 'import utils.DataLoaderUtils as dlu\n'), ((4985, 5017), 'utils.DataLoaderUtils.parse_image', 'dlu.parse_image', (['x', '(0)', 'MASKS_DIR'], {}), '(x, 0, MASKS_DIR)\n', (5000, 5017), True, 'import utils.DataLoaderUtils as dlu\n'), ((5186, 5218), 'utils.DataLoaderUtils.parse_image', 'dlu.parse_image', (['x', '(0)', 'MASKS_DIR'], {}), '(x, 0, MASKS_DIR)\n', (5201, 5218), True, 'import utils.DataLoaderUtils as dlu\n'), ((5311, 5344), 'utils.DataLoaderUtils.load_image_train', 'dlu.load_image_train', (['x', 'img_size'], {}), '(x, img_size)\n', (5331, 5344), True, 'import utils.DataLoaderUtils as dlu\n'), ((5741, 5773), 'utils.DataLoaderUtils.load_image_test', 'dlu.load_image_test', (['x', 'img_size'], {}), '(x, img_size)\n', (5760, 5773), True, 'import utils.DataLoaderUtils as dlu\n'), ((6075, 6107), 'utils.DataLoaderUtils.load_image_test', 'dlu.load_image_test', (['x', 'img_size'], {}), '(x, img_size)\n', (6094, 6107), True, 'import utils.DataLoaderUtils as dlu\n')] |
import time
import queue
import threading
def aaa(i):
    """Worker loop: consume items from the module-global queue `q` until a
    None sentinel is received.

    Args:
        i: worker index, used only in log output.
    """
    while True:
        item = q.get()
        if item is None:
            # Sentinel received: this worker can stop. (Message reads
            # "thread %s found a None, time to rest ^-^".)
            print("线程%s发现了一个None,可以休息了^-^" % i)
            break
        time.sleep(0.01)
        print('aaaaa -> ' + str(i) + " ---> " + str(item))
        # Mark the item processed so q.join() can unblock.
        q.task_done()
if __name__ == '__main__':
    num_of_threads = 5
    source = [i for i in range(1, 21)]
    # Shared work queue consumed by the worker threads.
    q = queue.Queue()
    threads = []
    for i in range(1, num_of_threads + 1):
        t = threading.Thread(target=aaa, args=(i,))
        threads.append(t)
        t.start()
    # Feed the work items, then wait until every one is task_done().
    for item in source:
        time.sleep(0.01)
        q.put(item)
    q.join()
    # print("-----工作都完成了-----")  # "all the work is done"
    # Stop the worker threads by sending one None sentinel per worker.
    for i in range(num_of_threads):
        q.put(None)
    # for t in threads:
    #     t.join()
    # print(threads)
| [
"threading.Thread",
"queue.Queue",
"time.sleep"
] | [((391, 404), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (402, 404), False, 'import queue\n'), ((194, 210), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (204, 210), False, 'import time\n'), ((477, 516), 'threading.Thread', 'threading.Thread', ([], {'target': 'aaa', 'args': '(i,)'}), '(target=aaa, args=(i,))\n', (493, 516), False, 'import threading\n'), ((594, 610), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (604, 610), False, 'import time\n')] |
# Generated by Django 3.2.5 on 2021-07-06 14:18
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the `Bailleur` (landlord) table."""
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Bailleur",
            fields=[
                ("id", models.AutoField(primary_key=True, serialize=False)),
                # Stable public identifier, safe to expose in URLs/APIs.
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False)),
                ("nom", models.CharField(max_length=255)),
                # French SIRET business identifier: exactly 14 digits.
                ("siret", models.CharField(max_length=14)),
                ("capital_social", models.CharField(max_length=255)),
                ("siege", models.CharField(max_length=255)),
                ("dg_nom", models.CharField(max_length=255)),
                ("dg_fonction", models.CharField(max_length=255)),
                ("dg_date_deliberation", models.DateField()),
                ("operation_exceptionnelle", models.TextField()),
                # Creation / last-update timestamps, managed automatically.
                ("cree_le", models.DateTimeField(auto_now_add=True)),
                ("mis_a_jour_le", models.DateTimeField(auto_now=True)),
            ],
            options={
                "permissions": (
                    ("can_edit_bailleur", "Créer ou mettre à jour un bailleur"),
                ),
            },
        ),
    ]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.UUIDField"
] | [((311, 362), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (327, 362), False, 'from django.db import migrations, models\n'), ((390, 442), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)'}), '(default=uuid.uuid4, editable=False)\n', (406, 442), False, 'from django.db import migrations, models\n'), ((469, 501), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (485, 501), False, 'from django.db import migrations, models\n'), ((530, 561), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)'}), '(max_length=14)\n', (546, 561), False, 'from django.db import migrations, models\n'), ((599, 631), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (615, 631), False, 'from django.db import migrations, models\n'), ((660, 692), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (676, 692), False, 'from django.db import migrations, models\n'), ((722, 754), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (738, 754), False, 'from django.db import migrations, models\n'), ((789, 821), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (805, 821), False, 'from django.db import migrations, models\n'), ((865, 883), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (881, 883), False, 'from django.db import migrations, models\n'), ((931, 949), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (947, 949), False, 'from django.db import migrations, models\n'), ((980, 1019), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1000, 1019), False, 
'from django.db import migrations, models\n'), ((1056, 1091), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1076, 1091), False, 'from django.db import migrations, models\n')] |
import time
import mysql.connector
from optionstrader.customlogging import CustomLog
from optionstrader.parser import Parser
MYSQL_IP_ADDR = '192.168.1.10'
# Used to debug via logs
DEBUG = False
class Database:
    def __init__(self):
        """
        There's some confusion with database vs table.
        We will have separate environments for Dev/Stage and Prd,
        so we will want to ensure that the databases are separate.
        TODO: Ensure that the Dev/Stage and Prod environments are fully seggregated
        with their own databases. This will allows us to migrate the databases when
        the time comes.
        environment = 'dev' ('dev', 'stage', 'production')
        database = "algotrader_".format(environment)
        table = ('accounts', 'optionchainanalysis', 'optionchains', 'stocks')
        """
        # Open the connection when the object is created. Standard procedure
        # is: open the connection, perform the action, then close it.
        self.log = CustomLog()
        self.parser = Parser()
        # NOTE(review): connect_to_database raises if MySQL is unreachable,
        # so constructing a Database requires a live server.
        self.connection = self.connect_to_database()
        # CONFIGURATION
        # Possible Values: "Dev", "Stage", "Production"
        # Change below code when a config file exists
        self.environment = "Dev"
        self.database_name = "algotrader_dev"
# Below is used to determine how far back in seconds the analyzer tool should go
# The reason behind this is because we do not want to delete stock market date
# Instead, we would rather query the database and only select the records that
# are within the threshold
    def connect_to_database(self):
        """Open and return a new MySQL connection to the test server.

        NOTE(review): credentials are hard-coded and the error handling
        below is commented out, so a failed connection propagates
        mysql.connector.errors.InterfaceError to the caller.
        """
        # try:
        # Using loopback for testing purposes. Might use socket level later.
        return mysql.connector.connect(user='optionstrader_service_account', password='<PASSWORD>',
                              host=MYSQL_IP_ADDR,
                              port='3306')
        #database='algotrader_data'
        #mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on 'localwhost:3306'
        # except Exception as e:
        #     msg = "Error! Please check the MySQL database connection: {error}".format(error=e)
        #     self.log.debug(msg)
def configure_database(self):
database_name = "algotrader_dev"
self.create_database(database_name)
table_columns = "(account_number TEXT, account_type TEXT, balance FLOAT, total_deposits FLOAT, total_withdrawls FLOAT)"
table_name = "accounts"
self.create_table(database_name, table_name, table_columns)
table_columns = "(symbol TEXT, company_name TEXT)"
table_name = "stocks"
self.create_table(database_name, table_name, table_columns)
table_columns = "(symbol TEXT)"
table_name = "optionchains"
self.create_table(database_name, table_name, table_columns)
table_columns = "(symbol TEXT)"
table_name = "optionchainanalysis"
self.create_table(database_name, table_name, table_columns)
# self.parse_symbols_and_add_to_db()
self.log.debug("Database has been configured")
return True
def create_database(self, database_name):
try:
cursor = self.connection.cursor()
query = ("CREATE DATABASE {database_name}").format(database_name=database_name)
cursor.execute(query)
output = self.connection.commit()
cursor.close()
msg = "Database `{database_name}` created.".format(
database_name=database_name)
self.log.debug(msg)
return True
except:
msg = "Database `{database_name}` can't be created.".format(
database_name=database_name)
self.log.debug(msg)
def create_table(self, database_name, table_name, table_columns):
try:
cursor = self.connection.cursor()
query = "CREATE TABLE {database_name}.{table_name} {table_columns}".format(
database_name=database_name,
table_name=table_name,
table_columns=table_columns)
cursor.execute(query)
output = self.connection.commit()
cursor.close()
msg = "Table `{table_name} created in database `{database_name}`.".format(
database_name=database_name,
table_name=table_name)
self.log.debug(msg)
return True
except:
msg = "Table `{table_name}` can't be created.".format(
table_name=table_name)
self.log.debug(msg)
    def close_connection(self):
        """Close the underlying MySQL connection."""
        self.connection.close()
# ====================================
# ====================================
# === Code used for Account Class ====
# ====================================
# ====================================
def update_account(self, balance, account_type):
cursor = self.connection.cursor()
query = ("UPDATE {db}.accounts SET balance={balance} WHERE account_type=\'{account_type}\'".format(
db=self.database_name,
balance=balance,
account_type=account_type))
cursor.execute(query)
self.connection.commit()
cursor.close()
    def get_recommended_option_purchase(self):
        """Return option-chain analysis rows matching hard-coded buy criteria.

        NOTE(review): the filter thresholds and the expiration date
        ('2017-03-03') are hard-coded — TODO parameterize them.

        Returns:
            list of result-row tuples from optionchainanalysisDev.
        """
        # TODO
        results_table_cursor = self.connection.cursor()
        #query = ("SELECT balance FROM accounts{env} where account_type='checking'".format(env=self.environment))
        _query = ("SELECT * FROM optionchainanalysisDev ",
            "WHERE `total_price_paid_1x` BETWEEN 0 and 100 AND ",
            "`potential_profit_1x` BETWEEN 50 and 100 AND ",
            "`stock_price_increase` < 3.5 AND ",
            "`magic_number` BETWEEN 3 and 10 AND ",
            "`expiration_date` LIKE '2017-03-03' AND ",
            "`risk_percentage_1x` BETWEEN 0 and 18 ",
            "ORDER BY `timestamp` DESC")
        query = "".join(_query)
        log_msg = query
        #
        #
        # commit() first so the read sees the latest committed data.
        self.connection.commit()
        result = results_table_cursor.execute(query)
        results_table = []
        for record in results_table_cursor:
            results_table.append(record)
        return results_table
        #for record in results_table:
        #    return record
def get_list_of_tickers(self, query_type='default'):
# TODO Implement the following:
# We will want to stream data from external to the database then stream the symbols from the database
# as they're made available.
table = 'optionchains'
if query_type == 'default':
# Run the normal code here
query = "SELECT DISTINCT symbol FROM {db}.stocks WHERE symbol is not Null".format(
db=self.database_name)
if query_type == 'options_only':
# Run the code to only retrieve symbols which have had stock options in the past
query = "SELECT DISTINCT underlying FROM {db}.{table} WHERE underlying is not Null".format(
db=self.database_name,
table=table,
env=self.environment)
if query_type == 'one_option_only':
# Arbritrary first option only.
# Usually used for testing purposes
query = "SELECT DISTINCT underlying FROM {db}.{table} WHERE underlying is not Null LIMIT 1".format(
db=self.database_name,
table=table,
env=self.environment)
else:
# Run a special SQL query here, which returns the symbols in a specific order
pass
cursor = self.connection.cursor()
# As of 2/11/17, there are 3078 total results from this query
self.connection.commit()
result = cursor.execute(query)
print(result)
list_of_tickers = list()
for ticker in cursor:
#print(ticker[0])
list_of_tickers.append(ticker[0])
# Return type is a python list [u'AAPL', ..., u'GOOG']
return list_of_tickers
def get_current_stock_price(self, symbol):
# We want to make sure that the 'last_' price is within reason. We don't want to
# pay 100x the average price of the item.
cursor = self.connection.cursor(dictionary=True)
query = "SELECT * FROM {db}.stocks WHERE symbol LIKE \'{symbol}\' ORDER BY `timestamp` DESC LIMIT 1".format(
db=self.database_name,
symbol=symbol)
self.connection.commit()
result = cursor.execute(query)
for stock_data in cursor:
return stock_data['last_']
def get_example_option_chains(self, num_chains_limit=1):
# This function has a much less accurate query than query_option_chains_for_analysis
# This function is typically used for testing purposes
cursor = self.connection.cursor(dictionary=True, buffered=True)
query = ("SELECT * from {db}.optionchains LIMIT {num_chains_limit}".format(
db=self.database_name,
num_chains_limit=num_chains_limit))
self.connection.commit()
cursor.execute(query)
self.log.debug("****Type:{0}".format(type(cursor)))
return cursor
# Only iterate once
#for option_chain in cursor:
# return option_chain, cursor[option_chain]
# list_of_option_chains is all of the option chains for the ticker
# therefore, we need to select and return the most recent one.
cursor = self.connection.cursor()
# As of 2/11/17, there are 3078 total results from this query
query = "SELECT * from {db}.optionchains LIMIT 1".format(
db=self.database_name)
self.connection.commit()
option_chain = cursor.execute(query)
return option_chain
def query_option_chains_for_analysis(self,
ticker=None, current_timestamp=int(time.time()), time_threshold=30000,
max_num_option_chains=40):
# This function has a more precise query than get_example_option_chains
# If no tickers are specified, retrieve the most recent option_chains
if ticker == None:
cursor = self.connection.cursor(dictionary=True, buffered=True)
query_1 = "SELECT * FROM {db}.optionchains WHERE type LIKE 'option' and ".format(
db=self.database_name)
query_2 = "timestamp > ({current_timestamp}-{time_threshold}) and ".format(
time_threshold=time_threshold,
current_timestamp=current_timestamp)
query_3 = "option_type LIKE 'call' ORDER BY `timestamp` DESC LIMIT {max_num_option_chains}".format(max_num_option_chains=max_num_option_chains)
query = (query_1 + query_2 + query_3)
self.log.debug(query)
result = cursor.execute(query)
self.log.debug(cursor.fetchone())
self.connection.commit()
# If a ticker is specified, retrieve the most recent option_chains
else:
# We want to return the dictionary type
# we need a MySQL buffered response
cursor = self.connection.cursor(dictionary=True, buffered=True)
query_1 = "SELECT * FROM {db}.optionchains WHERE type LIKE 'option' and ".format(
db=self.database_name)
query_2 = "timestamp > ({current_timestamp}-{time_threshold}) and underlying LIKE '{ticker}' and ".format(ticker=ticker,
time_threshold=time_threshold,
current_timestamp=current_timestamp)
query_3 = "option_type LIKE 'call' ORDER BY `timestamp` DESC LIMIT {max_num_option_chains}".format(max_num_option_chains=max_num_option_chains)
query = (query_1 + query_2 + query_3)
result = cursor.execute(query)
self.connection.commit()
"""
# cursor is a MySQLCursorDict object.
# cursor is a MySQLCursorDict: SELECT * FROM optionchainsDev WHERE type..
# retrieve results using cursor.fetchall()
"""
return cursor
# DEPRICATED
#result = cursor.execute(query)
# Iterate over all options in the option chains in the database for that ticker.
# Sorted by time in descending order
#all_options = []
#for option_chain in cursor:
# all_options.append(option_chain)
#return all_options
def sanitize_field_names(self, field_name):
sanitized_field_names_pairs = {
'change': 'change_',
'close': 'close_',
'open': 'open_',
'last': 'last_'
}
field_name = str(field_name)
for name in sanitized_field_names_pairs.keys():
if field_name == name:
sanitized_field_name = sanitized_field_names_pairs[name]
return sanitized_field_name
return field_name
    def save_option_chain_to_table(self, option_chain, table='optionchains'):
        """Insert *option_chain* (a dict of field -> value) as one row of
        *table*, stamping it with the current epoch time first.

        If the INSERT fails because a column does not exist yet, every key is
        ALTERed into the table and the INSERT is retried exactly once (two
        attempts total).  Always returns True, even after a failed retry.

        NOTE(review): the many `log_msg` assignments are never passed to the
        logger -- they are effectively dead stores; `DEBUG` is presumably a
        module-level flag defined elsewhere in this file -- confirm.
        """
        # PLEASE NOTE:
        # If a new keyword (column) is detected, then the INSERT INTO command will fail
        # The next time that the option chain is attempted to be saved, the record
        # will update.
        attempt_number = 0
        while True:
            try:
                # Mutates the caller's dict: adds/overwrites 'timestamp'.
                option_chain['timestamp']=int(time.time())
                cursor = self.connection.cursor()
                #"{} {}".format(str(a.keys()).replace("'", ""), str(a.values()).replace("'", ""))
                #option_chain.keys(), option_chain.values()
                # Column names with MySQL-reserved words get a trailing "_".
                KEYS = [self.sanitize_field_names(i) for i in option_chain.keys()]
                VALUES = [str(i) for i in option_chain.values()]
                # Should never have the single character apostrophy.
                # Error out, if it contains once
                keys_error = [str(i).find("'") for i in option_chain.keys()]
                values_error = [str(i).find("'") for i in option_chain.values()]
                if max(max(keys_error), max(values_error)) != -1:
                    # NOTE(review): this only sets a local string; the row is
                    # still inserted with the apostrophe present.
                    log_msg = ""
                    log_msg = "Error: single character apostrophy located in option_chain!"
                # Render the key/value lists as SQL tuples by stripping the
                # Python list repr's brackets (and quotes, for the keys).
                keys_formatted = str("(" + str(KEYS)[1:-1] + ")").replace("'", "")
                values_formatted = str("(" + str(VALUES)[1:-1] + ")")
                query = ("INSERT INTO {db}.{table} {keys} VALUES {values}").format(
                    db=self.database_name,
                    table=table,
                    keys=keys_formatted,
                    values=values_formatted)
                log_msg = "~~~~-----------------~~~"
                # Python None values become SQL NULLs.
                query = query.replace("'None'", 'NULL')
                if DEBUG is True:
                    print(query)
                cursor.execute(query)
                self.connection.commit()
                cursor.close()
                # Break the while loop
                break
            except mysql.connector.ProgrammingError:
                # This means that the fields don't exist on the database
                # time to add the fields to the database
                log_msg = "Warning. Trying to update the database with fields which don't yet exist in the table."
                # Unsure which key is the problem one.
                # Try to create a field with each key.
                # if the key is already a field on the database, then pass without error
                for field_name in KEYS:
                    # mySQL database needs specific table names to be off limits
                    try:
                        # Sanitized keys may carry a trailing "_" that the
                        # source dict does not; fall back to the bare name.
                        field_type = self.type_conversion(option_chain[field_name])
                    except:
                        field_type = self.type_conversion(option_chain[field_name[:-1]])
                    try:
                        self.add_new_column_to_table(field_name, field_type, table=table)
                    except mysql.connector.ProgrammingError:
                        # Column already exists -- that is fine.
                        pass
                log_msg = "Information. The fields were updated in table '{0}'.".format(table)
                if attempt_number == 1:
                    log_msg = "Error: Unable to update SQL table"
                    break
                else:
                    log_msg = "Retrying the update to the table"
                    attempt_number += 1
        return True
def update_option_chain_with_analysis(self, percentage_increase_analysis):
# This is the analysis done for the percentage increase (1,2,5 percent)
# of an underlyer
result = self.save_option_chain_to_table(percentage_increase_analysis, table='optionchainanalysis')
return True
def add_new_column_to_table(self, column_name, data_type, table):
cursor = self.connection.cursor()
env = self.environment
query = "ALTER TABLE {db}.{table} ADD {column_name} {data_type}".format(
db=self.database_name,
table=table,
column_name=column_name,
data_type=data_type)
cursor.execute(query)
self.connection.commit()
return True
def add_money_to_account(self, amount_of_money, account_type):
current_balance = self.get_checking_account_balance()
output = str(current_balance + amount_of_money)
self.update_checking_account(output)
print(self.get_checking_account_balance())
def subtract_money_from_account(self, amount_of_money, account_type):
current_balance = self.get_checking_account_balance()
output = str(current_balance - amount_of_money)
self.update_checking_account(output)
print(self.get_checking_account_balance())
def add_field_to_table(self, field, _type):
cursor = self.connection.cursor()
#query = ("ALTER TABLE stocks ADD %s %s") % (field, type)
query = "ALTER TABLE {db}.stocks ADD {field} {type}".format(
db=self.database_name,
field=field,
type=_type)
cursor.execute(query)
self.connection.commit()
cursor.close()
def insert_values_into_table(self, column_string, value_string):
cursor = self.connection.cursor()
query = "INSERT INTO {db}.stocks {column_string} VALUES {value_string}".format(
db=self.database_name,
column_string=column_string,
value_string=value_string)
self.log.debug(query)
cursor.execute(query)
self.connection.commit()
cursor.close()
def type_conversion(self, object_item):
# We need to convert the types so that the sql database knows what to do
# The names of the types differs between python and mysql
# Examples: unicode, NoneType, int, float
obj_type = type(object_item)
#self.log.debug(object_item)
#self.log.debug(obj_type)
obj_type_str = str(obj_type).split("'")[1]
if obj_type_str == 'unicode':
return "text"
if obj_type_str == 'float':
return "float"
if obj_type_str == 'NoneType':
return "text"
if obj_type_str == 'int':
return "bigint(20)"
else:
return "text"
def parse_symbols_and_add_to_db(self):
# technically this should go in a separate test_parser module... TODO.
results = self.parser.extract_symbols()
for symbol_and_name in results[1:]:
column_string = "(symbol, company_name)"
value_string = "(\"{symbol}\", \"{company_name}\")".format(
symbol=symbol_and_name[0],company_name=symbol_and_name[1])
self.insert_values_into_table(column_string, value_string)
msg = "Symbols parsed and added to database"
self.log.debug(msg)
return results
| [
"optionstrader.customlogging.CustomLog",
"optionstrader.parser.Parser",
"time.time"
] | [((1046, 1057), 'optionstrader.customlogging.CustomLog', 'CustomLog', ([], {}), '()\n', (1055, 1057), False, 'from optionstrader.customlogging import CustomLog\n'), ((1080, 1088), 'optionstrader.parser.Parser', 'Parser', ([], {}), '()\n', (1086, 1088), False, 'from optionstrader.parser import Parser\n'), ((9871, 9882), 'time.time', 'time.time', ([], {}), '()\n', (9880, 9882), False, 'import time\n'), ((13314, 13325), 'time.time', 'time.time', ([], {}), '()\n', (13323, 13325), False, 'import time\n')] |
# Generated by Django 3.2.9 on 2021-11-24 02:52
from django.db import migrations, models
import django.db.models.deletion
def _bar_data_fields():
    """Return a fresh copy of the field list shared by every BarData* model.

    Each bar-data table carries OHLC prices, adjusted close, volume,
    dividend/split adjustments, bookkeeping timestamps, and a foreign key to
    Symbol; (timestamp, symbol) is the natural key.  A fresh list (with fresh
    Field instances) is built per call so models never share field objects.
    """
    return [
        ('timestamp', models.DateTimeField(primary_key=True, serialize=False)),
        ('open_price', models.FloatField()),
        ('high_price', models.FloatField()),
        ('low_price', models.FloatField()),
        ('close_price', models.FloatField()),
        ('adj_close_price', models.FloatField(blank=True, null=True)),
        ('volume', models.IntegerField()),
        ('dividend_amount', models.FloatField(blank=True, null=True)),
        ('split_coeff', models.FloatField(blank=True, null=True)),
        ('created_date', models.DateTimeField(blank=True, null=True)),
        ('last_updated_date', models.DateTimeField(blank=True, null=True)),
        ('symbol', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='api.symbol')),
    ]


def _bar_data_model(name, db_table):
    """Build one CreateModel operation for a BarData* table.

    Replaces eight byte-identical hand-written CreateModel blocks from the
    generated migration; the resulting operations are equivalent and appear
    in the original order.
    """
    return migrations.CreateModel(
        name=name,
        fields=_bar_data_fields(),
        options={
            'db_table': db_table,
            'managed': True,
            'unique_together': {('timestamp', 'symbol')},
        },
    )


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Symbol',
            fields=[
                ('symbol_id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('ticker', models.CharField(max_length=30)),
                ('description', models.TextField(blank=True, null=True)),
                ('sector', models.CharField(blank=True, max_length=30, null=True)),
                ('asset_type', models.CharField(blank=True, max_length=30, null=True)),
                ('created_date', models.DateTimeField(blank=True, null=True)),
                ('last_updated_date', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'db_table': 'symbol',
                'managed': True,
            },
        ),
    ] + [
        _bar_data_model(model_name, db_table)
        for model_name, db_table in [
            ('BarDataWeekly', 'bar_data_weekly'),
            ('BarDataMonthly', 'bar_data_monthly'),
            ('BarDataDaily', 'bar_data_daily'),
            ('BarData5Min', 'bar_data_5min'),
            ('BarData30Min', 'bar_data_30min'),
            ('BarData1Min', 'bar_data_1min'),
            ('BarData1H', 'bar_data_1h'),
            ('BarData15Min', 'bar_data_15min'),
        ]
    ]
| [
"django.db.models.FloatField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((342, 393), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (358, 393), False, 'from django.db import migrations, models\n'), ((421, 453), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (437, 453), False, 'from django.db import migrations, models\n'), ((483, 514), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (499, 514), False, 'from django.db import migrations, models\n'), ((549, 588), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (565, 588), False, 'from django.db import migrations, models\n'), ((618, 672), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'null': '(True)'}), '(blank=True, max_length=30, null=True)\n', (634, 672), False, 'from django.db import migrations, models\n'), ((706, 760), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(30)', 'null': '(True)'}), '(blank=True, max_length=30, null=True)\n', (722, 760), False, 'from django.db import migrations, models\n'), ((796, 839), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (816, 839), False, 'from django.db import migrations, models\n'), ((880, 923), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (900, 923), False, 'from django.db import migrations, models\n'), ((1177, 1232), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (1197, 1232), False, 'from django.db import migrations, models\n'), ((1266, 1285), 'django.db.models.FloatField', 
'models.FloatField', ([], {}), '()\n', (1283, 1285), False, 'from django.db import migrations, models\n'), ((1319, 1338), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1336, 1338), False, 'from django.db import migrations, models\n'), ((1371, 1390), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1388, 1390), False, 'from django.db import migrations, models\n'), ((1425, 1444), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (1442, 1444), False, 'from django.db import migrations, models\n'), ((1483, 1523), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1500, 1523), False, 'from django.db import migrations, models\n'), ((1553, 1574), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1572, 1574), False, 'from django.db import migrations, models\n'), ((1613, 1653), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1630, 1653), False, 'from django.db import migrations, models\n'), ((1688, 1728), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1705, 1728), False, 'from django.db import migrations, models\n'), ((1764, 1807), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1784, 1807), False, 'from django.db import migrations, models\n'), ((1848, 1891), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1868, 1891), False, 'from django.db import migrations, models\n'), ((1921, 2008), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 
'api.symbol')\n", (1938, 2008), False, 'from django.db import migrations, models\n'), ((2329, 2384), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (2349, 2384), False, 'from django.db import migrations, models\n'), ((2418, 2437), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (2435, 2437), False, 'from django.db import migrations, models\n'), ((2471, 2490), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (2488, 2490), False, 'from django.db import migrations, models\n'), ((2523, 2542), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (2540, 2542), False, 'from django.db import migrations, models\n'), ((2577, 2596), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (2594, 2596), False, 'from django.db import migrations, models\n'), ((2635, 2675), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2652, 2675), False, 'from django.db import migrations, models\n'), ((2705, 2726), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (2724, 2726), False, 'from django.db import migrations, models\n'), ((2765, 2805), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2782, 2805), False, 'from django.db import migrations, models\n'), ((2840, 2880), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2857, 2880), False, 'from django.db import migrations, models\n'), ((2916, 2959), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2936, 2959), False, 'from django.db import migrations, models\n'), ((3000, 3043), 'django.db.models.DateTimeField', 
'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3020, 3043), False, 'from django.db import migrations, models\n'), ((3073, 3160), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'api.symbol')\n", (3090, 3160), False, 'from django.db import migrations, models\n'), ((3480, 3535), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (3500, 3535), False, 'from django.db import migrations, models\n'), ((3569, 3588), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3586, 3588), False, 'from django.db import migrations, models\n'), ((3622, 3641), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3639, 3641), False, 'from django.db import migrations, models\n'), ((3674, 3693), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3691, 3693), False, 'from django.db import migrations, models\n'), ((3728, 3747), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3745, 3747), False, 'from django.db import migrations, models\n'), ((3786, 3826), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3803, 3826), False, 'from django.db import migrations, models\n'), ((3856, 3877), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3875, 3877), False, 'from django.db import migrations, models\n'), ((3916, 3956), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3933, 3956), False, 'from django.db import migrations, models\n'), ((3991, 4031), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), 
'(blank=True, null=True)\n', (4008, 4031), False, 'from django.db import migrations, models\n'), ((4067, 4110), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4087, 4110), False, 'from django.db import migrations, models\n'), ((4151, 4194), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4171, 4194), False, 'from django.db import migrations, models\n'), ((4224, 4311), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'api.symbol')\n", (4241, 4311), False, 'from django.db import migrations, models\n'), ((4628, 4683), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (4648, 4683), False, 'from django.db import migrations, models\n'), ((4717, 4736), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (4734, 4736), False, 'from django.db import migrations, models\n'), ((4770, 4789), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (4787, 4789), False, 'from django.db import migrations, models\n'), ((4822, 4841), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (4839, 4841), False, 'from django.db import migrations, models\n'), ((4876, 4895), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (4893, 4895), False, 'from django.db import migrations, models\n'), ((4934, 4974), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4951, 4974), False, 'from django.db import migrations, models\n'), ((5004, 5025), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5023, 5025), False, 'from 
django.db import migrations, models\n'), ((5064, 5104), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5081, 5104), False, 'from django.db import migrations, models\n'), ((5139, 5179), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5156, 5179), False, 'from django.db import migrations, models\n'), ((5215, 5258), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5235, 5258), False, 'from django.db import migrations, models\n'), ((5299, 5342), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5319, 5342), False, 'from django.db import migrations, models\n'), ((5372, 5459), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'api.symbol')\n", (5389, 5459), False, 'from django.db import migrations, models\n'), ((5776, 5831), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (5796, 5831), False, 'from django.db import migrations, models\n'), ((5865, 5884), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (5882, 5884), False, 'from django.db import migrations, models\n'), ((5918, 5937), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (5935, 5937), False, 'from django.db import migrations, models\n'), ((5970, 5989), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (5987, 5989), False, 'from django.db import migrations, models\n'), ((6024, 6043), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (6041, 6043), False, 'from 
django.db import migrations, models\n'), ((6082, 6122), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6099, 6122), False, 'from django.db import migrations, models\n'), ((6152, 6173), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (6171, 6173), False, 'from django.db import migrations, models\n'), ((6212, 6252), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6229, 6252), False, 'from django.db import migrations, models\n'), ((6287, 6327), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6304, 6327), False, 'from django.db import migrations, models\n'), ((6363, 6406), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6383, 6406), False, 'from django.db import migrations, models\n'), ((6447, 6490), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (6467, 6490), False, 'from django.db import migrations, models\n'), ((6520, 6607), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'api.symbol')\n", (6537, 6607), False, 'from django.db import migrations, models\n'), ((6924, 6979), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (6944, 6979), False, 'from django.db import migrations, models\n'), ((7013, 7032), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (7030, 7032), False, 'from django.db import migrations, models\n'), ((7066, 7085), 'django.db.models.FloatField', 
'models.FloatField', ([], {}), '()\n', (7083, 7085), False, 'from django.db import migrations, models\n'), ((7118, 7137), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (7135, 7137), False, 'from django.db import migrations, models\n'), ((7172, 7191), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (7189, 7191), False, 'from django.db import migrations, models\n'), ((7230, 7270), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7247, 7270), False, 'from django.db import migrations, models\n'), ((7300, 7321), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (7319, 7321), False, 'from django.db import migrations, models\n'), ((7360, 7400), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7377, 7400), False, 'from django.db import migrations, models\n'), ((7435, 7475), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7452, 7475), False, 'from django.db import migrations, models\n'), ((7511, 7554), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7531, 7554), False, 'from django.db import migrations, models\n'), ((7595, 7638), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (7615, 7638), False, 'from django.db import migrations, models\n'), ((7668, 7755), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'api.symbol')\n", (7685, 7755), False, 'from django.db import migrations, models\n'), ((8069, 8124), 'django.db.models.DateTimeField', 'models.DateTimeField', 
([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (8089, 8124), False, 'from django.db import migrations, models\n'), ((8158, 8177), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (8175, 8177), False, 'from django.db import migrations, models\n'), ((8211, 8230), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (8228, 8230), False, 'from django.db import migrations, models\n'), ((8263, 8282), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (8280, 8282), False, 'from django.db import migrations, models\n'), ((8317, 8336), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (8334, 8336), False, 'from django.db import migrations, models\n'), ((8375, 8415), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8392, 8415), False, 'from django.db import migrations, models\n'), ((8445, 8466), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (8464, 8466), False, 'from django.db import migrations, models\n'), ((8505, 8545), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8522, 8545), False, 'from django.db import migrations, models\n'), ((8580, 8620), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8597, 8620), False, 'from django.db import migrations, models\n'), ((8656, 8699), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8676, 8699), False, 'from django.db import migrations, models\n'), ((8740, 8783), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (8760, 8783), False, 'from django.db import migrations, models\n'), ((8813, 
8900), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'api.symbol')\n", (8830, 8900), False, 'from django.db import migrations, models\n'), ((9215, 9270), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (9235, 9270), False, 'from django.db import migrations, models\n'), ((9304, 9323), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (9321, 9323), False, 'from django.db import migrations, models\n'), ((9357, 9376), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (9374, 9376), False, 'from django.db import migrations, models\n'), ((9409, 9428), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (9426, 9428), False, 'from django.db import migrations, models\n'), ((9463, 9482), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (9480, 9482), False, 'from django.db import migrations, models\n'), ((9521, 9561), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9538, 9561), False, 'from django.db import migrations, models\n'), ((9591, 9612), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (9610, 9612), False, 'from django.db import migrations, models\n'), ((9651, 9691), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9668, 9691), False, 'from django.db import migrations, models\n'), ((9726, 9766), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9743, 9766), False, 'from django.db import migrations, models\n'), ((9802, 9845), 'django.db.models.DateTimeField', 'models.DateTimeField', 
([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9822, 9845), False, 'from django.db import migrations, models\n'), ((9886, 9929), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (9906, 9929), False, 'from django.db import migrations, models\n'), ((9959, 10046), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""api.symbol"""'}), "(on_delete=django.db.models.deletion.DO_NOTHING, to=\n 'api.symbol')\n", (9976, 10046), False, 'from django.db import migrations, models\n')] |