text string | size int64 | token_count int64 |
|---|---|---|
# For Time Logging
import time
from contextlib import contextmanager
import logging
@contextmanager
def time_usage(name=""):
    """Context manager that logs the wall-clock time spent in its block.

    Parameters
    ----------
    name : str
        Label prepended to the log line so concurrent timings can be told apart.

    The elapsed time is logged at INFO level even if the block raises.
    """
    # perf_counter is monotonic, so the measurement is immune to system
    # clock adjustments (time.time() is not).
    start = time.perf_counter()
    try:
        yield
    finally:
        # log in a finally-block so a raising body still gets timed
        elapsed_seconds = round(time.perf_counter() - start, 10)
        logging.info('%s: Time Taken (seconds): %s', name, elapsed_seconds)
| 436 | 139 |
import os
import shutil

# Sort the files in ./ALL/ into ./b/ or ./c/ depending on whether the part
# of the name after the last underscore ends in 'b' or 'c'
# (e.g. "scan_01b.png" -> ./b/scan_01b.png).
path = './ALL/'
outpath = "./rename/"
outb = "./b/"
outc = "./c/"
for f in os.listdir(path):
    src = os.path.join(path, f)
    if not os.path.isfile(src):
        continue  # skip sub-directories; shutil.copy would fail on them
    print(f)
    name, ext = os.path.splitext(f)
    # rsplit: only split on the LAST underscore so names containing several
    # underscores no longer raise "too many values to unpack"
    a, ext2 = name.rsplit('_', 1)
    if ext2.endswith('b'):
        print(outb + f)
        shutil.copy(src, os.path.join(outb, f))
    elif ext2.endswith('c'):
        print(outc + f)
        shutil.copy(src, os.path.join(outc, f))
    print(a)
    # shutil.copy(src, os.path.join(outpath, a + ext))
import socket
import threading
import concurrent.futures
from colorama import Fore, Style
def port_scanner():
    """Interactively scan TCP ports 1..N on a user-supplied host.

    Prompts for a target address and an upper port bound, then probes each
    port with a 1-second connect timeout from a 100-worker thread pool,
    printing every port that accepts a connection.
    """
    print_lock = threading.Lock()  # serialize prints from worker threads
    print('')
    ip = input(Fore.CYAN + "[PORT_SCANNER]" + Style.RESET_ALL + "[>] Target: ")
    port_range = input(Fore.CYAN + "[PORT_SCANNER]" + Style.RESET_ALL + "[>] Scan up to port (ex would be 1000): ")
    print('')

    def scan(ip, port):
        # `with` guarantees the socket is closed even when connect() raises
        # (the original leaked the fd on failure).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as scanner:
            scanner.settimeout(1)
            try:
                scanner.connect((ip, port))
            except OSError:
                # closed/filtered port: timeout, refusal, unreachable.
                # Narrow except - a typo or KeyboardInterrupt is no longer
                # silently swallowed as it was with the bare `except:`.
                return
        with print_lock:
            print(Style.RESET_ALL + Fore.GREEN + "[" + ip + "]" + Style.RESET_ALL + f" Port {Fore.RED}{port}" + Style.RESET_ALL + " is open on the target")

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        for port in range(int(port_range)):
            executor.submit(scan, ip, port + 1)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    # Auto-generated by pyuic5 from 'untitled.ui' - regenerating the .ui
    # file will overwrite any manual edits here.

    def setupUi(self, MainWindow):
        """Create and lay out all widgets of the 513x403 main-menu window."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(513, 403)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # The four action buttons start disabled - presumably enabled later
        # by application logic once a function is defined; TODO confirm.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setEnabled(False)
        self.pushButton.setGeometry(QtCore.QRect(80, 70, 91, 31))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setEnabled(False)
        self.pushButton_2.setGeometry(QtCore.QRect(10, 70, 71, 31))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setEnabled(False)
        self.pushButton_3.setGeometry(QtCore.QRect(194, 20, 311, 31))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setEnabled(False)
        self.pushButton_4.setGeometry(QtCore.QRect(194, 72, 311, 31))
        self.pushButton_4.setObjectName("pushButton_4")
        # pushButton2 ("define analytic function") is the only enabled button
        self.pushButton2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton2.setGeometry(QtCore.QRect(10, 20, 161, 31))
        self.pushButton2.setObjectName("pushButton2")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(200, 0, 311, 16))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(200, 50, 281, 16))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(30, 110, 461, 251))
        self.label_3.setText("")
        # NOTE(review): absolute, user-specific image path - the pixmap
        # will silently be empty on any other machine.
        self.label_3.setPixmap(QtGui.QPixmap("C:/Users/arsen/Desktop/riemann.jpg"))
        self.label_3.setObjectName("label_3")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 513, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (Ukrainian UI texts)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Головне Меню"))
        self.pushButton.setText(_translate("MainWindow", "Відобразити все"))
        self.pushButton_2.setText(_translate("MainWindow", "Відобразити"))
        self.pushButton_3.setText(_translate("MainWindow", "Створити"))
        self.pushButton_4.setText(_translate("MainWindow", "Створити"))
        self.pushButton2.setText(_translate("MainWindow", "Задати аналітичну функцію"))
        self.label.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#aa0000;\">Мінімальна поверхня з квазіконформною заміною параметра</span></p></body></html>"))
        self.label_2.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#005500;\">Мінімальна поверхня з конформною заміною параметра</span></p></body></html>"))
| 3,720 | 1,314 |
#!/usr/bin/env python
# coding=utf-8
from pyecharts.chart import Chart
from pyecharts.option import get_all_options
class Scatter(Chart):
    """
    <<< Scatter chart >>>

    A scatter chart on a rectangular coordinate system shows the relation
    between the x and y values of the data items; if items carry more
    dimensions, these can be encoded as colour via the visualmap/geo
    components.
    """

    def __init__(self, title="", subtitle="", **kwargs):
        super(Scatter, self).__init__(title, subtitle, **kwargs)

    def add(self, *args, **kwargs):
        """Public wrapper forwarding to the private :meth:`__add`."""
        self.__add(*args, **kwargs)

    def __add(self, name, x_axis, y_axis, extra_data=None,
              symbol_size=10,
              **kwargs):
        """
        :param name:
            Series name, used for the tooltip display and legend filtering.
        :param x_axis:
            x-axis data.
        :param y_axis:
            y-axis data; must have the same length as ``x_axis``.
        :param extra_data:
            Optional third-dimension data (x is the first dimension, y the
            second); can be mapped to visual elements via the visualmap
            component.
        :param symbol_size:
            Marker size, 10 by default.
        :param kwargs:
            Further chart options, forwarded to ``get_all_options`` and
            ``_config_components``.
        """
        assert len(x_axis) == len(y_axis)
        kwargs.update(type="scatter", x_axis=x_axis)
        chart = get_all_options(**kwargs)
        xaxis, yaxis = chart['xy_axis']
        self._option.update(xAxis=xaxis, yAxis=yaxis)
        self._option.get('legend')[0].get('data').append(name)
        # each data point is [x, y] or [x, y, extra] when a third dimension
        # is supplied
        if extra_data:
            _data = [list(z) for z in zip(x_axis, y_axis, extra_data)]
        else:
            _data = [list(z) for z in zip(x_axis, y_axis)]
        self._option.get('series').append({
            "type": "scatter",
            "name": name,
            "symbol": chart['symbol'],
            "symbolSize": symbol_size,
            "data": _data,
            "label": chart['label'],
            "seriesId": self._option.get('series_id'),
        })
        self._config_components(**kwargs)

    def draw(self, path, color=None):
        """Convert the pixels of the image at ``path`` into two coordinate
        lists (k_lst, v_lst) usable as scatter data; pixels matching
        ``color`` (e.g. (255, 255, 255) to drop white) are excluded.

        :param path: path of the image to convert.
        :param color: colour to exclude.
        :return: the converted arrays.
        """
        # NotImplementedError (an Exception subclass, so existing callers
        # still catch it) is the idiomatic signal for a missing feature.
        raise NotImplementedError("Not implemented")
| 2,056 | 848 |
import json
class AliceRequest(object):
    """Read-only wrapper around a parsed Alice (Yandex.Dialogs) request dict."""

    def __init__(self, request_dict):
        self._request_dict = request_dict

    @property
    def version(self):
        """Protocol version string from the request envelope."""
        return self._request_dict['version']

    @property
    def session(self):
        """Raw session sub-dict of the request."""
        return self._request_dict['session']

    @property
    def user_id(self):
        """Identifier of the user taken from the session."""
        return self.session['user_id']

    @property
    def is_new_session(self):
        """True when this request opened a new dialog session."""
        return bool(self.session['new'])

    @property
    def command(self):
        """The user's utterance as normalized by Alice.

        (A leftover debug ``print`` of the command was removed here.)
        """
        return self._request_dict['request']['command']

    @property
    def tokens(self):
        """NLU tokenization of the utterance."""
        return self._request_dict['request']['nlu']['tokens']

    def get_number(self):
        """Return the first YANDEX.NUMBER entity value, or None.

        ``entities`` may be missing or None in the payload; ``or []``
        keeps the loop from raising TypeError in that case.
        """
        for e in self._request_dict['request']['nlu'].get('entities') or []:
            if e.get('type') == "YANDEX.NUMBER":
                return e.get('value')
        return None

    def __str__(self):
        return str(self._request_dict)
class AliceResponse(object):
    """Builder for an Alice (Yandex.Dialogs) reply envelope.

    Mirrors the request's version/session fields and accumulates the
    'response' payload (text, buttons, end-of-session flag).
    """

    def __init__(self, alice_request):
        payload = {"end_session": False}
        self._response_dict = {
            "version": alice_request.version,
            "session": alice_request.session,
            "response": payload,
        }

    def dumps(self):
        """Serialize the reply as pretty-printed, non-ASCII-preserving JSON."""
        return json.dumps(self._response_dict, ensure_ascii=False, indent=2)

    def set_text(self, text):
        # Alice caps reply texts at 1024 characters, so truncate here.
        self._response_dict['response']['text'] = text[:1024]

    def get_text(self):
        """Return the reply text set so far."""
        return self._response_dict['response']['text']

    def set_buttons(self, buttons):
        """Attach the given button definitions to the reply."""
        self._response_dict['response']['buttons'] = buttons

    def end(self):
        """Mark the dialog session as finished."""
        self._response_dict["response"]["end_session"] = True

    def __str__(self):
        return self.dumps()
| 1,799 | 525 |
import yfinance as yf
import pandas as pd
import Utils
from Utils import scrape_utils
def qtrlabels(tick, date, comp='plain'):
    """Label a ticker's quarter as up (1) or down (0).

    comp='plain'  : 1 if the stock closed the quarter higher than it opened.
    comp='s&p'    : 1 if the stock's percent change beat SPY's over the
                    same window.
    comp='sector' : not implemented yet (prints a notice, returns None).
    Any other value falls through and returns None.

    Assumes scrape_utils date helpers are pure functions of `date`
    (they are called with identical arguments for both tickers).
    """
    window_start = scrape_utils.dashdate(scrape_utils.fixdate(date))
    window_end = scrape_utils.qenddate(date)
    quarter = yf.Ticker(tick.upper()).history(
        interval="1d", start=window_start, end=window_end)
    ratio = quarter.iloc[-1].Close / quarter.iloc[0].Close
    if comp == 'plain':
        return int(ratio > 1)
    if comp == 's&p':
        # experimental: compare percent change against the SPY benchmark
        spy = yf.Ticker('SPY').history(
            interval="1d", start=window_start, end=window_end)
        benchmark = (spy.iloc[-1].Close / spy.iloc[0].Close) - 1
        return int(ratio - 1 > benchmark)
    if comp == 'sector':
        print('NOT YET SUPPORTED')
        return None
| 869 | 334 |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.core.interfaces import ImmutableInterface
from pymor.core.logger import getLogger
from pymor.reductors.basic import reduce_generic_rb
from pymor.reductors.residual import reduce_residual, reduce_implicit_euler_residual
from pymor.operators.constructions import IdentityOperator
from pymor.algorithms.timestepping import ImplicitEulerTimeStepper
def reduce_parabolic(discretization, RB, product=None, coercivity_estimator=None,
                     disable_caching=True, extends=None):
    r"""Reductor for parabolic equations.

    This reductor uses :meth:`~pymor.reductors.basic.reduce_generic_rb` for the actual
    RB-projection. The only addition is the assembly of an error estimator which
    bounds the discrete l2-in time / energy-in space error similar to [GP05]_, [HO08]_
    as follows:

    .. math::
        \left[ C_a^{-1}(\mu)\|e_N(\mu)\|^2 + \sum_{n=1}^{N} \Delta t\|e_n(\mu)\|^2_e \right]^{1/2}
        \leq \left[ C_a^{-1}(\mu)\Delta t \sum_{n=1}^{N}\|\mathcal{R}^n(u_n(\mu), \mu)\|^2_{e,-1}
        + C_a^{-1}(\mu)\|e_0\|^2 \right]^{1/2}

    Here, :math:`\|\cdot\|` denotes the norm induced by the problem's mass matrix
    (e.g. the L^2-norm) and :math:`\|\cdot\|_e` is an arbitrary energy norm w.r.t.
    which the space operator :math:`A(\mu)` is coercive, and :math:`C_a(\mu)` is a
    lower bound for its coercivity constant. Finally, :math:`\mathcal{R}^n` denotes
    the implicit Euler timestepping residual for the (fixed) time step size :math:`\Delta t`,

    .. math::
        \mathcal{R}^n(u_n(\mu), \mu) :=
        f - M \frac{u_{n}(\mu) - u_{n-1}(\mu)}{\Delta t} - A(u_n(\mu), \mu),

    where :math:`M` denotes the mass operator and :math:`f` the source term.
    The dual norm of the residual is computed using the numerically stable projection
    from [BEOR14]_.

    .. warning::
        The reduced basis `RB` is required to be orthonormal w.r.t. the given
        energy product. If not, the projection of the initial values will be
        computed incorrectly.

    .. [GP05] M. A. Grepl, A. T. Patera, A Posteriori Error Bounds For Reduced-Basis
              Approximations Of Parametrized Parabolic Partial Differential Equations,
              M2AN 39(1), 157-181, 2005.
    .. [HO08] B. Haasdonk, M. Ohlberger, Reduced basis method for finite volume
              approximations of parametrized evolution equations,
              M2AN 42(2), 277-302, 2008.

    Parameters
    ----------
    discretization
        The |InstationaryDiscretization| which is to be reduced.
    RB
        |VectorArray| containing the reduced basis on which to project.
    product
        The energy inner product |Operator| w.r.t. the reduction error is estimated.
        RB must be orthonormal w.r.t. this product!
    coercivity_estimator
        `None` or a |Parameterfunctional| returning a lower bound :math:`C_a(\mu)`
        for the coercivity constant of `discretization.operator` w.r.t. `product`.
    disable_caching
        If `True`, caching of solutions is disabled for the reduced |Discretization|.
    extends
        Set by :meth:`~pymor.algorithms.greedy.greedy` to the result of the
        last reduction in case the basis extension was `hierarchic` (used to prevent
        re-computation of residual range basis vectors already obtained from previous
        reductions).

    Returns
    -------
    rd
        The reduced |Discretization|.
    rc
        The reconstructor providing a `reconstruct(U)` method which reconstructs
        high-dimensional solutions from solutions `U` of the reduced |Discretization|.
    reduction_data
        Additional data produced by the reduction process (compare the
        `extends` parameter).
    """
    assert extends is None or len(extends) == 3
    # only the implicit Euler residual (see docstring) is supported
    assert isinstance(discretization.time_stepper, ImplicitEulerTimeStepper)

    logger = getLogger('pymor.reductors.parabolic.reduce_parabolic')

    # reuse residual range bases from a previous (hierarchic) reduction, if any
    # (sic: the 'resdidual' typo is a purely local variable name)
    old_residual_data = extends[2].pop('residual') if extends else None
    old_initial_resdidual_data = extends[2].pop('initial_residual') if extends else None

    with logger.block('RB projection ...'):
        rd, rc, data = reduce_generic_rb(discretization, RB, vector_product=product,
                                         disable_caching=disable_caching, extends=extends)

    dt = discretization.T / discretization.time_stepper.nt

    with logger.block('Assembling error estimator ...'):
        # time-stepping residual (the R^n terms of the bound)
        residual, residual_reconstructor, residual_data = reduce_implicit_euler_residual(
            discretization.operator, discretization.mass, dt, discretization.rhs,
            RB, product=product, extends=old_residual_data
        )

        # initial-value residual (the e_0 term of the bound), measured in the
        # l2 product
        initial_residual, initial_residual_reconstructor, initial_residual_data = reduce_residual(
            IdentityOperator(discretization.solution_space), discretization.initial_data, RB, False,
            product=discretization.l2_product, extends=old_initial_resdidual_data
        )

    estimator = ReduceParabolicEstimator(residual, residual_data.get('residual_range_dims', None),
                                         initial_residual, initial_residual_data.get('residual_range_dims', None),
                                         coercivity_estimator)
    rd = rd.with_(estimator=estimator)

    data.update(residual=(residual, residual_reconstructor, residual_data),
                initial_residual=(initial_residual, initial_residual_reconstructor, initial_residual_data))

    return rd, rc, data
class ReduceParabolicEstimator(ImmutableInterface):
    """Instantiated by :func:`reduce_parabolic`.

    Not to be used directly.
    """

    def __init__(self, residual, residual_range_dims, initial_residual, initial_residual_range_dims,
                 coercivity_estimator):
        self.residual = residual
        self.residual_range_dims = residual_range_dims
        self.initial_residual = initial_residual
        self.initial_residual_range_dims = initial_residual_range_dims
        self.coercivity_estimator = coercivity_estimator

    def estimate(self, U, mu, discretization, return_error_sequence=False):
        """Estimate the error of the reduced trajectory `U`.

        Returns the cumulative bound at the final time step, or the whole
        sequence of per-step bounds when `return_error_sequence` is True.
        """
        dt = discretization.T / discretization.time_stepper.nt
        # fall back to C_a = 1 when no coercivity lower bound is supplied
        C = self.coercivity_estimator(mu) if self.coercivity_estimator else 1.

        est = np.empty(len(U))
        # initial-condition contribution of the bound
        est[0] = (1./C) * self.initial_residual.apply(U, ind=0, mu=mu).l2_norm2()[0]
        # implicit-Euler residual terms: each iterate paired with its predecessor
        est[1:] = self.residual.apply(U, U, ind=list(range(1, len(U))), ind_old=list(range(0, len(U) - 1)),
                                      mu=mu).l2_norm2()
        est[1:] *= (dt/C**2)
        # accumulate in time, then take the square root (cf. the bound in
        # the reduce_parabolic docstring)
        est = np.sqrt(np.cumsum(est))

        return est if return_error_sequence else est[-1]

    def restricted_to_subbasis(self, dim, discretization):
        # Reuse precomputed residual range dimensions when available so the
        # restricted estimator stays online-efficient; otherwise re-project
        # from scratch (with a warning).
        if self.residual_range_dims and self.initial_residual_range_dims:
            residual_range_dims = self.residual_range_dims[:dim + 1]
            residual = self.residual.projected_to_subbasis(residual_range_dims[-1], dim)
            initial_residual_range_dims = self.initial_residual_range_dims[:dim + 1]
            initial_residual = self.initial_residual.projected_to_subbasis(initial_residual_range_dims[-1], dim)
            return ReduceParabolicEstimator(residual, residual_range_dims,
                                            initial_residual, initial_residual_range_dims,
                                            self.coercivity_estimator)
        else:
            self.logger.warn('Cannot efficiently reduce to subbasis')
            return ReduceParabolicEstimator(self.residual.projected_to_subbasis(None, dim), None,
                                            self.initial_residual.projected_to_subbasis(None, dim), None,
                                            self.coercivity_estimator)
| 8,014 | 2,574 |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import get_datetime
from frappe.core.doctype.scheduled_job_type.scheduled_job_type import sync_jobs
class TestScheduledJobType(unittest.TestCase):
    """Integration tests for Scheduled Job Type syncing and due-date logic.

    Relies on jobs registered via hooks: email queue flush/clear, weekly
    energy-point summary, monthly auto email report and the oauth cleanup
    cron job.
    """

    def setUp(self):
        # start from a clean slate: discard any open transaction, wipe the
        # table, re-sync jobs from hooks and persist so every test sees the
        # same fixtures
        frappe.db.rollback()
        frappe.db.sql('truncate `tabScheduled Job Type`')
        sync_jobs()
        frappe.db.commit()

    def test_sync_jobs(self):
        """Jobs from hooks get the expected frequency after sync."""
        all_job = frappe.get_doc('Scheduled Job Type',
            dict(method='frappe.email.queue.flush'))
        self.assertEqual(all_job.frequency, 'All')

        daily_job = frappe.get_doc('Scheduled Job Type',
            dict(method='frappe.email.queue.clear_outbox'))
        self.assertEqual(daily_job.frequency, 'Daily')

        # check if cron jobs are synced
        cron_job = frappe.get_doc('Scheduled Job Type',
            dict(method='frappe.oauth.delete_oauth2_data'))
        self.assertEqual(cron_job.frequency, 'Cron')
        self.assertEqual(cron_job.cron_format, '0/15 * * * *')

        # check if jobs are synced after change in hooks
        updated_scheduler_events = { "hourly": ["frappe.email.queue.flush"] }
        sync_jobs(updated_scheduler_events)
        updated_scheduled_job = frappe.get_doc("Scheduled Job Type", {"method": "frappe.email.queue.flush"})
        self.assertEqual(updated_scheduled_job.frequency, "Hourly")

    def test_daily_job(self):
        # due only once a full day has elapsed since last_execution
        job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.email.queue.clear_outbox'))
        job.db_set('last_execution', '2019-01-01 00:00:00')
        self.assertTrue(job.is_event_due(get_datetime('2019-01-02 00:00:06')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-01 00:00:06')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-01 23:59:59')))

    def test_weekly_job(self):
        # 2019-01-06 is the first Sunday after last_execution
        job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.social.doctype.energy_point_log.energy_point_log.send_weekly_summary'))
        job.db_set('last_execution', '2019-01-01 00:00:00')
        self.assertTrue(job.is_event_due(get_datetime('2019-01-06 00:00:01')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-02 00:00:06')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-05 23:59:59')))

    def test_monthly_job(self):
        # due at the turn of the month, not before
        job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.email.doctype.auto_email_report.auto_email_report.send_monthly'))
        job.db_set('last_execution', '2019-01-01 00:00:00')
        self.assertTrue(job.is_event_due(get_datetime('2019-02-01 00:00:01')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-15 00:00:06')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-31 23:59:59')))

    def test_cron_job(self):
        # runs every 15 mins
        job = frappe.get_doc('Scheduled Job Type', dict(method = 'frappe.oauth.delete_oauth2_data'))
        job.db_set('last_execution', '2019-01-01 00:00:00')
        self.assertTrue(job.is_event_due(get_datetime('2019-01-01 00:15:01')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-01 00:05:06')))
        self.assertFalse(job.is_event_due(get_datetime('2019-01-01 00:14:59')))
# menu_text.py
#
# simple python menu
# https://stackoverflow.com/questions/19964603/creating-a-menu-in-python
#
# Menu definitions for the interactive prompts: each menu maps the key the
# user types to the human-readable choice; 'x' always means Exit.
city_menu = {
    '1': 'Chicago',
    '2': 'New York',
    '3': 'Washington',
    'x': 'Exit',
}

_MONTHS = ('All', 'January', 'February', 'March', 'April', 'May', 'June')
month_menu = {str(i): month for i, month in enumerate(_MONTHS)}
month_menu['x'] = 'Exit'

_WEEKDAYS = ('All', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',
             'Friday', 'Saturday', 'Sunday')
weekday_menu = {str(i): day for i, day in enumerate(_WEEKDAYS)}
weekday_menu['x'] = 'Exit'
def get_menu_item(menu):
    """Prompt until the user picks a key present in *menu*; return that key.

    Matching is case-insensitive and whitespace-tolerant: the input is
    stripped and lower-cased before lookup, so 'X' (or ' x ') selects the
    'x' (Exit) entry.  (The original only lowered fully-uppercase input,
    which missed mixed-case entries.)
    """
    options = sorted(menu)  # menu is not mutated, so compute this once
    while True:
        print('------------')
        print('Menu Options')
        print('------------')
        for entry in options:
            print(entry, menu[entry])
        selection = input("Please Select: ").strip().lower()
        if selection in options:
            return selection
        print("Unknown Option Selected!")
def main():
    """Walk the user through the three menus and echo each choice.

    Selections are echoed by label only; the chosen keys stay local.
    (Fixed the doubled 'Selected Selected City' label and removed a
    leftover debug print of the raw month key.)
    """
    city_selected = get_menu_item(city_menu)
    print('\n Selected City: ', city_menu[city_selected])

    month_selected = get_menu_item(month_menu)
    print('\n Selected Month: ', month_menu[month_selected])

    day_selected = get_menu_item(weekday_menu)
    print('\n Selected Weekday: ', weekday_menu[day_selected])


if __name__ == "__main__":
    main()
| 1,750 | 531 |
from src.books.models import Book
from src.books.schema import BookOut
from ninja import Router
router = Router()


# Read-only endpoint: GET / returns every Book, serialized by django-ninja
# through the BookOut schema declared in the response annotation.
@router.get("/", response=list[BookOut])
def get_books(request):
    return Book.objects.all()
| 213 | 68 |
from getratings.models.ratings import Ratings
# Matchup rating classes for Kha'Zix (Support, NA region): one empty
# Ratings subclass per opposing champion.  The classes are generated
# dynamically to replace ~280 lines of identical boilerplate; every public
# name (e.g. NA_Khazix_Sup_Aatrox) remains importable from this module
# exactly as before.
_CHAMPIONS = [
    'Aatrox', 'Ahri', 'Akali', 'Alistar', 'Amumu', 'Anivia', 'Annie',
    'Ashe', 'AurelionSol', 'Azir', 'Bard', 'Blitzcrank', 'Brand', 'Braum',
    'Caitlyn', 'Camille', 'Cassiopeia', 'Chogath', 'Corki', 'Darius',
    'Diana', 'Draven', 'DrMundo', 'Ekko', 'Elise', 'Evelynn', 'Ezreal',
    'Fiddlesticks', 'Fiora', 'Fizz', 'Galio', 'Gangplank', 'Garen',
    'Gnar', 'Gragas', 'Graves', 'Hecarim', 'Heimerdinger', 'Illaoi',
    'Irelia', 'Ivern', 'Janna', 'JarvanIV', 'Jax', 'Jayce', 'Jhin',
    'Jinx', 'Kalista', 'Karma', 'Karthus', 'Kassadin', 'Katarina',
    'Kayle', 'Kayn', 'Kennen', 'Khazix', 'Kindred', 'Kled', 'KogMaw',
    'Leblanc', 'LeeSin', 'Leona', 'Lissandra', 'Lucian', 'Lulu', 'Lux',
    'Malphite', 'Malzahar', 'Maokai', 'MasterYi', 'MissFortune',
    'MonkeyKing', 'Mordekaiser', 'Morgana', 'Nami', 'Nasus', 'Nautilus',
    'Nidalee', 'Nocturne', 'Nunu', 'Olaf', 'Orianna', 'Ornn', 'Pantheon',
    'Poppy', 'Quinn', 'Rakan', 'Rammus', 'RekSai', 'Renekton', 'Rengar',
    'Riven', 'Rumble', 'Ryze', 'Sejuani', 'Shaco', 'Shen', 'Shyvana',
    'Singed', 'Sion', 'Sivir', 'Skarner', 'Sona', 'Soraka', 'Swain',
    'Syndra', 'TahmKench', 'Taliyah', 'Talon', 'Taric', 'Teemo',
    'Thresh', 'Tristana', 'Trundle', 'Tryndamere', 'TwistedFate',
    'Twitch', 'Udyr', 'Urgot', 'Varus', 'Vayne', 'Veigar', 'Velkoz',
    'Vi', 'Viktor', 'Vladimir', 'Volibear', 'Warwick', 'Xayah', 'Xerath',
    'XinZhao', 'Yasuo', 'Yorick', 'Zac', 'Zed', 'Ziggs', 'Zilean', 'Zyra',
]

for _champion in _CHAMPIONS:
    _name = 'NA_Khazix_Sup_' + _champion
    # type(name, bases, dict) creates a class identical to an empty
    # `class _name(Ratings): pass` statement
    globals()[_name] = type(_name, (Ratings,), {})
del _champion, _name
| 6,545 | 3,733 |
# generated by datamodel-codegen:
# filename: Organization.schema.json
# timestamp: 1985-10-26T08:21:00+00:00
from __future__ import annotations
from pydantic import BaseModel, Field
class Schema(BaseModel):
    """Root model: the whole JSON document is a single identifier string."""
    # pydantic v1 custom-root-type: validates a bare string, not an object
    __root__: str = Field(..., description='Identifier string of this object.')
| 297 | 105 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from tablo import GEOM_FIELD_NAME, WEB_MERCATOR_SRID
forward_sql = """
DO
$$
DECLARE
table_name name;
BEGIN
FOR table_name IN
SELECT tablo_featureservicelayer.table FROM tablo_featureservicelayer
LOOP
EXECUTE format('ALTER TABLE %I ALTER COLUMN {geom_col} TYPE geometry(''GEOMETRY'', {srid});', table_name);
END LOOP;
END;
$$
LANGUAGE plpgsql;
""".format(geom_col=GEOM_FIELD_NAME, srid=WEB_MERCATOR_SRID)
class Migration(migrations.Migration):
    # NOTE(review): no reverse SQL is supplied, so this migration is
    # irreversible.

    dependencies = [
        ('tablo', '0005_fix_esriGeometryPoints'),
    ]

    operations = [
        migrations.RunSQL(forward_sql)
    ]
| 776 | 279 |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.functional as F
from torch.nn.init import xavier_normal_
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
import numpy as np
from numpy.random import RandomState
from collections import defaultdict
import time
from tkge.data.dataset import SplitDataset
from tkge.data.custom_dataset import ICEWS14AtiseDatasetProcessor
from tkge.eval.metrics import Evaluation
from tkge.train.sampling import NonNegativeSampler
from Dataset import KnowledgeGraph
# Fix RNG seeds (numpy and torch) so runs are reproducible.
randseed = 9999
np.random.seed(randseed)
torch.manual_seed(randseed)
class MockAtiseDatasetProcessor(ICEWS14AtiseDatasetProcessor):
    """ICEWS14 processor wired to the original ATISE entity/relation id
    files so results are directly comparable with the reference code.

    NOTE(review): hard-coded absolute paths make this runnable only on the
    original author's machine.
    """

    def __init__(self):
        # deliberately does NOT call super().__init__(): everything the
        # parent would derive from a config is set by hand below
        self.folder = "/home/gengyuan/workspace/tkge/data/icews14"
        self.level = "day"
        self.index = False
        self.float = True
        self.train_raw = []
        self.valid_raw = []
        self.test_raw = []
        self.reciprocal_training = True
        self.ent2id = dict()
        self.rel2id = dict()
        # id maps come from ATISE's own files (tab-separated "name\tid"),
        # not from the tkge pipeline
        with open('/home/gengyuan/workspace/baseline/ATISE/icews14/entity2id.txt', 'r') as f:
            ent2id = f.readlines()
            for line in ent2id:
                line = line.split('\t')
                self.ent2id[line[0]] = int(line[1])
        with open('/home/gengyuan/workspace/baseline/ATISE/icews14/relation2id.txt', 'r') as f:
            rel2id = f.readlines()
            for line in rel2id:
                line = line.split('\t')
                self.rel2id[line[0]] = int(line[1])
        self.ts2id = defaultdict(None)
        self.train_set = defaultdict(list)
        self.valid_set = defaultdict(list)
        self.test_set = defaultdict(list)
        self.all_triples = []
        self.all_quadruples = []
        self.load()
        self.process()
        self.filter()

    def index_relations(self, rel: str):
        # reciprocal relations are offset by 230 -- presumably the number
        # of base relations in ICEWS14; TODO confirm
        if rel.endswith('(RECIPROCAL)'):
            return self.rel2id[rel[:-12]] + 230
        else:
            return self.rel2id[rel]

    def index_entities(self, ent: str):
        # work around an accent mismatch between the two id files
        if ent == 'Horacio González':
            ent = 'Horacio Gonzalez'
        return self.ent2id[ent]

    def process_time(self, origin: str):
        # TODO (gengyuan) move to init method
        self.gran = 3  # time granularity: 3-day buckets
        start_sec = time.mktime(time.strptime('2014-01-01', '%Y-%m-%d'))
        end_sec = time.mktime(time.strptime(origin, '%Y-%m-%d'))
        day = int((end_sec - start_sec) / (self.gran * 24 * 60 * 60))
        return day
class MockEvaluation(Evaluation):
    """Evaluation configured directly (bypassing the parent's config
    machinery) for standalone comparison runs."""

    def __init__(self, dataset, device):
        self.dataset = dataset
        self.vocab_size = dataset.num_entities()
        self.device = device
        self.filter = "time-aware"      # filtering mode passed to dataset.filter
        self.ordering = "optimistic"    # tie-breaking for equal scores
        self.k = [1, 3, 10]             # hits@k cutoffs
        # defaultdict(None) behaves like a plain dict (no default factory)
        self.filtered_data = defaultdict(None)
        # precompute filtered answer sets for object ('sp_') and
        # subject ('_po') prediction
        self.filtered_data['sp_'] = self.dataset.filter(type=self.filter, target='o')
        self.filtered_data['_po'] = self.dataset.filter(type=self.filter, target='s')
class MockSampler(NonNegativeSampler):
    """Sampler stub with filtering disabled; skips the parent __init__ on
    purpose and sets only the attributes the sampler methods read."""

    def __init__(self, dataset, as_matrix):
        self.filter = False
        self.as_matrix = as_matrix
        self.dataset = dataset
class ATISE(nn.Module):
    def __init__(self, n_entity, n_relation, embedding_dim, batch_size, learning_rate, gamma, cmin, cmax, gpu=True):
        """ATISE temporal KG embedding: Gaussian embeddings whose means
        follow an additive time-series decomposition (trend + seasonality).

        :param n_entity: number of entities in the vocabulary.
        :param n_relation: number of relations.
        :param embedding_dim: dimensionality of every embedding table.
        :param batch_size: training batch size (stored; not used here).
        :param learning_rate: optimiser learning rate (stored; not used here).
        :param gamma: margin hyper-parameter (stored; not used here).
        :param cmin: lower bound for initial variance values.
        :param cmax: upper bound for initial variance values.
        :param gpu: move the whole module to CUDA when True.
        """
        super(ATISE, self).__init__()
        self.gpu = gpu
        self.n_entity = n_entity
        self.n_relation = n_relation
        self.embedding_dim = embedding_dim
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.gamma = gamma
        self.cmin = cmin
        self.cmax = cmax
        # Nets: mean and variance embeddings for entities and relations
        self.emb_E = torch.nn.Embedding(n_entity, self.embedding_dim, padding_idx=0)
        self.emb_E_var = torch.nn.Embedding(n_entity, self.embedding_dim, padding_idx=0)
        self.emb_R = torch.nn.Embedding(n_relation, self.embedding_dim, padding_idx=0)
        self.emb_R_var = torch.nn.Embedding(n_relation, self.embedding_dim, padding_idx=0)
        # time-series parameters: trend direction (emb_T*), trend scale
        # (alpha), seasonal amplitude (beta) and frequency (omega)
        self.emb_TE = torch.nn.Embedding(n_entity, self.embedding_dim, padding_idx=0)
        self.alpha_E = torch.nn.Embedding(n_entity, 1, padding_idx=0)
        self.beta_E = torch.nn.Embedding(n_entity, self.embedding_dim, padding_idx=0)
        self.omega_E = torch.nn.Embedding(n_entity, self.embedding_dim, padding_idx=0)
        self.emb_TR = torch.nn.Embedding(n_relation, self.embedding_dim, padding_idx=0)
        self.alpha_R = torch.nn.Embedding(n_relation, 1, padding_idx=0)
        self.beta_R = torch.nn.Embedding(n_relation, self.embedding_dim, padding_idx=0)
        self.omega_R = torch.nn.Embedding(n_relation, self.embedding_dim, padding_idx=0)
        # Initialization: uniform in [-r, r]; variances start in [cmin, cmax];
        # trend/seasonal amplitudes start at exactly zero
        r = 6 / np.sqrt(self.embedding_dim)
        self.emb_E.weight.data.uniform_(-r, r)
        self.emb_E_var.weight.data.uniform_(self.cmin, self.cmax)
        self.emb_R.weight.data.uniform_(-r, r)
        self.emb_R_var.weight.data.uniform_(self.cmin, self.cmax)
        self.emb_TE.weight.data.uniform_(-r, r)
        self.alpha_E.weight.data.uniform_(0, 0)
        self.beta_E.weight.data.uniform_(0, 0)
        self.omega_E.weight.data.uniform_(-r, r)
        self.emb_TR.weight.data.uniform_(-r, r)
        self.alpha_R.weight.data.uniform_(0, 0)
        self.beta_R.weight.data.uniform_(0, 0)
        self.omega_R.weight.data.uniform_(-r, r)
        # Regularization: project embeddings onto their constraint sets
        # (normalize_embeddings is defined elsewhere in this class)
        self.normalize_embeddings()
        if self.gpu:
            self.cuda()
def forward(self, X):
# h_i, t_i, r_i, d_i = X[:, 0].astype(np.int64), X[:, 1].astype(np.int64), X[:, 2].astype(np.int64), X[:,
# 3].astype(
# np.float32)
h_i, t_i, r_i, d_i = X[:, 0].long(), X[:, 2].long(), X[:, 1].long(), X[:, 3].float()
# if self.gpu:
# h_i = Variable(torch.from_numpy(h_i).cuda())
# t_i = Variable(torch.from_numpy(t_i).cuda())
# r_i = Variable(torch.from_numpy(r_i).cuda())
# d_i = Variable(torch.from_numpy(d_i).cuda())
#
# else:
# h_i = Variable(torch.from_numpy(h_i))
# t_i = Variable(torch.from_numpy(t_i))
# r_i = Variable(torch.from_numpy(r_i))
# d_i = Variable(torch.from_numpy(d_i))
pi = 3.14159265358979323846
h_mean = self.emb_E(h_i).view(-1, self.embedding_dim) + \
d_i.view(-1, 1) * self.alpha_E(h_i).view(-1, 1) * self.emb_TE(h_i).view(-1, self.embedding_dim) \
+ self.beta_E(h_i).view(-1, self.embedding_dim) * torch.sin(
2 * pi * self.omega_E(h_i).view(-1, self.embedding_dim) * d_i.view(-1, 1))
t_mean = self.emb_E(t_i).view(-1, self.embedding_dim) + \
d_i.view(-1, 1) * self.alpha_E(t_i).view(-1, 1) * self.emb_TE(t_i).view(-1, self.embedding_dim) \
+ self.beta_E(t_i).view(-1, self.embedding_dim) * torch.sin(
2 * pi * self.omega_E(t_i).view(-1, self.embedding_dim) * d_i.view(-1, 1))
r_mean = self.emb_R(r_i).view(-1, self.embedding_dim) + \
d_i.view(-1, 1) * self.alpha_R(r_i).view(-1, 1) * self.emb_TR(r_i).view(-1, self.embedding_dim) \
+ self.beta_R(r_i).view(-1, self.embedding_dim) * torch.sin(
2 * pi * self.omega_R(r_i).view(-1, self.embedding_dim) * d_i.view(-1, 1))
h_var = self.emb_E_var(h_i).view(-1, self.embedding_dim)
t_var = self.emb_E_var(t_i).view(-1, self.embedding_dim)
r_var = self.emb_R_var(r_i).view(-1, self.embedding_dim)
out1 = torch.sum((h_var + t_var) / r_var, 1) + torch.sum(((r_mean - h_mean + t_mean) ** 2) / r_var,
1) - self.embedding_dim
out2 = torch.sum(r_var / (h_var + t_var), 1) + torch.sum(((h_mean - t_mean - r_mean) ** 2) / (h_var + t_var),
1) - self.embedding_dim
out = (out1 + out2) / 4
return out
def log_rank_loss(self, y_pos, y_neg, temp=0):
M = y_pos.size(0)
N = y_neg.size(0)
y_pos = self.gamma - y_pos
y_neg = self.gamma - y_neg
C = int(N / M)
y_neg = y_neg.view(C, -1).transpose(0, 1)
p = F.softmax(temp * y_neg)
loss_pos = torch.sum(F.softplus(-1 * y_pos))
loss_neg = torch.sum(p * F.softplus(y_neg))
loss = (loss_pos + loss_neg) / 2 / M
if self.gpu:
loss = loss.cuda()
return loss
def rank_loss(self, y_pos, y_neg):
M = y_pos.size(0)
N = y_neg.size(0)
C = int(N / M)
y_pos = y_pos.repeat(C)
if self.gpu:
target = Variable(torch.from_numpy(-np.ones(N, dtype=np.float32))).cuda()
else:
target = Variable(torch.from_numpy(-np.ones(N, dtype=np.float32))).cpu()
loss = nn.MarginRankingLoss(margin=self.gamma)
loss = loss(y_pos, y_neg, target)
return loss
def normalize_embeddings(self):
self.emb_E.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_E_var.weight.data.uniform_(self.cmin, self.cmax)
self.emb_R.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_R_var.weight.data.uniform_(self.cmin, self.cmax)
self.emb_TE.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_TR.weight.data.renorm_(p=2, dim=0, maxnorm=1)
def regularization_embeddings(self):
lower = torch.tensor(self.cmin).float()
upper = torch.tensor(self.cmax).float()
if self.gpu:
lower = lower.cuda()
upper = upper.cuda()
self.emb_E_var.weight.data = torch.where(self.emb_E_var.weight.data < self.cmin, lower,
self.emb_E_var.weight.data)
self.emb_E_var.weight.data = torch.where(self.emb_E_var.weight.data > self.cmax, upper,
self.emb_E_var.weight.data)
self.emb_R_var.weight.data = torch.where(self.emb_R_var.weight.data < self.cmin, lower,
self.emb_R_var.weight.data)
self.emb_R_var.weight.data = torch.where(self.emb_R_var.weight.data > self.cmax, upper,
self.emb_R_var.weight.data)
self.emb_E.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_R.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_TE.weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.emb_TR.weight.data.renorm_(p=2, dim=0, maxnorm=1)
def rank_left(self, X, facts, kg, timedisc, rev_set=0):
rank = []
with torch.no_grad():
if timedisc:
for triple, fact in zip(X, facts):
X_i = np.ones([self.n_entity, 4])
i_score = torch.zeros(self.n_entity)
if self.gpu:
i_score = i_score.cuda()
for time_index in [triple[3], triple[4]]:
for i in range(0, self.n_entity):
X_i[i, 0] = i
X_i[i, 1] = triple[1]
X_i[i, 2] = triple[2]
X_i[i, 3] = time_index
i_score = i_score + self.forward(X_i).view(-1)
if rev_set > 0:
X_rev = np.ones([self.n_entity, 4])
for i in range(0, self.n_entity):
X_rev[i, 0] = triple[1]
X_rev[i, 1] = i
X_rev[i, 2] = triple[2] + self.n_relation // 2
X_rev[i, 3] = time_index
i_score = i_score + self.forward(X_rev).view(-1)
filter_out = kg.to_skip_final['lhs'][(fact[1], fact[2], fact[3], fact[4])]
target = i_score[int(triple[0])].clone()
i_score[filter_out] = 1e6
rank_triple = torch.sum((i_score < target).float()).cpu().item() + 1
rank.append(rank_triple)
else:
for triple, fact in zip(X, facts):
X_i = np.ones([self.n_entity, 4])
for i in range(0, self.n_entity):
X_i[i, 0] = i
X_i[i, 1] = triple[1]
X_i[i, 2] = triple[2]
X_i[i, 3] = triple[3]
i_score = self.forward(X_i)
if rev_set > 0:
X_rev = np.ones([self.n_entity, 4])
for i in range(0, self.n_entity):
X_rev[i, 0] = triple[1]
X_rev[i, 1] = i
X_rev[i, 2] = triple[2] + self.n_relation // 2
X_rev[i, 3] = triple[3]
i_score = i_score + self.forward(X_rev).view(-1)
if self.gpu:
i_score = i_score.cuda()
filter_out = kg.to_skip_final['lhs'][(fact[1], fact[2], fact[3], fact[4])]
target = i_score[int(triple[0])].clone()
i_score[filter_out] = 1e6
rank_triple = torch.sum((i_score < target).float()).cpu().item() + 1
rank.append(rank_triple)
print('left')
print(rank)
return rank
def rank_right(self, X, facts, kg, timedisc, rev_set=0):
rank = []
with torch.no_grad():
if timedisc:
for triple, fact in zip(X, facts):
X_i = np.ones([self.n_entity, 4])
i_score = torch.zeros(self.n_entity)
if self.gpu:
i_score = i_score.cuda()
for time_index in [triple[3], triple[4]]:
for i in range(0, self.n_entity):
X_i[i, 0] = triple[0]
X_i[i, 1] = i
X_i[i, 2] = triple[2]
X_i[i, 3] = time_index
i_score = i_score + self.forward(X_i).view(-1)
if rev_set > 0:
X_rev = np.ones([self.n_entity, 4])
for i in range(0, self.n_entity):
X_rev[i, 0] = i
X_rev[i, 1] = triple[0]
X_rev[i, 2] = triple[2] + self.n_relation // 2
X_rev[i, 3] = time_index
i_score = i_score + self.forward(X_rev).view(-1)
filter_out = kg.to_skip_final['rhs'][(fact[0], fact[2], fact[3], fact[4])]
target = i_score[int(triple[1])].clone()
i_score[filter_out] = 1e6
rank_triple = torch.sum((i_score < target).float()).cpu().item() + 1
rank.append(rank_triple)
else:
for triple, fact in zip(X, facts):
X_i = np.ones([self.n_entity, 4])
for i in range(0, self.n_entity):
X_i[i, 0] = triple[0]
X_i[i, 1] = i
X_i[i, 2] = triple[2]
X_i[i, 3] = triple[3]
i_score = self.forward(X_i)
if rev_set > 0:
X_rev = np.ones([self.n_entity, 4])
for i in range(0, self.n_entity):
X_rev[i, 0] = i
X_rev[i, 1] = triple[0]
X_rev[i, 2] = triple[2] + self.n_relation // 2
X_rev[i, 3] = triple[3]
i_score = i_score + self.forward(X_rev).view(-1)
if self.gpu:
i_score = i_score.cuda()
filter_out = kg.to_skip_final['rhs'][(fact[0], fact[2], fact[3], fact[4])]
target = i_score[int(triple[1])].clone()
i_score[filter_out] = 1e6
rank_triple = torch.sum((i_score < target).float()).cpu().item() + 1
rank.append(rank_triple)
print('right')
print(rank)
return rank
def timepred(self, X):
rank = []
with torch.no_grad():
for triple in X:
X_i = np.ones([self.n_day, len(triple)])
for i in range(self.n_day):
X_i[i, 0] = triple[0]
X_i[i, 1] = triple[1]
X_i[i, 2] = triple[2]
X_i[i, 3:] = self.time_dict[i]
i_score = self.forward(X_i)
if self.gpu:
i_score = i_score.cuda()
target = i_score[triple[3]]
rank_triple = torch.sum((i_score < target).float()).cpu().item() + 1
rank.append(rank_triple)
return rank
# Load a pretrained ATISE checkpoint from a hard-coded experiment directory.
model_path = "/home/gengyuan/workspace/baseline/ATISE/icews14/ATISE/timediscrete0/dim500/lr0.0000/neg_num10/3day/gamma120/cmin0.0030/params.pkl"
# Constructor arguments must match the checkpoint's hyper-parameters:
# (n_entity=7129, n_relation=460, embedding_dim=500, batch_size=64,
#  learning_rate=0, gamma=120, cmin=0.003, cmax=0.3, gpu=True).
model = ATISE(7129, 460, 500, 64, 0, 120, 0.003, 0.3, True)
model_state_dict = torch.load(model_path)
model.load_state_dict(model_state_dict)
def test():
    """Evaluate the loaded ATISE model through the TKGE-framework pipeline.

    Builds the mock dataset/evaluator/sampler, scores every test quadruple
    for head and tail prediction (adding reciprocal-relation scores),
    accumulates per-batch metrics weighted by batch size, and dumps the
    time-aware filter lists to disk for cross-checking against the original
    ATISE evaluation.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    dataset = MockAtiseDatasetProcessor()
    evaluator = MockEvaluation(dataset, device)
    sampler = MockSampler(dataset, as_matrix=True)
    #
    # print(dataset.ts2id)
    # print(dataset.index_timestamps(23))
    # print(dataset.index_relations('Arrest, detain, or charge with legal action'))
    # print(dataset.index_entities('Japan'))
    # print(dataset.index_entities('Police (Japan)'))
    # print(dataset.filter(type='time-aware', target='s')['None-9-2205-33'])
    # print(dataset.get("train")['triple'][15442])
    # print(dataset.get("train")['timestamp_id'][15442])
    # print(dataset.get("train")['timestamp_float'][15442])
    #
    #
    # assert False
    valid_loader = torch.utils.data.DataLoader(
        SplitDataset(dataset.get("test"), ['timestamp_float', 'timestamp_id']),
        shuffle=False,
        batch_size=1,
        num_workers=0
    )
    with torch.no_grad():
        model.eval()
        metrics = dict()
        metrics['head'] = defaultdict(float)
        metrics['tail'] = defaultdict(float)
        rank_left = []
        rank_right = []
        scores_head = []
        scores_tail = []
        filter_left = []
        filter_right = []
        l = 0  # running number of evaluated quadruples (metric normaliser)
        dfs = dataset.filter(type="time-aware", target="s")
        dfo = dataset.filter(type="time-aware", target="o")
        for batch in valid_loader:
            bs = batch.size(0)
            dim = batch.size(1)
            l += bs
            print(l)
            # Expand each quadruple into all candidate head/tail corruptions.
            samples_head, _ = sampler.sample(batch, "head")
            samples_tail, _ = sampler.sample(batch, "tail")
            samples_head = samples_head.to(device)
            samples_tail = samples_tail.to(device)
            samples_head = samples_head.view(-1, dim)
            samples_tail = samples_tail.view(-1, dim)
            batch_scores_head = model.forward(samples_head)
            batch_scores_tail = model.forward(samples_tail)
            batch_scores_head = batch_scores_head.view(bs, -1)
            batch_scores_tail = batch_scores_tail.view(bs, -1)
            # reciprocal: score the reversed triple (subject/object swapped,
            # relation id shifted by 230) and add it to the forward score.
            samples_head_reciprocal = samples_head.clone().view(-1, dim)
            samples_tail_reciprocal = samples_tail.clone().view(-1, dim)
            samples_head_reciprocal[:, 1] += 230
            samples_head_reciprocal[:, [0, 2]] = samples_head_reciprocal.index_select(1, torch.Tensor(
                [2, 0]).long().to(device))
            samples_tail_reciprocal[:, 1] += 230
            samples_tail_reciprocal[:, [0, 2]] = samples_tail_reciprocal.index_select(1, torch.Tensor(
                [2, 0]).long().to(device))
            batch_scores_head_reci = model.forward(samples_head_reciprocal)
            batch_scores_tail_reci = model.forward(samples_tail_reciprocal)
            batch_scores_head_reci = batch_scores_head_reci.view(bs, -1)
            batch_scores_tail_reci = batch_scores_tail_reci.view(bs, -1)
            batch_scores_head += batch_scores_head_reci
            batch_scores_tail += batch_scores_tail_reci
            scores_head.append(batch_scores_head)
            scores_tail.append(batch_scores_tail)
            batch_metrics = dict()
            batch_metrics['head'] = evaluator.eval(batch, batch_scores_head, miss='s')
            batch_metrics['tail'] = evaluator.eval(batch, batch_scores_tail, miss='o')
            # print filter — collect the known-true answer lists for this fact
            filter_left.append(
                dfs[f'None-{int(batch[0, 1])}-{int(batch[0, 2])}-{int(batch[0, -1])}'])
            filter_right.append(
                dfo[f'{int(batch[0, 0])}-{int(batch[0, 1])}-None-{int(batch[0, -1])}'])
            # rank_left.append(batch_metrics['head']['mean_ranking'])
            # rank_right.append(batch_metrics['tail']['mean_ranking'])
            # TODO(gengyuan) refactor
            for pos in ['head', 'tail']:
                for key in batch_metrics[pos].keys():
                    metrics[pos][key] += batch_metrics[pos][key] * bs
        # rank = rank_left + rank_right
        # torch.save(rank, "/home/gengyuan/workspace/baseline/ATISE/rank_tkge.pt")
        # rank2 = torch.load("/home/gengyuan/workspace/baseline/ATISE/rank.pt")
        #
        # print('assert Equal')
        # print(rank==rank2)
        # torch.save(scores_head + scores_tail, "/home/gengyuan/workspace/baseline/ATISE/scores_tkge.pt")
        torch.save(filter_left, "/home/gengyuan/workspace/baseline/ATISE/filter_left_tkge.pt")
        torch.save(filter_right, "/home/gengyuan/workspace/baseline/ATISE/filter_right_tkge.pt")
        for pos in ['head', 'tail']:
            for key in metrics[pos].keys():
                metrics[pos][key] /= l
        print(f"Metrics(head prediction) in iteration : {metrics['head'].items()}")
        print(f"Metrics(tail prediction) in iteration : {metrics['tail'].items()}")
def test_sc():
    """Evaluate the loaded ATISE model with the original ATISE ranking
    protocol (KnowledgeGraph + rank_left/rank_right) and print MR, MRR
    and Hits@{1,3,5,10}."""
    def mean_rank(ranks):
        # Accumulate r / N (not sum(ranks) / N) to match the original
        # floating-point summation order exactly.
        total = len(ranks)
        return sum(r / total for r in ranks)
    def mrr(ranks):
        total = len(ranks)
        return sum(1 / r / total for r in ranks)
    def hit_N(ranks, N):
        hits = sum(1 for r in ranks if r <= N)
        return hits / len(ranks)
    kg = KnowledgeGraph(data_dir="/home/gengyuan/workspace/baseline/ATISE/icews14", gran=3, rev_set=1)
    test_pos = np.array(kg.test_triples)
    print(test_pos)
    # Head- and tail-prediction ranks are pooled before computing metrics.
    ranks = model.rank_left(test_pos, kg.test_facts, kg, 0, rev_set=1)
    ranks = ranks + model.rank_right(test_pos, kg.test_facts, kg, 0, rev_set=1)
    m_rank = mean_rank(ranks)
    mean_rr = mrr(ranks)
    hit_1 = hit_N(ranks, 1)
    hit_3 = hit_N(ranks, 3)
    hit_5 = hit_N(ranks, 5)
    hit_10 = hit_N(ranks, 10)
    print('validation results:')
    print('Mean Rank: {:.0f}'.format(m_rank))
    print('Mean RR: {:.4f}'.format(mean_rr))
    print('Hit@1: {:.4f}'.format(hit_1))
    print('Hit@3: {:.4f}'.format(hit_3))
    print('Hit@5: {:.4f}'.format(hit_5))
    print('Hit@10: {:.4f}'.format(hit_10))
if __name__ == '__main__':
    # Runs the TKGE-framework evaluation; call test_sc() instead for the
    # original ATISE ranking protocol.
    test()
| 23,755 | 8,482 |
# -*- coding: utf-8 -*-
import pytest
from six import iteritems
def test_security_definition_property_extraction(security_dict, security_spec):
    """Every key of each securityDefinitions entry must be mirrored as an
    attribute on the parsed security object; the Swagger 'in' key is exposed
    as the 'location' attribute (since 'in' is a Python keyword)."""
    definitions = security_dict['securityDefinitions']
    for definition_name, raw_definition in iteritems(definitions):
        parsed = security_spec.security_definitions[definition_name]
        for key, expected in iteritems(raw_definition):
            attr = 'location' if key == 'in' else key
            assert getattr(parsed, attr) == expected
@pytest.mark.parametrize(
    'resource, operation, expected_scopes',
    [
        ('example1', 'get_example1', [{'apiKey1': []}, {'apiKey2': []}]),
        ('example2', 'get_example2', [{'apiKey3': []}]),
        ('example3', 'get_example3', [{'apiKey1': [], 'apiKey2': []}, {'apiKey2': []}]),
        ('example4', 'get_example4', [{'oauth2': ['write:resource']}]),
        ('example5', 'get_example5', []),
    ]
)
def test_security_scopes(security_spec, resource, operation, expected_scopes):
    """The security requirements of each operation must expose exactly the
    expected scope mappings, in declaration order."""
    op = security_spec.resources[resource].operations[operation]
    actual_scopes = [req.security_scopes for req in op.security_requirements]
    assert actual_scopes == expected_scopes
| 1,265 | 384 |
def willYou(young, beautiful, loved):
    """Return True when the situation is contradictory: loved without being
    both young and beautiful, or young and beautiful yet unloved."""
    young_and_beautiful = young and beautiful
    return loved != young_and_beautiful
import json
from typing import Union, Optional, Tuple, List
import numpy as np
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from shared import LANG_TO_INT
class DataSplitter:
    """Loads JSON-lines language-classification data, vectorizes it with a
    configurable sklearn vectorizer, optionally standard-scales dense
    features, and produces train/validation/test splits.
    """
    def __init__(self, path: str, vectorizer: Optional[Union[DictVectorizer, TfidfVectorizer, CountVectorizer]] = None, seed: Optional[int] = None, scale: bool = True):
        """Args:
            path: JSON-lines file; each line has 'lang' plus 'features' (for
                DictVectorizer) or 'code' (for text vectorizers).
            vectorizer: feature extractor; defaults to a dense DictVectorizer.
            seed: random_state forwarded to train_test_split.
            scale: standard-scale features (forced off for text vectorizers).
        """
        self.data_path = path
        self.vectorizer = vectorizer or DictVectorizer(sparse=False)
        # A TF-IDF transformer is only needed when raw counts are produced.
        self.transformer = TfidfTransformer() if type(self.vectorizer) == CountVectorizer else None
        self.scale = type(self.vectorizer) not in (TfidfVectorizer, CountVectorizer) and scale
        self.scaler = StandardScaler()
        self.random_seed = seed
    def collect_features_data(self) -> Tuple[Union[np.ndarray, List[str]], np.ndarray]:
        """Read the data file and return (raw features, integer labels),
        dispatching on the configured vectorizer type."""
        if type(self.vectorizer) == DictVectorizer:
            return self._collect_dict_vectorizer_features()
        elif type(self.vectorizer) in (TfidfVectorizer, CountVectorizer):
            return self._collect_tfidf_features()
        else:
            raise NotImplementedError
    def _collect_dict_vectorizer_features(self) -> Tuple[np.ndarray, np.ndarray]:
        """Collect precomputed per-example feature dicts and labels."""
        examples = []
        ys = []
        with open(self.data_path, "r") as file:
            for line in file:
                info = json.loads(line)
                examples.append(info["features"])
                ys.append(LANG_TO_INT[info["lang"]])
        return np.array(examples), np.array(ys)
    def _collect_tfidf_features(self) -> Tuple[List[str], np.ndarray]:
        """Collect raw code strings (to be vectorized later) and labels."""
        examples = []
        ys = []
        with open(self.data_path, "r") as file:
            for line in file:
                info = json.loads(line)
                examples.append(info["code"])
                ys.append(LANG_TO_INT[info["lang"]])
        return examples, np.array(ys)
    def prepare_data(self, data: Union[np.ndarray, List[str]], fit: bool = False) -> np.ndarray:
        """Vectorize (and optionally scale) *data*; fit the vectorizer/scaler
        only when fit=True (i.e. on the training split). Returns a dense
        ndarray."""
        if type(self.vectorizer) in (TfidfVectorizer, CountVectorizer):
            assert not self.scale
        if fit:
            if self.scale:
                transformed = self.scaler.fit_transform(self.vectorizer.fit_transform(data))
            else:
                transformed = self.vectorizer.fit_transform(data)
        elif self.scale:
            transformed = self.scaler.transform(self.vectorizer.transform(data))
        else:
            transformed = self.vectorizer.transform(data)
        # Sparse vectorizer output is densified for downstream estimators.
        if type(transformed) != np.ndarray:
            transformed = transformed.toarray()
        return transformed
    def split_train_vali_test(self, X: Union[np.ndarray, List[str]], y: np.ndarray, split_1: float = 0.75, split_2: float = 0.66) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Split into train/vali/test (split_1 of data for train+vali, then
        split_2 of that for train) and return the six prepared arrays as
        (X_train, X_vali, X_test, y_train, y_vali, y_test)."""
        X_tv, X_test, y_tv, y_test = train_test_split(X, y, train_size=split_1, random_state=self.random_seed)
        X_train, X_vali, y_train, y_vali = train_test_split(X_tv, y_tv, train_size=split_2, random_state=self.random_seed)
        split_data = (self.prepare_data(X_train, fit=True), self.prepare_data(X_vali), self.prepare_data(X_test), y_train, y_vali, y_test)
        if type(self.vectorizer) == CountVectorizer:
            # NOTE(review): this fits the TF-IDF transformer on every split —
            # including the label arrays — reshaped to a single row, and
            # discards the result; looks unintentional, confirm intent.
            for split in split_data:
                self.transformer.fit_transform(split.reshape(1, -1))
        return split_data
| 3,555 | 1,109 |
#!/usr/bin/python
#Sorts based on top 50 CMetric, all callPaths - CMetric
#, all call paths - call path count and all samples
from __future__ import print_function
from bcc import BPF, PerfType, PerfSWConfig
from bcc import BPF
import sys
import ctypes as ct # For mapping the 'C' structure to Python
import argparse #For parsing command line arguments
import datetime
import os
import operator
import subprocess
import re
# arg validation
def positive_int(val):
    """argparse type callable: parse *val* as a non-negative integer.

    Raises argparse.ArgumentTypeError when *val* is not an integer or is
    negative (zero is accepted).
    """
    try:
        parsed = int(val)
    except ValueError:
        raise argparse.ArgumentTypeError("must be an integer")
    if parsed >= 0:
        return parsed
    raise argparse.ArgumentTypeError("must be positive")
def positive_nonzero_int(val):
    """argparse type callable: like positive_int, but additionally rejects
    zero with ArgumentTypeError."""
    parsed = positive_int(val)
    if parsed:
        return parsed
    raise argparse.ArgumentTypeError("must be nonzero")
# Command-line interface: only -x (target executable) is mandatory; all
# tuning knobs fall back to defaults applied further below.
parser = argparse.ArgumentParser(description="Generates stack traces for critical code sections")
parser.add_argument("-x", metavar="<Path to executable>", dest = "targetPath", required = True, help = "Full path to the executable file to be profiled - Required")
parser.add_argument("-t", metavar="<Threshold>", dest = "threshold", type = positive_int, required = False, help = "Number active threads to trigger stack trace. Default = total no. of threads/2" )
parser.add_argument("-f", metavar="<Sampling Frequency>", dest = "sample_freq", type = positive_int, required = False, help = "Sampling frequency in Hz. Default = 333Hz (equivalent to 3 ms)" )
parser.add_argument("-d", metavar="<Stack Depth>", dest = "stack_depth", type = positive_int, required = False, help = "Maximum Stack depth for stack unwinding. Default = 10" )
parser.add_argument("-b", metavar="<Ring buffer Size>", dest = "buffer", type = positive_int, required = False, help = "Number of pages to be allocated for the ring buffer, Default = 64" )
parser.add_argument("--threads_only", help = "Trace threads alone", action = "store_true")
parser.add_argument("--process_only", help = "Trace processes alone", action = "store_true")
parser.add_argument("--trace_lib", help = "Include library paths in tracing", action = "store_true")
parser.add_argument("--kernel_stack", help = "Get kernel stack traces", action = "store_true")
args = parser.parse_args()
# define BPF program (C source compiled by BCC). Placeholders PGM_FILTER,
# TASK_NEWTASK, STACK_FILTER, TRACE_THREADS_ONLY and GET_KERNEL_STACK are
# substituted below before loading.
# Fix: sched_process_exit previously did `countVal -= 1;`, which decremented
# the local *pointer* into the count map (a no-op on the map value), so the
# total thread count never went down on thread exit. It now uses the atomic
# lock_xadd decrement, matching the increment sites and the decrement in
# sched_switch.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf_perf_event.h>
#include <linux/sched.h>
#include <linux/types.h>
//Structure to pass information from the kernel probe to the user probe
struct key_t {
    u32 tid; //Thread ID
    u32 tgid; // Parent thread ID
    u64 cm; //CMetric
    int source; // 0 - sampling, 1 - critical time slice, 2 - non-critical time slice
    int user_stackid;
    int kernel_stackid;
    u64 inst_ptr;
    int store_stackTop;
};
BPF_HASH(threadList, u32, u32); //Stores threadIds of participating threads - Global
BPF_HASH(threadCount, u32, u32, 1); //Stores number of active threads - Global
BPF_HASH(tsp, u32, u64, 1); //Stores timestamp of previous event
BPF_ARRAY(count, u32, 1); //Stores the total thread count (parent not included)
BPF_HASH(global_CM, u32, u64, 1); //Keeps track of cumulative sum of CMetric - Global
BPF_PERCPU_ARRAY(local_CM, u64, 1); // To store the snapshot of global_CM when a thread is switched in
BPF_HASH(CM_hash, u32, u64); // Criticality Metric hash map for each thread
BPF_HASH(GLOBAL_WT_TC, u32, u64,1); //Stores the cumulative sum of weighted thread Count - Global
BPF_PERCPU_ARRAY(LOCAL_WT_TC, u64,1); //Stores the snapshot of GLOBAL_WT_TC - CPU Local
BPF_PERCPU_ARRAY(inTS, u64, 1); //Store the time at which a thread was switched in - CPU Local
BPF_PERF_OUTPUT(events); //Buffer to write event details
BPF_STACK_TRACE(user_stacktraces, 4086);
BPF_STACK_TRACE(kernel_stacktraces, 4086);
/*sched_switch_args {
    // from /sys/kernel/debug/tracing/events/sched/sched_switch/format
    u64 __unused__;
    char prev_comm[16];
    pid_t prev_pid;
    int prev_prio;
    long prev_state;
    char next_comm[16];
    pid_t next_pid;
    int next_prio;
};
*/
TRACEPOINT_PROBE(task, task_rename){
    u32 threadId, totalCount;
    char comm[16];
    u32 zero32 = 0, one = 1;
    int len = bpf_probe_read_str(&comm, sizeof(args->newcomm), args->newcomm);
    if(!len)
        return 0;
    //Compare the command argument with traced command
    if(PGM_FILTER){
        bpf_probe_read(&threadId, sizeof(threadId), &args->pid);
        threadList.insert(&threadId, &zero32); //Store the thread ID in the hash startTracing.lookup_or_init(&threadId, &zero32);
        u32 *countVal = count.lookup_or_init(&zero32, &zero32);
        lock_xadd(countVal,1);
    }
    return 0;
}
TASK_NEWTASK
int do_perf_event(struct bpf_perf_event_data *ctx){
    u32 zero32 = 0;
    u32 threadId = bpf_get_current_pid_tgid();
    u32 *val = threadList.lookup(&threadId);
    if(!val)
        return 0;
    u32 *activeCount = threadCount.lookup(&zero32);
    if(!activeCount)
    {return 0;}
    u32 tempCount;
    bpf_probe_read(&tempCount, sizeof(tempCount), activeCount);
    u32 *totalThreadCount = count.lookup(&zero32);
    if(!totalThreadCount)
        return 0;
    u32 totalCount;
    bpf_probe_read(&totalCount, sizeof(totalCount), totalThreadCount);
    if( (tempCount <= STACK_FILTER) || tempCount ==1 ){
        struct key_t key = {};
        key.tid = bpf_get_current_pid_tgid();
        key.tgid = bpf_get_current_pid_tgid()>>32;
        key.cm = 0;
        key.source = 0;
        if(TRACE_THREADS_ONLY){
            key.inst_ptr = PT_REGS_IP(&ctx->regs); //Get the instruction pointer
            events.perf_submit(ctx, &key, sizeof(key)); //Write details to the ring buffer
        }
    }
    return 0;
}
TRACEPOINT_PROBE(sched, sched_process_exit){
    u32 zero32 = 0;
    //Get the current tid
    u32 threadId;
    bpf_probe_read(&threadId, sizeof(threadId), &args->pid);
    //Check if the thread ID belongs to the application
    u32 *val = threadList.lookup(&threadId);
    if(!val)
        return 0;
    //Decrement the number of threads
    u32 *countVal = count.lookup(&zero32);
    if(!countVal)
        return 0;
    lock_xadd(countVal, -1);
    return 0;
}
TRACEPOINT_PROBE(sched, sched_wakeup){
    u32 targetID, zero32 = 0, status, one32 = 1;
    //Check if thread being woken up belongs to the application
    bpf_probe_read(&targetID, sizeof(targetID), &args->pid);
    u32 *list = threadList.lookup(&targetID);
    if (!list)
        return 0;
    /////////////////////////////////////////////////////////////////////
    if(args->success){ //If waking was successful
        u32 *activeCount = threadCount.lookup(&zero32);
        if(!activeCount)
        {return 0;}
        u32 prev_tCount; //Local variable to store thread count
        bpf_probe_read(&prev_tCount, sizeof(prev_tCount), activeCount);
        //Increment thread count if thread was inactive
        bpf_probe_read(&status, sizeof(status), list);
        if(status == 0)
            lock_xadd(activeCount,1);
        //Set thread as active
        threadList.update(&targetID,&one32);
    }
    return 0;
}
//Tracepoint probe for the Sched_Switch tracepoint
TRACEPOINT_PROBE(sched, sched_switch){
    u32 one32=1, arrayKey=0, zero32=0;
    u32 *listVal, *listVal1; //Pointers to entries in threadList map
    u32 next_pid, prev_pid;
    u64 zero64 = 0;
    //Copy data to BPF stack
    bpf_probe_read(&next_pid, sizeof(next_pid), &args->next_pid);
    bpf_probe_read(&prev_pid, sizeof(prev_pid), &args->prev_pid);
    //Look up thread ids in the list created by sys_clone()
    listVal1 = threadList.lookup(&next_pid);
    listVal = threadList.lookup(&prev_pid);
    u32 prev=0, next=0;
    if(listVal){
        bpf_probe_read(&prev, sizeof(prev),listVal);
        prev = 1;
    }
    if(listVal1){
        bpf_probe_read(&next, sizeof(next),listVal1);
        next = 1;
    }
    //Return if the switching threads do not belong to the application
    if( !prev && !next)
        return 0;
    //////////////////////////////////////////////////////////////////////
    //Calculate values common for all switching events
    u64 interval, intervalCM;
    u64 *oldTS = tsp.lookup_or_init(&arrayKey, &zero64);
    if(!oldTS)
    {return 0;}
    u64 tempTS;
    bpf_probe_read(&tempTS, sizeof(tempTS), oldTS); //Copy Old time from bpf map to local variable
    u64 newTS = bpf_ktime_get_ns();
    tsp.update(&arrayKey, &newTS); //Update time stamp
    //The thread count is initialized to one as the first switch in event is always missed.
    u32 *ptr_threadCount = threadCount.lookup_or_init(&arrayKey, &one32);
    if(!ptr_threadCount)
    {return 0;}
    int prev_tc; //Temporary variable to store thread count for the previous switching interval
    bpf_probe_read(&prev_tc, sizeof(prev_tc),ptr_threadCount);
    if(newTS < tempTS)//Very rarely, event probes are triggered out of order, which are ignored
        return 0;
    if(tempTS==0 || prev_tc==0){ //If first event or no active threads in during the previous interval, prev interval = 0
        interval = 0;
    }
    else
        interval = (newTS - tempTS); //Switching interval
    u64 *ptr_globalCM = global_CM.lookup_or_init(&arrayKey, &zero64);
    if(!ptr_globalCM)
        return 0;
    //Calculate the CMetric for previous interval and add it to global_CM
    if (interval != 0){
        intervalCM = interval/prev_tc;
        lock_xadd(ptr_globalCM, intervalCM);
    }
    //Calculate weighted thread count for previous interval
    u64 wt_threadCount = (interval) * prev_tc;
    u64 *g_wt_threadCount = GLOBAL_WT_TC.lookup_or_init(&arrayKey, &zero64);
    if(!g_wt_threadCount)
        return 0;
    lock_xadd(g_wt_threadCount, wt_threadCount); //Add to global weighted thread count
    //////////////////////////////////////////////////////////////////////
    //If previous thread was a peer thread
    if(prev){
        //Decrement active thread count only if thread switched out is not in RUNNING (0) state
        if(args->prev_state != TASK_RUNNING){
            if(prev_tc > 0 ){
                lock_xadd(ptr_threadCount, -1);
            }
            //Mark the thread as inactive in the threadList hash map
            threadList.update(&prev_pid,&zero32);
        }
        else
            //Mark the thread as active as thread is switched out to TASK_RUNNING state
            threadList.update(&prev_pid,&one32);
        u64 temp;
        //Get updated CM
        bpf_probe_read(&temp, sizeof(temp),ptr_globalCM);
        //Get snapshot of global_CM which was stored in local_CM when prev_pid was switched in
        u64 *cpuCM = local_CM.lookup_or_init(&arrayKey, &zero64);
        if(!cpuCM)
        {return 0;}
        //Update the CM of the thread by adding the CM for the time slice
        u64 updateCM = temp - (*cpuCM);
        u64 *tCM = CM_hash.lookup_or_init(&prev_pid, &zero64);
        if(!tCM)
        {return 0;}
        *tCM = *tCM + updateCM;
        //Get LOCAL_WT_TC, the thread's weighted threadCount at the time it was switched in.
        u64 *t_wt_threadCount;
        t_wt_threadCount = LOCAL_WT_TC.lookup_or_init(&arrayKey, &zero64);
        if(!t_wt_threadCount)
        {return 0;}
        u64 temp_g_wt_threadCount, temp_t_wt_threadCount;
        bpf_probe_read(&temp_g_wt_threadCount, sizeof(temp_g_wt_threadCount), g_wt_threadCount);
        bpf_probe_read(&temp_t_wt_threadCount, sizeof(temp_t_wt_threadCount), t_wt_threadCount);
        //Reset the per-CPU CMetric counter
        local_CM.update(&arrayKey, &zero64);
        //Reset local weighted ThreadCount counter
        LOCAL_WT_TC.update(&arrayKey, &zero64);
        //Get time when this thread was switched in
        oldTS = inTS.lookup_or_init(&arrayKey, &zero64);
        if(!oldTS)
            return 0;
        u64 switch_in_time, timeSlice;
        bpf_probe_read(&switch_in_time, sizeof(switch_in_time), oldTS);
        timeSlice = (newTS - switch_in_time);
        //Reset switch in time
        inTS.update(&arrayKey, &zero64);
        u32 *totalThreadCount = count.lookup(&zero32);
        if(!totalThreadCount)
            return 0;
        u32 totalCount;
        bpf_probe_read(&totalCount, sizeof(totalCount), totalThreadCount);
        //Calculate the average number of threads
        u32 ratio = (temp_g_wt_threadCount - temp_t_wt_threadCount) / timeSlice;
        struct key_t key = {};
        key.tid = prev_pid;
        key.tgid = bpf_get_current_pid_tgid()>>32;
        key.cm = updateCM;
        if( (ratio <= STACK_FILTER || ratio == 1) && TRACE_THREADS_ONLY){ //If thread_avg < threshold and not parent thread
            key.user_stackid = user_stacktraces.get_stackid(args, BPF_F_USER_STACK);
            if (GET_KERNEL_STACK && args->prev_state != TASK_RUNNING)
                key.kernel_stackid= kernel_stacktraces.get_stackid(args, 0);
            else
                key.kernel_stackid = -1;
            key.source = 1;
        }
        else{
            key.user_stackid = 0;
            key.source = 2;
        }
        key.store_stackTop = ((prev_tc <= STACK_FILTER) || prev_tc == 1)? 1:0;
        if(TRACE_THREADS_ONLY)
            events.perf_submit(args, &key, sizeof(key));
    }
    //Next thread is a peer thread
    if(next){
        //Get the previous state of this thread from the THREADLIST
        u32 tempNext;
        bpf_probe_read(&tempNext, sizeof(tempNext), listVal1);
        //If the thread was not in TASK_RUNNING state
        if(tempNext == 0){
            lock_xadd(ptr_threadCount, 1); //Increment the number of active threads
        }
        threadList.update(&next_pid, &one32); //Set the thread status to RUNNING state
        u64 temp;
        //Get updated CM and store it to the CPU counter
        bpf_probe_read(&temp, sizeof(temp),ptr_globalCM);
        local_CM.update(&arrayKey,&temp);
        //Store switch in time
        inTS.update(&arrayKey, &newTS);
        //Store the local cumulative weighted thread count
        u64 temp_g_wt_threadCount;
        bpf_probe_read(&temp_g_wt_threadCount, sizeof(temp_g_wt_threadCount), g_wt_threadCount);
        LOCAL_WT_TC.update(&arrayKey, &temp_g_wt_threadCount);
    }
    return 0;
}
"""
task_newtask_pgm = """TRACEPOINT_PROBE(task, task_newtask){
u32 zero32=0;
char comm[TASK_COMM_LEN];
bpf_get_current_comm(&comm, sizeof(comm));
//We can also check for the parent id in the threadlist
//But if the parent was created before starting tracing this can fail
//So we check the command line instead
//If application is being traced
if(PGM_FILTER){
u32 threadId;
bpf_probe_read(&threadId, sizeof(threadId), &args->pid);
u32 *val = threadList.lookup_or_init(&threadId, &zero32); //Store the thread ID in the hash
u32 *countVal = count.lookup_or_init(&zero32, &zero32);
lock_xadd(countVal,1);
}
return 0;
}"""
# Path to executable and its basename (used for the in-kernel comm filter).
targetPath = ""
pgmName = ""
# Segments for customizing the generated BPF program.
task_newtask_probe = task_newtask_pgm
trace_threads_only = '1'
get_kernel_stack = '0'
if args.threads_only:
    trace_threads_only = 'key.tgid != key.tid'
if args.process_only:
    task_newtask_probe = ''
if args.kernel_stack:
    get_kernel_stack = '1'
# Get the path to target (-x is required, so targetPath is always set).
if args.targetPath is not None:
    targetPath = args.targetPath.rstrip(os.sep)
    pgmName = os.path.basename(targetPath)
# Build the comm filter from up to the first four characters of the program
# name. Previously this indexed pgmName[0..3] unconditionally, raising
# IndexError for executables whose names are shorter than four characters;
# for names of four or more characters the generated C expression is
# unchanged.
if pgmName:
    pgm_filter = ' && '.join(
        "comm[%d]=='%c'" % (i, ch) for i, ch in enumerate(pgmName[:4]))
else:
    pgm_filter = '0'  # no usable program name: filter matches nothing
# Threshold for stack collection: explicit -t value, else half of the
# observed total thread count (evaluated inside the BPF program).
if args.threshold is not None:
    stack_filter = '%d' % ( (args.threshold) )
else:
    stack_filter = 'totalCount/2'
freq = args.sample_freq if args.sample_freq is not None else 333  # Hz
depth = args.stack_depth if args.stack_depth is not None else 10
buffer_size = args.buffer if args.buffer is not None else 64  # perf pages
# Substitute all placeholders into the BPF program text.
bpf_text = bpf_text.replace('TASK_NEWTASK', task_newtask_probe)
bpf_text = bpf_text.replace('PGM_FILTER', pgm_filter)
bpf_text = bpf_text.replace('STACK_FILTER', stack_filter)
bpf_text = bpf_text.replace('TRACE_THREADS_ONLY', trace_threads_only)
bpf_text = bpf_text.replace('GET_KERNEL_STACK', get_kernel_stack)
#Print the customized program
#print(bpf_text)
print ("\n\n---Press Ctrl-C to start post processing---")
# load BPF program: compiles the generated C and attaches the tracepoints,
# then samples CPU_CLOCK at `freq` Hz to drive do_perf_event.
b = BPF(text=bpf_text)
b.attach_perf_event(ev_type=PerfType.SOFTWARE,
                    ev_config=PerfSWConfig.CPU_CLOCK, fn_name="do_perf_event",
                    sample_freq=freq)
class Data(ct.Structure):
    """ctypes mirror of the event struct the BPF program writes to the
    perf ring buffer; decoded in print_event()."""
    _fields_ = [
        ("tid", ct.c_uint),              # thread id
        ("tgid", ct.c_uint),             # thread-group (process) id
        ("cm", ct.c_ulonglong),          # criticality metric value
        ("source", ct.c_uint),           # 0=sample, 1=critical stack, 2=reset
        ("user_stack_id", ct.c_int),     # user stack-trace table id (<0 = none)
        ("kernel_stack_id", ct.c_int),   # kernel stack-trace table id (<0 = none)
        ("inst_ptr", ct.c_ulonglong),    # sampled instruction pointer
        ("store_stackTop", ct.c_int)]   # whether stack top may stand in for a sample
# Handles to the BPF stack-trace tables.
user_stack_traces = b["user_stacktraces"]
kernel_stack_traces = b["kernel_stacktraces"]
sampleAddr = dict() #Stores addresses corresponding to samples (per thread id)
CMetric = dict() #Dictionary to store CMetric
CM_Entry = 1 #Number of CMetric entry
CMetric_sampleAddr = dict() # Stores the sample address for each Cmetric - to get line of code
CMetric_callPath = dict() # Stores the call path for each CMetric
user_symbolMap = dict() #Store symbols corresponding addresses
kernel_symbolMap = dict() # Cache of kernel address -> symbol lookups
total_switch = 0 # Total number of context switches observed
noSample = 0 # Critical switches whose stack trace carried no samples
###############################################
def trimSymbol(string_ret):
    """Normalize a resolved symbol string.

    Drops the argument list (everything from '(') or the '@'-suffix,
    appends '()', and keeps the trailing '[module]' part when present.
    """
    if '[' not in string_ret:
        # No module suffix: just strip the argument list.
        return string_ret.split('(', 1)[0] + '()'
    name_part, module_part = string_ret.rsplit('[', 1)
    if '@' in name_part:
        # e.g. "malloc@plt[libc]" -> keep everything before the '@'.
        base = name_part.split('@', 1)[0]
    else:
        base = name_part.split('(', 1)[0]
    return base + '()[' + module_part
################################################
def getKernelStack(kernel_stack_id):
    """Resolve a kernel stack-trace id into a printable call path.

    Frames after the first are prefixed with "\\n\\t<---"; symbol lookups
    are cached in the module-level kernel_symbolMap.
    """
    if kernel_stack_id < 0:
        frames = []
    else:
        frames = kernel_stack_traces.walk(kernel_stack_id)
    names = []
    for addr in frames:
        if addr in kernel_symbolMap:
            sym = kernel_symbolMap[addr]
        else:
            sym = b.ksym(addr)
            kernel_symbolMap[addr] = sym
        # Keep only the symbol name (drop the "+offset" part) and trim
        # newlines, spaces and quotes.
        names.append((sym.split('+', 1)[0]).strip("\n ' '"))
    return "\n\t<---".join(names)
################################################
def print_event(cpu, data, size):
    """Perf-buffer callback: fold one BPF event into the in-memory maps.

    Event kinds (event.source):
      0 -- PC sample: remember the sampled address/symbol for the thread.
      1 -- critical context switch: record the thread's user (and optionally
           kernel) call path together with its CMetric and samples.
      2 -- non-critical context switch: discard the thread's samples.
    """
    global CM_Entry #Unique id for stack traces
    global total_switch # Total number of context switches
    global noSample #Stores the number of switches without samples
    event = ct.cast(data, ct.POINTER(Data)).contents
    flag = 0
    user_call_path = ""
    kernel_call_path = ""
    if event.source == 0: #Sample data
        if event.inst_ptr in user_symbolMap:
            string_ret = user_symbolMap[event.inst_ptr]
        else:
            #Map address to symbols (cached for later events)
            string_ret = b.sym(event.inst_ptr, event.tgid, show_offset=False, show_module = True)
            string_ret = trimSymbol(string_ret)
            user_symbolMap[event.inst_ptr]=string_ret
        if "unknown" in string_ret:
            return
        #Add to list of samples for this thread ID
        if event.tid not in sampleAddr:
            sampleAddr[event.tid] = list()
        if (string_ret.find(pgmName) >= 0): # If address belongs to application address map
            # Keep the raw address so addr2line can resolve the source line later.
            sampleAddr[event.tid].append("0x" + format(event.inst_ptr, 'x'))
        else:
            sampleAddr[event.tid].append(string_ret)
        return
    if event.source == 2: # Reset Sample array if time slice not critical
        if event.tid in sampleAddr:
            sampleAddr[event.tid]=[]
        total_switch += 1
        return
    if event.source == 1: #Critical Stack trace
        skip_stackTop = 0    # set when the stack-top symbol is unknown
        appl_addr = 0        # set once a frame belongs to the application binary
        total_switch += 1
        user_stack =[] if event.user_stack_id < 0 else \
            user_stack_traces.walk(event.user_stack_id)
        #For each address in the stack trace, get the symbols and create call path
        for addr in user_stack:
            if addr in user_symbolMap:
                string_ret = user_symbolMap[addr]
            else:
                string_ret = b.sym(addr, event.tgid, show_offset=False, show_module = True)
                string_ret = trimSymbol(string_ret)
                user_symbolMap[addr]=string_ret
            if "unknown" in string_ret:
                if flag == 0:
                    skip_stackTop = 1
                continue
            if (string_ret.find(pgmName) >= 0): # If address belongs to application address map
                appl_addr = 1
            if appl_addr or args.trace_lib:
                if flag == 0: #Store top address of stack trace, if no samples
                    if event.tid not in sampleAddr:
                        sampleAddr[event.tid] = list()
                    if len(sampleAddr[event.tid]) ==0 and event.store_stackTop == 1 and skip_stackTop ==0:
                        noSample += 1
                        if appl_addr:
                            # 'z' marks a stack-top (return) address, not a
                            # real sample -- decoded in combine_samples().
                            sampleAddr[event.tid].append("0xz" + format(addr, 'x'))
                    user_call_path = user_call_path+ (string_ret.split('+',1)[0]).strip("\n ' '")
                else: #If not stack top address
                    user_call_path = user_call_path + "\n\t" + "<---" + (string_ret.split('+',1)[0]).strip("\n ' '")
                flag += 1
                if flag==depth: #Number of stack frames
                    break
        if flag>0:
            if get_kernel_stack == '1' and event.kernel_stack_id >= 0:
                kernel_call_path = getKernelStack(event.kernel_stack_id)
            CMetric[CM_Entry] = event.cm #Stores Cmetric of this critical stack trace
            #Stores sample addresses of this critical stack trace
            CMetric_sampleAddr[CM_Entry] = list(sampleAddr[event.tid])
            CMetric_callPath[CM_Entry] = (user_call_path, kernel_call_path) #Stores call path of this critical stack trace
            CM_Entry += 1
        sampleAddr[event.tid]=[]
        return
#Function to execute for each event written to the ring buffer
b["events"].open_perf_buffer(print_event, page_cnt=buffer_size)
#To print criticality metric of each thread
threadCM = b.get_table("CM_hash")
# NOTE(review): `sum` shadows the builtin of the same name from here on.
sum = 0;
criticalSwitch = dict()
criticalSwitch_allCM= dict() # user call path -> [total CMetric, frequency]
criticalLine = dict()
critLineSamples = dict()
critLineSamples_all = dict() # user call path -> {sample address/symbol: count}
critKernelPaths = dict() # user call path -> {kernel call path: count}
allFunction = dict()
allLines = dict()
addrMap_fun = dict() # address -> function name (addr2line cache)
addrMap_line= dict() # address -> source line (addr2line cache)
def combine_Results(function, line, count, resultFunc, resultLine, tempFunc, tempLine):
    """Add `count` hits of (function, line) to two parallel tallies.

    resultFunc/resultLine accumulate across the top-10 critical paths;
    tempFunc/tempLine hold the current critical path alone. Each *Func dict
    maps function -> total count, each *Line dict maps
    function -> {line: count}. Empty function names are ignored.
    """
    if not function:
        return
    # The aggregate and the per-path tallies receive the identical update.
    for funcCounts, lineCounts in ((resultFunc, resultLine), (tempFunc, tempLine)):
        if function in funcCounts:
            funcCounts[function] += count
            lineCounts[function][line] = lineCounts[function].get(line, 0) + count
        else:
            funcCounts[function] = count
            lineCounts[function] = {line: count}
    return
def combine_samples(addrList, resultFunc, resultLine):
    """Resolve the sampled addresses of one critical path and fold their
    function/line frequencies into resultFunc/resultLine, then print this
    path's breakdown (top 5 functions, top 3 lines each).

    addrList: {address string or library symbol: hit count} for one path.
    resultFunc/resultLine: aggregates accumulated across paths by the caller.
    """
    tempFunc = dict()    # this path's function -> count
    tempLine = dict()    # this path's function -> {line: count}
    function = ""
    line = ""
    addrStringList =[]   # addresses still needing addr2line resolution
    addrCountList = []   # their hit counts, same order
    stackTopList = []    # 1 where the address was a stack-top marker
    for element, count in addrList.items():
        specialString = ""
        if "0x" in element:
            #'0xz' pattern in the address denotes this is a stack top address(return address)
            # not a sample address
            if 'z' in element:
                specialString=" (StackTop)"
                #remove 'z'
                element = element.replace('z','')
            else:
                specialString = ""
            if element in addrMap_fun:
                # Cache hit: no need to run addr2line again.
                function = addrMap_fun[element]
                line = addrMap_line[element]
                #Add (StackTop) label to the line
                if specialString:
                    line = line + specialString
                #Combine all samples for this path
                combine_Results(function, line, count, \
                    resultFunc, resultLine, tempFunc, tempLine);
            else:
                #Prepre to call addr2line
                addrStringList.append(element)
                addrCountList.append(count)
                if specialString:
                    stackTopList.append(1)
                else:
                    stackTopList.append(0)
                #result = str(subprocess.check_output(['addr2line', '-s', '-C', '-f', '-p', '-i', element, '-e', "/data/rn1115/cfd/test/IncNavierStokesSolver-g"], stderr=subprocess.STDOUT))
        else:
            #library functions
            function = element
            line = ""
            combine_Results(function, line, count, resultFunc, \
                resultLine, tempFunc, tempLine)
    #Map address to function name and line of code
    if addrStringList != []:
        # Single addr2line invocation covering every unresolved address.
        cmd = ['addr2line', '-s', '-C', '-f', '-p']
        cmd.extend(addrStringList)
        cmdLast = ['-e', targetPath]
        cmd.extend(cmdLast)
        sourceLines = str(subprocess.check_output(cmd, stderr=subprocess.STDOUT))
        # NOTE(review): under Python 3 check_output returns bytes, so str()
        # keeps "\n" as a literal escape and this split would not separate
        # lines; looks written for Python 2 -- confirm.
        for result in sourceLines.split('\n'):
            specialString = ""
            if result:
                count = addrCountList.pop(0)
                if stackTopList.pop(0) == 1:
                    specialString = " (StackTop)"
                else:
                    specialString = ""
                result = result.strip("\n ' '")
                if result:
                    #Retrieve function and line number from addr2line result
                    result = result.split('\n', 1)[0]
                    result = result.strip("\n ' '")
                    if " at " in result:
                        function = result.split(" at ", 1)[0]
                        line = result.split(" at ", 1)[1]
                    function = function.strip()
                    if function:
                        # NOTE(review): `element` here still holds the last
                        # value from the first loop, so the cache appears to
                        # be keyed by the wrong address -- confirm intent.
                        addrMap_fun[element] = function
                    line = line.strip()
                    if line:
                        line = line.split(' (', 1)[0]
                        addrMap_line[element] = line
                        if specialString:
                            line = line + specialString
                    #There will not be any line if sample is not from application binary
                    else:
                        addrMap_line[element] = ""
                    combine_Results(function, line, count, \
                        resultFunc, resultLine, tempFunc, tempLine);
    i=0
    print("\tFunctions and lines + Frequency")
    print("\t--------------------------------")
    # Report this path's top 5 functions with their top 3 lines each.
    for key, value in sorted(tempFunc.items(), key=lambda x:x[1], reverse=True):
        print("\n\t%s -- %u" % (key, value))
        k=0
        for line, count in sorted(tempLine[key].items(), key=lambda x:x[1], reverse=True):
            print("\t\t%s -- %u" % (line, count))
            k = k+1
            if k==3:
                break
        i = i+1
        if i == 5:
            break
    return
def choose_path(pathDict, strategy):
    """Print the top-10 critical call paths (sorted by accumulated CMetric)
    with their resolved functions/lines, then a combined top-10 function
    summary across those paths.

    pathDict: {user call path: [total CMetric, frequency]}.
    strategy: currently unused.
    """
    resultFunc = dict()
    resultLine = dict()
    i=0
    print ("***************************************************")
    # Sort paths by accumulated CMetric (value[0]), highest first.
    for key, value in sorted(pathDict.items(), key=lambda x:x[1][0], reverse=True):
        if ( i<10 ):
            print("\nCritical Path %d -- CMetric, Frequency" % (i+1))
            print("----------------------------------------")
            print("\t%s --%u, %d. \n" % (key, value[0], value[1]))
            addrList = critLineSamples_all[key]
            #for element, count in addrList.items():
            #    print(element,count)
            combine_samples(addrList, resultFunc, resultLine)
            if get_kernel_stack == '1':
                print("\n\tKernel Call Paths")
                print("\t-----------------------")
                for path, count in sorted(critKernelPaths[key].items(), key=lambda x:x[1], reverse=True):
                    print("\t%s -- %d\n" % (path, count))
            i+= 1;
        else:
            break;
    print ("***************************************************")
    i=0
    print ("\nTop Critical Functions and lines of code with frequency")
    # Combined results of the top-10 paths: top 10 functions, 3 lines each.
    for key, value in sorted(resultFunc.items(), key=lambda x:x[1], reverse=True):
        print("\n\t%s -- %u" % (key, value))
        k=0
        for line, count in sorted(resultLine[key].items(), key=lambda x:x[1], reverse=True):
            print("\t\t%s -- %u" % (line, count))
            k = k+1
            if k==3:
                break
        i = i+1
        if i == 10:
            break
    print ("***************************************************")
    resultFunc.clear()
    resultLine.clear()
    return
try:
    # Service the perf ring buffer until the user hits Ctrl-C; post
    # processing runs in the finally block below.
    while 1:
        b.kprobe_poll()
finally:
    #Post Processing the stack traces
    start = datetime.datetime.now()
    print("Criticality Metric for each thread");
    for k, v in sorted(threadCM.items(), key=lambda x:x[1].value):
        print("%10u %u " % ((k.value), (v.value)))
        sum += v.value
    print ("Sum = %d" % sum)
    print ("***************************************************")
    #for key, value in sorted(CMetric.items(), key=lambda x:x[1], reverse= True): # key is CM_Entry, value is CMetric
    for key, value in CMetric.items(): # key is CM_Entry, value is CMetric
        user_callPath = CMetric_callPath[key][0]
        kernel_callPath = CMetric_callPath[key][1]
        #Combine all call paths irrespective of CMetric value and then sort as per CMetric value
        if user_callPath in criticalSwitch_allCM:
            criticalSwitch_allCM[user_callPath][0] += value
            criticalSwitch_allCM[user_callPath][1] += 1
        else:
            criticalSwitch_allCM[user_callPath] = [value,1]
        #Combine the sample addresses
        if user_callPath not in critLineSamples_all:
            critLineSamples_all[user_callPath] = dict()
        lineDict = critLineSamples_all[user_callPath]
        addrList = CMetric_sampleAddr[key]
        for element in addrList:
            if element in lineDict:
                lineDict[element] += 1
            else:
                lineDict[element] = 1
        #Combine kernel call paths
        if user_callPath not in critKernelPaths:
            critKernelPaths[user_callPath] = dict()
        allKernelPaths = critKernelPaths[user_callPath]
        if kernel_callPath in allKernelPaths:
            allKernelPaths[kernel_callPath] += 1
        else:
            allKernelPaths[kernel_callPath] = 1
        user_callPath = ""
        kernel_callPath = ""
    print ("Critical Call Paths, functions and Lines of Code:")
    choose_path(criticalSwitch_allCM, 1)
    end = datetime.datetime.now()
    post_time = end - start
    print ("Post Processing time in milli seconds: %u" % int(post_time.total_seconds() * 1000))
    # NOTE(review): CM_Entry starts at 1, so the critical-switch count printed
    # here looks off by one -- confirm.
    print ("Total switches: %u Critical switches: %u" % (total_switch, CM_Entry ))
    print ("Stack trace with no samples: %u" % noSample)
    print ("***************************************************")
    sys.exit()
| 31,868 | 10,326 |
# -*- coding: utf-8 -*-
import random
import numpy as np
import scipy
import pandas as pd
import pandas
import numpy
import json
def resizeFeature(inputData,newSize):
# inputX: (temporal_length,feature_dimension) #
originalSize=len(inputData)
#print originalSize
if originalSize==1:
inputData=np.reshape(inputData,[-1])
return np.stack([inputData]*newSize)
x=numpy.array(range(originalSize))
f=scipy.interpolate.interp1d(x,inputData,axis=0)
x_new=[i*float(originalSize-1)/(newSize-1) for i in range(newSize)]
y_new=f(x_new)
return y_new
def readData(video_name,data_type=["spatial","temporal"]):
spatial_dir="./spatial/csv_action/"
temporal_dir="./temporal/csv_action/"
data=[]
for dtype in data_type:
if dtype=="spatial":
df=pandas.read_csv(spatial_dir+video_name+".csv")
elif dtype=="temporal":
df=pandas.read_csv(temporal_dir+video_name+".csv")
data.append(df.values[:,:])
lens=[len(d) for d in data]
#print lens
min_len=min(lens)
new_data=[d[:min_len] for d in data]
new_data=numpy.concatenate(new_data,axis=1)
return new_data
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
def getDatasetDict():
df=pd.read_csv("./info/video_info.csv")
json_data= load_json("./info/activity_net.v1-3.min.json")
database=json_data['database']
out_dict={}
for i in range(len(df)):
video_name=df.video.values[i]
video_info=database[video_name[2:]]
video_new_info={}
video_new_info['duration_frame']=df.numFrame.values[i]
video_new_info['duration_second']=df.seconds.values[i]
video_new_info['annotations']=video_info['annotations']
out_dict[video_name]=video_new_info
return out_dict
def poolData(data,videoAnno,num_prop=100,num_bin=1,num_sample_bin=3,pool_type="mean"):
feature_frame=len(data)*16
video_frame=videoAnno['duration_frame']
video_second=videoAnno['duration_second']
corrected_second=float(feature_frame)/video_frame*video_second
fps=float(video_frame)/video_second
st=16/fps
if len(data)==1:
video_feature=np.stack([data]*num_prop)
video_feature=np.reshape(video_feature,[num_prop,400])
return video_feature
x=[st/2+ii*st for ii in range(len(data))]
f=scipy.interpolate.interp1d(x,data,axis=0)
video_feature=[]
zero_sample=np.zeros(num_bin*400)
tmp_anchor_xmin=[1.0/num_prop*i for i in range(num_prop)]
tmp_anchor_xmax=[1.0/num_prop*i for i in range(1,num_prop+1)]
num_sample=num_bin*num_sample_bin
for idx in range(num_prop):
xmin=max(x[0]+0.0001,tmp_anchor_xmin[idx]*corrected_second)
xmax=min(x[-1]-0.0001,tmp_anchor_xmax[idx]*corrected_second)
if xmax<x[0]:
#print "fuck"
video_feature.append(zero_sample)
continue
if xmin>x[-1]:
video_feature.append(zero_sample)
continue
plen=(xmax-xmin)/(num_sample-1)
x_new=[xmin+plen*ii for ii in range(num_sample)]
y_new=f(x_new)
y_new_pool=[]
for b in range(num_bin):
tmp_y_new=y_new[num_sample_bin*b:num_sample_bin*(b+1)]
if pool_type=="mean":
tmp_y_new=np.mean(y_new,axis=0)
elif pool_type=="max":
tmp_y_new=np.max(y_new,axis=0)
y_new_pool.append(tmp_y_new)
y_new_pool=np.stack(y_new_pool)
y_new_pool=np.reshape(y_new_pool,[-1])
video_feature.append(y_new_pool)
video_feature=np.stack(video_feature)
return video_feature
videoDict=getDatasetDict()
videoNameList=videoDict.keys()
random.shuffle(videoNameList)
col_names=[]
for i in range(400):
col_names.append("f"+str(i))
for videoName in videoNameList:
videoAnno=videoDict[videoName]
data=readData(videoName)
numFrame=videoAnno['duration_frame']
featureFrame=len(data)*16
videoAnno["feature_frame"]=featureFrame
videoDict[videoName]=videoAnno
print(numFrame,featureFrame)
videoFeature_mean=poolData(data,videoAnno,num_prop=100,num_bin=1,num_sample_bin=3,pool_type="mean")
outDf=pd.DataFrame(videoFeature_mean,columns=col_names)
outDf.to_csv("./csv_mean_100/"+videoName+".csv",index=False)
outfile=open("./anet_anno_anet.json","w")
json.dump(videoDict,outfile)
outfile.close() | 4,484 | 1,666 |
from cloudevents.sdk.event import v03
import json
from ferris_cli.ferris_cli import CloudEventsAPI
import uuid
import os
import consul
from ferris_cli.ferris_cli import ApplicationConfigurator
from datetime import datetime
def send_direct_loading_event(hdfs_path):
data = {"file_location": hdfs_path }
event = (
v03.Event()
.SetContentType("application/json")
.SetData(json.dumps(data))
.SetEventID("my-id")
.SetSource("ferris.apps.dataloader.minio-adapter")
.SetEventType("ferris.dataloader.file_direct_loaded_to_hdfs")
)
print(json.dumps(event.Properties()))
cca = CloudEventsAPI()
cca.send(event)
def send_confirmation_event(hdfs_path):
data = {"file_location": hdfs_path }
event = (
v03.Event()
.SetContentType("application/json")
.SetData(json.dumps(data))
.SetEventID("my-id")
.SetSource("ferris.apps.dataloader.minio-adapter")
.SetEventType("ferris.dataloader.file_loaded_to_hdfs")
)
print(json.dumps(event.Properties()))
broker = ':'.join([platform_environment['KAFKA_BOOTSTRAP_SERVER'],platform_environment['KAFKA_PORT']])
print(broker)
cca = CloudEventsAPI()
cca.send(event)
platform_environment = ApplicationConfigurator().get('ferris.env')
broker = f"kafka://{platform_environment['KAFKA_BOOTSTRAP_SERVER']}:{platform_environment['KAFKA_PORT']}"
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime("%Y-%m-%dT%H:%M:%SZ")
print(timestampStr)
send_direct_loading_event('/landing/zone/abc')
send_confirmation_event('/landing/zone/abc')
| 1,622 | 567 |
'''
GERADOR DE RELATÓRIO DOS SERVIDORES (DASHBOARD SERVIDORES)
'''
from SQL import sqlpandas
from MENSAGEM import mensagemInformacao, mensagemErro
from AUXILIAR import salvarPandas
def dashboardServidores():
'''
FUNÇÃO PARA CRIAR OS DASHDOARD
ENTRA
ENTRA NULL
SAI
PLANILHA COM OS DADOS PARA DASHBOARD
'''
def faixa(idade):
if idade > 67:
fx = '68-77'
elif idade > 57:
fx = '58-67'
elif idade > 47:
fx = '48-57'
elif idade > 37:
fx = '38-47'
elif idade > 27:
fx = '28-37'
else:
fx = '18-27'
return fx
sql = '''SELECT
GR_MATRICULA AS SIAPE,
IT_NO_SERVIDOR AS SERVIDOR,
IDADE,
IT_CO_SEXO AS SEXO,
DES_TITULACAO AS TITULAÇÃO,
DES_ETNIA AS ETNIA,
DES_REGIME_JURIDICO AS 'REG JUR',
IT_CO_JORNADA_TRABALHO as 'CARGA HORÁRIA',
DES_CARREIRA AS CARREIRA,
DES_CARGO AS CARGO,
DES_GRUPO AS GRUPO,
DES_UPAG AS UPAG
FROM tb_ser_rel
where
IT_DA_OCOR_EXCLUSAO_SERV is null
and IT_DA_OCOR_INATIVIDADE_SERV is null
and DES_CARREIRA in ('TÉCN', 'PROF 2º', 'PROF 3º');'''
dados = sqlpandas(sql)
if len(dados) > 0:
dados['IDADE'] = dados['IDADE'].apply(faixa)
dados['TITULAÇÃO'] = dados['TITULAÇÃO'].replace(['10 DOUTORADO', '08 ESPECIALIZAÇÃO', '09 MESTRADO', '06 MEDIO',
'04 FUNDAMENTAL I', '05 FUNDAMENTAL', '07 SUPERIOR',
'07 ENSINO SUPERIOR', '10 PHD', '07 SUPERIOR-INCOMPLETO'],
['DOUTORADO', 'ESPECIALIZAÇÃO', 'MESTRADO', 'ENSINO MÉDIO',
'ENSINO FUNDAMENTAL', 'ENSINO FUNDAMENTAL', 'ENSINO SUPERIOR',
'ENSINO SUPERIOR', 'DOUTORADO', 'ENSINO MÉDIO'])
dados['TOTAL'] = 1
if len(dados) > 0:
salvarPandas(dados, 'DAHSBOARD - SERVIDORES')
mensagemInformacao('Relatório DAHSBOARD - SERVIDORES criado com sucesso.')
else:
mensagemErro('Relatório DAHSBOARD - SERVIDORES não foi criado.')
| 2,355 | 963 |
from pyroombaadapter.pyroombaadapter import PyRoombaAdapter
| 60 | 18 |
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
from torch.nn import Parameter
import math
from torchkit.util.utils import l2_norm
from torchkit.head.localfc.common import calc_logits
class CurricularFace(nn.Module):
""" Implement of CurricularFace (https://arxiv.org/abs/2004.00288)
"""
def __init__(self,
in_features,
out_features,
scale=64.0,
margin=0.5,
alpha=0.1):
""" Args:
in_features: size of each input features
out_features: size of each output features
scale: norm of input feature
margin: margin
"""
super(CurricularFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.margin = margin
self.scale = scale
self.alpha = alpha
self.cos_m = math.cos(margin)
self.sin_m = math.sin(margin)
self.threshold = math.cos(math.pi - margin)
self.mm = math.sin(math.pi - margin) * margin
self.kernel = Parameter(torch.Tensor(in_features, out_features))
self.register_buffer('t', torch.zeros(1))
nn.init.normal_(self.kernel, std=0.01)
def forward(self, embeddings, labels):
cos_theta, origin_cos = calc_logits(embeddings, self.kernel)
target_logit = cos_theta[torch.arange(0, embeddings.size(0)), labels].view(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2))
cos_theta_m = target_logit * self.cos_m - sin_theta * self.sin_m # cos(target+margin)
mask = cos_theta > cos_theta_m
final_target_logit = torch.where(target_logit > self.threshold, cos_theta_m, target_logit - self.mm)
hard_example = cos_theta[mask]
with torch.no_grad():
self.t = target_logit.mean() * self.alpha + (1 - self.alpha) * self.t
cos_theta[mask] = hard_example * (self.t + hard_example)
cos_theta.scatter_(1, labels.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.scale
return output, origin_cos * self.scale
| 2,189 | 709 |
from core.views import BaseViewSet
from .models import Cash
from .serializers import CashSerializer
class CashViewSet(BaseViewSet):
"""
- Vybirame jen typ 4 coz jsou platby z Banky
- Setridime nenovejsim datem na zacatek
"""
queryset = Cash.objects.all().filter(type=4).order_by('-time')
serializer_class = CashSerializer
filter_fields = ['comment', 'userid']
| 390 | 131 |
import atexit
import sqlite3
import traceback
#################
import sys
sys.path.append('/app')
from helper import *
settings_dict = load_yaml_dict(read_file("/Settings.yaml"))
conn = sqlite3.connect(f"/database/{settings_dict['db_name']}", isolation_level=None, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
atexit.register(conn.close)
atexit.register(cursor.close)
version = read_file("/VERSION").rstrip()
# This tells us whether the migration has already happened.
check_sql = '''SELECT COUNT(*) AS count
FROM pragma_table_info("problems")
WHERE name = "expected_text_output"'''
cursor.execute(check_sql)
check_result = cursor.fetchone()["count"]
if check_result > 0:
print("NotNeeded")
else:
alter_sql_list = ['ALTER TABLE problems RENAME COLUMN expected_output TO expected_text_output',
'ALTER TABLE problems ADD COLUMN expected_image_output text NOT NULL DEFAULT ""',
'''UPDATE problems
SET expected_image_output = expected_text_output
WHERE output_type = "jpg"''',
'''UPDATE problems
SET expected_text_output = ""
WHERE output_type = "jpg"''',
'ALTER TABLE submissions RENAME COLUMN code_output TO text_output',
'ALTER TABLE submissions ADD COLUMN image_output text NOT NULL DEFAULT ""',
'''UPDATE submissions
SET image_output = text_output
WHERE problem_id IN (SELECT problem_id FROM problems WHERE output_type = "jpg")''',
'''UPDATE submissions
SET text_output = ""
WHERE problem_id IN (SELECT problem_id FROM problems WHERE output_type = "jpg")''',
'''CREATE TABLE IF NOT EXISTS submissions2 (
course_id integer NOT NULL,
assignment_id integer NOT NULL,
problem_id integer NOT NULL,
user_id text NOT NULL,
submission_id integer NOT NULL,
code text NOT NULL,
text_output text NOT NULL,
image_output text NOT NULL,
passed integer NOT NULL,
date timestamp NOT NULL,
FOREIGN KEY (course_id) REFERENCES courses (course_id) ON DELETE CASCADE,
FOREIGN KEY (assignment_id) REFERENCES assignments (assignment_id) ON DELETE CASCADE,
FOREIGN KEY (problem_id) REFERENCES problems (problem_id) ON DELETE CASCADE,
FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE,
PRIMARY KEY (course_id, assignment_id, problem_id, user_id, submission_id))''',
'''INSERT INTO submissions2
SELECT course_id, assignment_id, problem_id, user_id, submission_id, code,
text_output, image_output, passed, date
FROM submissions''',
'DROP TABLE IF EXISTS submissions',
'ALTER TABLE submissions2 RENAME TO submissions'
]
error_occurred = False
for sql in alter_sql_list:
try:
cursor.execute(sql)
except:
print(sql)
print(traceback.format_exc())
error_occurred = True
if not error_occurred:
print("Success")
| 3,730 | 953 |
#!/usr/bin/env python3
"""
example.py
Example of using pypahdb to decompose an astronomical PAH spectrum.
"""
import pkg_resources
from pypahdb.decomposer import Decomposer
from pypahdb.observation import Observation
if __name__ == '__main__':
# The sample data (IPAC table).
file_path = 'resources/sample_data_NGC7023.tbl'
data_file = pkg_resources.resource_filename('pypahdb', file_path)
# Construct an Observation object.
obs = Observation(data_file)
# Pass the Observation's spectrum to Decomposer, which performs the fit.
pahdb_fit = Decomposer(obs.spectrum)
# Save the fit to disk, both as a PDF and FITS file.
pahdb_fit.save_pdf('NGC7023_pypahdb_tbl_example.pdf', domaps=False)
pahdb_fit.save_fits('NGC7023_pypahdb_tbl_example.fits', header=obs.header)
| 809 | 297 |
"""
Mapping registries for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
from .const import (
DEVICE_CLASS, SINGLE_INPUT_CLUSTER_DEVICE_CLASS,
SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS, COMPONENT_CLUSTERS, HUMIDITY,
TEMPERATURE, ILLUMINANCE, PRESSURE, METERING, ELECTRICAL_MEASUREMENT,
EVENT_RELAY_CLUSTERS, OPENING, ZONE,
OCCUPANCY, CLUSTER_REPORT_CONFIGS, REPORT_CONFIG_IMMEDIATE,
REPORT_CONFIG_ASAP, REPORT_CONFIG_DEFAULT, REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT, REPORT_CONFIG_OP,
NO_SENSOR_CLUSTERS, BINDABLE_CLUSTERS, ACCELERATION, SENSOR_TYPES,
BINARY_SENSOR_TYPES, RADIO_TYPES, RadioType, RADIO, RADIO_DESCRIPTION,
CONTROLLER
)
SMARTTHINGS_HUMIDITY_CLUSTER = 64581
SMARTTHINGS_ACCELERATION_CLUSTER = 64514
def establish_device_mappings():
"""Establish mappings between ZCL objects and HA ZHA objects.
These cannot be module level, as importing bellows must be done in a
in a function.
"""
from zigpy import zcl
from zigpy.profiles import PROFILES, zha, zll
if zha.PROFILE_ID not in DEVICE_CLASS:
DEVICE_CLASS[zha.PROFILE_ID] = {}
if zll.PROFILE_ID not in DEVICE_CLASS:
DEVICE_CLASS[zll.PROFILE_ID] = {}
def get_ezsp_radio():
import bellows.ezsp
from bellows.zigbee.application import ControllerApplication
return {
RADIO: bellows.ezsp.EZSP(),
CONTROLLER: ControllerApplication
}
RADIO_TYPES[RadioType.ezsp.name] = {
RADIO: get_ezsp_radio,
RADIO_DESCRIPTION: 'EZSP'
}
def get_xbee_radio():
import zigpy_xbee.api
from zigpy_xbee.zigbee.application import ControllerApplication
return {
RADIO: zigpy_xbee.api.XBee(),
CONTROLLER: ControllerApplication
}
RADIO_TYPES[RadioType.xbee.name] = {
RADIO: get_xbee_radio,
RADIO_DESCRIPTION: 'XBee'
}
def get_deconz_radio():
import zigpy_deconz.api
from zigpy_deconz.zigbee.application import ControllerApplication
return {
RADIO: zigpy_deconz.api.Deconz(),
CONTROLLER: ControllerApplication
}
RADIO_TYPES[RadioType.deconz.name] = {
RADIO: get_deconz_radio,
RADIO_DESCRIPTION: 'Deconz'
}
EVENT_RELAY_CLUSTERS.append(zcl.clusters.general.LevelControl.cluster_id)
EVENT_RELAY_CLUSTERS.append(zcl.clusters.general.OnOff.cluster_id)
NO_SENSOR_CLUSTERS.append(zcl.clusters.general.Basic.cluster_id)
NO_SENSOR_CLUSTERS.append(
zcl.clusters.general.PowerConfiguration.cluster_id)
NO_SENSOR_CLUSTERS.append(zcl.clusters.lightlink.LightLink.cluster_id)
BINDABLE_CLUSTERS.append(zcl.clusters.general.LevelControl.cluster_id)
BINDABLE_CLUSTERS.append(zcl.clusters.general.OnOff.cluster_id)
BINDABLE_CLUSTERS.append(zcl.clusters.lighting.Color.cluster_id)
DEVICE_CLASS[zha.PROFILE_ID].update({
zha.DeviceType.ON_OFF_SWITCH: 'binary_sensor',
zha.DeviceType.LEVEL_CONTROL_SWITCH: 'binary_sensor',
zha.DeviceType.REMOTE_CONTROL: 'binary_sensor',
zha.DeviceType.SMART_PLUG: 'switch',
zha.DeviceType.LEVEL_CONTROLLABLE_OUTPUT: 'light',
zha.DeviceType.ON_OFF_LIGHT: 'light',
zha.DeviceType.DIMMABLE_LIGHT: 'light',
zha.DeviceType.COLOR_DIMMABLE_LIGHT: 'light',
zha.DeviceType.ON_OFF_LIGHT_SWITCH: 'binary_sensor',
zha.DeviceType.DIMMER_SWITCH: 'binary_sensor',
zha.DeviceType.COLOR_DIMMER_SWITCH: 'binary_sensor',
})
DEVICE_CLASS[zll.PROFILE_ID].update({
zll.DeviceType.ON_OFF_LIGHT: 'light',
zll.DeviceType.ON_OFF_PLUGIN_UNIT: 'switch',
zll.DeviceType.DIMMABLE_LIGHT: 'light',
zll.DeviceType.DIMMABLE_PLUGIN_UNIT: 'light',
zll.DeviceType.COLOR_LIGHT: 'light',
zll.DeviceType.EXTENDED_COLOR_LIGHT: 'light',
zll.DeviceType.COLOR_TEMPERATURE_LIGHT: 'light',
zll.DeviceType.COLOR_CONTROLLER: 'binary_sensor',
zll.DeviceType.COLOR_SCENE_CONTROLLER: 'binary_sensor',
zll.DeviceType.CONTROLLER: 'binary_sensor',
zll.DeviceType.SCENE_CONTROLLER: 'binary_sensor',
zll.DeviceType.ON_OFF_SENSOR: 'binary_sensor',
})
SINGLE_INPUT_CLUSTER_DEVICE_CLASS.update({
zcl.clusters.general.OnOff: 'switch',
zcl.clusters.measurement.RelativeHumidity: 'sensor',
# this works for now but if we hit conflicts we can break it out to
# a different dict that is keyed by manufacturer
SMARTTHINGS_HUMIDITY_CLUSTER: 'sensor',
zcl.clusters.measurement.TemperatureMeasurement: 'sensor',
zcl.clusters.measurement.PressureMeasurement: 'sensor',
zcl.clusters.measurement.IlluminanceMeasurement: 'sensor',
zcl.clusters.smartenergy.Metering: 'sensor',
zcl.clusters.homeautomation.ElectricalMeasurement: 'sensor',
zcl.clusters.security.IasZone: 'binary_sensor',
zcl.clusters.measurement.OccupancySensing: 'binary_sensor',
zcl.clusters.hvac.Fan: 'fan',
SMARTTHINGS_ACCELERATION_CLUSTER: 'binary_sensor',
})
SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS.update({
zcl.clusters.general.OnOff: 'binary_sensor',
})
SENSOR_TYPES.update({
zcl.clusters.measurement.RelativeHumidity.cluster_id: HUMIDITY,
SMARTTHINGS_HUMIDITY_CLUSTER: HUMIDITY,
zcl.clusters.measurement.TemperatureMeasurement.cluster_id:
TEMPERATURE,
zcl.clusters.measurement.PressureMeasurement.cluster_id: PRESSURE,
zcl.clusters.measurement.IlluminanceMeasurement.cluster_id:
ILLUMINANCE,
zcl.clusters.smartenergy.Metering.cluster_id: METERING,
zcl.clusters.homeautomation.ElectricalMeasurement.cluster_id:
ELECTRICAL_MEASUREMENT,
})
BINARY_SENSOR_TYPES.update({
zcl.clusters.measurement.OccupancySensing.cluster_id: OCCUPANCY,
zcl.clusters.security.IasZone.cluster_id: ZONE,
zcl.clusters.general.OnOff.cluster_id: OPENING,
SMARTTHINGS_ACCELERATION_CLUSTER: ACCELERATION,
})
CLUSTER_REPORT_CONFIGS.update({
zcl.clusters.general.Alarms.cluster_id: [],
zcl.clusters.general.Basic.cluster_id: [],
zcl.clusters.general.Commissioning.cluster_id: [],
zcl.clusters.general.Identify.cluster_id: [],
zcl.clusters.general.Groups.cluster_id: [],
zcl.clusters.general.Scenes.cluster_id: [],
zcl.clusters.general.Partition.cluster_id: [],
zcl.clusters.general.Ota.cluster_id: [],
zcl.clusters.general.PowerProfile.cluster_id: [],
zcl.clusters.general.ApplianceControl.cluster_id: [],
zcl.clusters.general.PollControl.cluster_id: [],
zcl.clusters.general.GreenPowerProxy.cluster_id: [],
zcl.clusters.general.OnOffConfiguration.cluster_id: [],
zcl.clusters.lightlink.LightLink.cluster_id: [],
zcl.clusters.general.OnOff.cluster_id: [{
'attr': 'on_off',
'config': REPORT_CONFIG_IMMEDIATE
}],
zcl.clusters.general.LevelControl.cluster_id: [{
'attr': 'current_level',
'config': REPORT_CONFIG_ASAP
}],
zcl.clusters.lighting.Color.cluster_id: [{
'attr': 'current_x',
'config': REPORT_CONFIG_DEFAULT
}, {
'attr': 'current_y',
'config': REPORT_CONFIG_DEFAULT
}, {
'attr': 'color_temperature',
'config': REPORT_CONFIG_DEFAULT
}],
zcl.clusters.measurement.RelativeHumidity.cluster_id: [{
'attr': 'measured_value',
'config': (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
50
)
}],
zcl.clusters.measurement.TemperatureMeasurement.cluster_id: [{
'attr': 'measured_value',
'config': (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
50
)
}],
SMARTTHINGS_ACCELERATION_CLUSTER: [{
'attr': 'acceleration',
'config': REPORT_CONFIG_ASAP
}, {
'attr': 'x_axis',
'config': REPORT_CONFIG_ASAP
}, {
'attr': 'y_axis',
'config': REPORT_CONFIG_ASAP
}, {
'attr': 'z_axis',
'config': REPORT_CONFIG_ASAP
}],
SMARTTHINGS_HUMIDITY_CLUSTER: [{
'attr': 'measured_value',
'config': (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
50
)
}],
zcl.clusters.measurement.PressureMeasurement.cluster_id: [{
'attr': 'measured_value',
'config': REPORT_CONFIG_DEFAULT
}],
zcl.clusters.measurement.IlluminanceMeasurement.cluster_id: [{
'attr': 'measured_value',
'config': REPORT_CONFIG_DEFAULT
}],
zcl.clusters.smartenergy.Metering.cluster_id: [{
'attr': 'instantaneous_demand',
'config': REPORT_CONFIG_DEFAULT
}],
zcl.clusters.homeautomation.ElectricalMeasurement.cluster_id: [{
'attr': 'active_power',
'config': REPORT_CONFIG_DEFAULT
}],
zcl.clusters.general.PowerConfiguration.cluster_id: [{
'attr': 'battery_voltage',
'config': REPORT_CONFIG_DEFAULT
}, {
'attr': 'battery_percentage_remaining',
'config': REPORT_CONFIG_DEFAULT
}],
zcl.clusters.measurement.OccupancySensing.cluster_id: [{
'attr': 'occupancy',
'config': REPORT_CONFIG_IMMEDIATE
}],
zcl.clusters.hvac.Fan.cluster_id: [{
'attr': 'fan_mode',
'config': REPORT_CONFIG_OP
}],
})
# A map of hass components to all Zigbee clusters it could use
for profile_id, classes in DEVICE_CLASS.items():
profile = PROFILES[profile_id]
for device_type, component in classes.items():
if component not in COMPONENT_CLUSTERS:
COMPONENT_CLUSTERS[component] = (set(), set())
clusters = profile.CLUSTERS[device_type]
COMPONENT_CLUSTERS[component][0].update(clusters[0])
COMPONENT_CLUSTERS[component][1].update(clusters[1])
| 10,448 | 3,778 |
import random
import pickle
import cv2
import numpy as np
import paddle
import paddleseg.transforms as T
from .points_sampler import MultiPointSampler
def get_unique_labels(x, exclude_zero=False):
    """Return the sorted list of distinct non-negative integer labels in *x*.

    Counts occurrences with ``np.bincount`` and keeps the values that occur
    at least once; when *exclude_zero* is true, the background label 0 is
    dropped from the result.
    """
    counts = np.bincount(x.flatten())
    present = np.nonzero(counts)[0].tolist()
    if not exclude_zero:
        return present
    return [label for label in present if label != 0]
class ISDataset(paddle.io.Dataset):
    """Base class for interactive-segmentation datasets.

    Subclasses must implement :meth:`get_sample` and populate
    ``self.dataset_samples``; this base class handles augmentation,
    click-point sampling and conversion of images to training tensors.
    """

    def __init__(self,
                 augmentator=None,
                 points_sampler=MultiPointSampler(max_num_points=12),
                 min_object_area=0,
                 min_ignore_object_area=10,
                 keep_background_prob=0.0,
                 with_image_info=False,
                 samples_scores_path=None,
                 samples_scores_gamma=1.0,
                 epoch_len=-1):
        # NOTE(review): the default ``points_sampler`` is a single mutable
        # instance shared by every ISDataset built without an explicit
        # sampler; pass your own sampler if instances must not share state.
        super(ISDataset, self).__init__()
        self.epoch_len = epoch_len
        self.augmentator = augmentator
        self.min_object_area = min_object_area
        # Fix: this parameter used to be accepted and silently discarded;
        # keep it on the instance so subclasses can honour it.
        self.min_ignore_object_area = min_ignore_object_area
        self.keep_background_prob = keep_background_prob
        self.points_sampler = points_sampler
        self.with_image_info = with_image_info
        self.samples_precomputed_scores = self._load_samples_scores(
            samples_scores_path, samples_scores_gamma)
        self.dataset_samples = None

    def to_tensor(self, x):
        """Convert an HW or HWC uint8 image array to a CHW float32 tensor
        scaled to [0, 1].  Non-ndarray inputs fall through and return None
        (preserved from the original behaviour).
        """
        if isinstance(x, np.ndarray):
            if x.ndim == 2:
                x = x[:, :, None]
            img = x.transpose([2, 0, 1]).astype('float32') / 255
            return img

    def __getitem__(self, index):
        """Return one training triple ``(image, points, mask)``."""
        sample = self.get_sample(index)
        sample = self.augment_sample(sample)
        sample.remove_small_objects(self.min_object_area)
        self.points_sampler.sample_object(sample)
        points = np.array(self.points_sampler.sample_points()).astype('float32')
        mask = self.points_sampler.selected_mask
        image = self.to_tensor(sample.image)
        # Fix: removed an unused ``ids = sample.sample_id`` local that was
        # computed but never returned.
        return image, points, mask

    def augment_sample(self, sample):
        """Apply the augmentator until the result keeps at least one object,
        or a pure-background sample is accepted by ``keep_background_prob``.
        """
        if self.augmentator is None:
            return sample
        valid_augmentation = False
        while not valid_augmentation:
            sample.augment(self.augmentator)
            keep_sample = (self.keep_background_prob < 0.0 or
                           random.random() < self.keep_background_prob)
            valid_augmentation = len(sample) > 0 or keep_sample
        return sample

    def get_sample(self, index):
        """Return the raw sample at *index*; must be provided by subclasses."""
        raise NotImplementedError

    def __len__(self):
        # A positive epoch_len overrides the real sample count (useful for
        # fixed-length training epochs over a resampled dataset).
        if self.epoch_len > 0:
            return self.epoch_len
        else:
            return self.get_samples_number()

    def get_samples_number(self):
        return len(self.dataset_samples)

    @staticmethod
    def _load_samples_scores(samples_scores_path, samples_scores_gamma):
        """Load pickled per-sample triples (index, _, score) from
        *samples_scores_path* and build a sampling distribution where weight
        is proportional to ``(1 - score) ** gamma``.  Returns None when no
        path is given.
        """
        if samples_scores_path is None:
            return None
        with open(samples_scores_path, 'rb') as f:
            images_scores = pickle.load(f)
        probs = np.array([(1.0 - x[2]) ** samples_scores_gamma
                          for x in images_scores])
        probs /= probs.sum()
        samples_scores = {
            'indices': [x[0] for x in images_scores],
            'probs': probs
        }
        print(f'Loaded {len(probs)} weights with gamma={samples_scores_gamma}')
        return samples_scores
#!/usr/bin/env python3
"""
Created on Wed Feb 17 08:32:55 2021
Create a 3-pane (one central slice per axis) thumbnail of an anatomical image.
Usage: script.py <input_image> <output_image>
@author: dlevitas
"""
import os
import sys

# Fix: MPLCONFIGDIR must be set *before* matplotlib is imported, otherwise
# matplotlib has already chosen its config directory and the setting is a
# no-op (it originally appeared after the import below).
os.environ['MPLCONFIGDIR'] = '/tmp/'

import nibabel as nib
import matplotlib
matplotlib.use('Agg')  # headless backend: no display required
import matplotlib.pyplot as plt
from math import floor

plt.style.use('dark_background')

print("loading image to create thumbnail "+sys.argv[1])
image = nib.load(sys.argv[1])
output_image = sys.argv[2]
object_img_array = image.dataobj[:]

# One central slice along each of the three axes.
mid_x = object_img_array[floor(object_img_array.shape[0]/2), :, :]
mid_y = object_img_array[:, floor(object_img_array.shape[1]/2), :]
mid_z = object_img_array[:, :, floor(object_img_array.shape[2]/2)]

fig, axes = plt.subplots(1, 3, figsize=(9, 3))
# Fix: the loop variable was named ``slice``, shadowing the builtin.
for i, plane in enumerate([mid_x, mid_y, mid_z]):
    print("creating thumbnail "+str(i))
    axes[i].imshow(plane.T, cmap="gray", origin="lower", aspect='auto')
    axes[i].axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.savefig(output_image, bbox_inches='tight')
| 1,008 | 403 |
"""Infrastructure for registering and firing callbacks on application events.
Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
be called at specific times, or a collection of alternative methods to try,
callbacks are designed to be used by extension authors. A number of callbacks
can be registered for the same event without needing to be aware of one another.
The functions defined in this module are no-ops indicating the names of available
events and the arguments which will be passed to them.
.. note::
This API is experimental in IPython 2.0, and may be revised in future versions.
"""
from __future__ import print_function
class EventManager(object):
    """Manage a collection of events and a sequence of callbacks for each.

    This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
    instances as an ``events`` attribute.

    .. note::
       This API is experimental in IPython 2.0, and may be revised in future versions.
    """
    def __init__(self, shell, available_events):
        """Initialise the :class:`CallbackManager`.

        Parameters
        ----------
        shell
            The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
        available_callbacks
            An iterable of names for callback events.
        """
        self.shell = shell
        self.callbacks = dict((name, []) for name in available_events)
    def register(self, event, function):
        """Register a new callback for *event*.

        Raises TypeError when *function* is not callable, and KeyError when
        *event* is not one of the known events.
        """
        if not callable(function):
            raise TypeError('Need a callable, got %r' % function)
        self.callbacks[event].append(function)
    def unregister(self, event, function):
        """Remove a previously registered callback from *event*."""
        self.callbacks[event].remove(function)
    def reset(self, event):
        """Drop every callback registered for *event*."""
        self.callbacks[event] = []
    def reset_all(self):
        """Drop every callback for every known event."""
        self.callbacks = dict((name, []) for name in self.callbacks)
    def trigger(self, event, *args, **kwargs):
        """Invoke each callback registered for *event* with the given
        arguments.  A callback that raises has its traceback reported via
        the shell, and the remaining callbacks still run.
        """
        for callback in self.callbacks[event]:
            try:
                callback(*args, **kwargs)
            except Exception:
                print("Error in callback {} (for {}):".format(callback, event))
                self.shell.showtraceback()
# event_name -> prototype mapping
available_events = {}
def _define_event(callback_proto):
    """Record *callback_proto* in ``available_events`` under its own name
    and hand it back unchanged, so it can be used as a decorator."""
    name = callback_proto.__name__
    available_events[name] = callback_proto
    return callback_proto
# ------------------------------------------------------------------------
# Callback prototypes
#
# No-op functions which describe the names of available events and the
# signatures of callbacks for those events.
# ------------------------------------------------------------------------
# Each prototype below is registered in ``available_events`` by the
# ``_define_event`` decorator; extension authors register real callbacks
# against these names via ``EventManager.register``.
@_define_event
def pre_execute():
    """Fires before code is executed in response to user/frontend action.
    This includes comm and widget messages and silent execution, as well as user
    code cells."""
    pass
@_define_event
def pre_run_cell():
    """Fires before user-entered code runs."""
    pass
@_define_event
def post_execute():
    """Fires after code is executed in response to user/frontend action.
    This includes comm and widget messages and silent execution, as well as user
    code cells."""
    pass
@_define_event
def post_run_cell():
    """Fires after user-entered code runs."""
    pass
@_define_event
def shell_initialized(ip):
    """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
    This is before extensions and startup scripts are loaded, so it can only be
    set by subclassing.
    Parameters
    ----------
    ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
        The newly initialised shell.
    """
    pass
| 4,511 | 1,163 |
from app import app
from services import TopicServices
from flask import jsonify, request
@app.route('/topic/terms', methods=['GET'])
def getTerms():
    """GET /topic/terms — return topic terms as JSON.

    Reads the ``numOfTerms`` query parameter (None when absent) and forwards
    it unchanged to :func:`TopicServices.getTerms`.
    """
    requested = request.args.get("numOfTerms")
    return jsonify({'results': TopicServices.getTerms(requested)})
#!/usr/bin/env python3
# Copyright (c) 2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uaclientname and -uaclientversion option."""
import re
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UseragentTest(BitcoinTestFramework):
    """Functional test for the -uaclientname and -uaclientversion options."""

    def set_test_params(self):
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        self.log.info("test -uaclientname and -uaclientversion")
        default_useragent = self.nodes[0].getnetworkinfo()["subversion"]
        expected = "/Bitcoin ABC:"
        assert_equal(default_useragent[:len(expected)], expected)
        # Extract the default version: the text between ':' and the next '/'.
        default_version = default_useragent[default_useragent.index(':') + 1:]
        default_version = default_version[:default_version.index('/')]

        self.restart_node(0, ["-uaclientname=Foo Client"])
        foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
        expected = f"/Foo Client:{default_version}"
        assert_equal(foo_ua[:len(expected)], expected)

        self.restart_node(0, ["-uaclientversion=123.45"])
        foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
        expected = "/Bitcoin ABC:123.45"
        assert_equal(foo_ua[:len(expected)], expected)

        self.log.info(
            "non-numeric version allowed (although not recommended in BIP14)")
        self.restart_node(0, ["-uaclientversion=Version Two"])
        foo_ua = self.nodes[0].getnetworkinfo()["subversion"]
        expected = "/Bitcoin ABC:Version Two"
        assert_equal(foo_ua[:len(expected)], expected)

        self.log.info("test -uaclient doesn't break -uacomment")
        self.restart_node(0, ["-uaclientname=Bar Client",
                              "-uaclientversion=3000",
                              "-uacomment=spam bacon and eggs"])
        bar_ua = self.nodes[0].getnetworkinfo()["subversion"]
        expected = "/Bar Client:3000"
        assert_equal(bar_ua[:len(expected)], expected)
        assert "spam bacon and eggs" in bar_ua

        # Fix: this identical "version string too long" regex was duplicated
        # verbatim three times below; build it once.
        too_long_expected = r"Error: Total length of network version string \([0-9]+\) exceeds maximum length \([0-9]+\)\. Reduce the number or size of uacomments\."

        self.log.info("test -uaclientname max length")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(
            ["-uaclientname=" + "a" * 256], too_long_expected,
            match=ErrorMatch.FULL_REGEX)

        self.log.info("test -uaclientversion max length")
        self.nodes[0].assert_start_raises_init_error(
            ["-uaclientversion=" + "a" * 256], too_long_expected,
            match=ErrorMatch.FULL_REGEX)

        self.log.info("test -uaclientname and -uaclientversion max length")
        self.nodes[0].assert_start_raises_init_error(
            ["-uaclientname=" + "a" * 128, "-uaclientversion=" + "a" * 128],
            too_long_expected, match=ErrorMatch.FULL_REGEX)

        self.log.info(
            "test -uaclientname and -uaclientversion invalid characters")
        for invalid_char in ['/', ':', '(', ')', '*', '!', '₿', '🏃']:
            # for client name
            expected = r"Error: -uaclientname \(" + \
                re.escape(invalid_char) + r"\) contains invalid characters\."
            self.nodes[0].assert_start_raises_init_error(
                ["-uaclientname=" + invalid_char],
                expected, match=ErrorMatch.FULL_REGEX)
            # for client version
            expected = r"Error: -uaclientversion \(" + \
                re.escape(invalid_char) + r"\) contains invalid characters\."
            self.nodes[0].assert_start_raises_init_error(
                ["-uaclientversion=" + invalid_char],
                expected, match=ErrorMatch.FULL_REGEX)
            # for both: the -uaclientname error is the one the test expects
            expected = r"Error: -uaclientname \(" + \
                re.escape(invalid_char) + r"\) contains invalid characters\."
            self.nodes[0].assert_start_raises_init_error(
                ["-uaclientname=" + invalid_char,
                 "-uaclientversion=" + invalid_char],
                expected, match=ErrorMatch.FULL_REGEX)
# Script entry point: run the functional test when executed directly.
if __name__ == '__main__':
    UseragentTest().main()
| 4,602 | 1,438 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import unittest
import numpy as np
import numpy.testing as npt
import pandas as pd
from riip.material import RiiMaterial
class KnownValues(unittest.TestCase):
    """Check ``RiiMaterial.n``/``RiiMaterial.k`` against hand-computed values.

    Each ``known_values`` entry is (formula id, coefficient list, wavelength,
    expected index); ``known_values_for_tabulated`` entries are
    (formula id, [[wl, n, k], ...] rows, wavelength, expected (n, k)).
    """
    known_values = [
        (1, [0.0 for _ in range(17)], 1.0, 1.0),
        (1, [1.0 if _ < 3 else 0 for _ in range(17)], 0.5, np.sqrt(5 / 3)),
        (1, [0.5 if _ < 3 else 0 for _ in range(17)], 1.0, np.sqrt(6.5 / 3)),
        (2, [0.0 for _ in range(17)], 1.0, 1.0),
        (2, [1.0 if _ < 3 else 0 for _ in range(17)], 0.5, np.sqrt(5 / 3)),
        (2, [0.5 if _ < 3 else 0 for _ in range(17)], 1.0, np.sqrt(2.5)),
        (3, [0.0 for _ in range(17)], 1.0, 0.0),
        (3, [1.0 if _ < 3 else 0 for _ in range(17)], 0.5, np.sqrt(1.5)),
        (
            3,
            [0.5 if _ < 3 else 0 for _ in range(17)],
            2.0,
            np.sqrt((1 + np.sqrt(2)) / 2),
        ),
        (4, [0.0 for _ in range(17)], 0.5, 0.0),
        (4, [1.0, 1.0, 2.0, 3.0, 2.0] + [0 for _ in range(5, 17)], 2.0, np.sqrt(1 / 5)),
        (
            4,
            [0.0 for _ in range(9)] + [0.5 for _ in range(9, 17)],
            2.0,
            np.sqrt(2 * np.sqrt(2)),
        ),
        (5, [0.0 for _ in range(11)], 1.0, 0.0),
        (5, [2.0 for _ in range(11)], 0.5, 4.5),
        (5, [0.5 for _ in range(11)], 2.0, 0.5 + 2.5 * np.sqrt(2)),
        (6, [0.0 for _ in range(11)], 1.0, 1.0),
        (6, [0.5 for _ in range(11)], 2.0, 11.5),
        (6, [0.2 for _ in range(11)], 0.25, 1.2 - 1 / 15.8),
        (7, [0.0 for _ in range(6)], 0.5, 0.0),
        (
            7,
            [1.0 for _ in range(6)],
            np.sqrt(1.028),
            3 + 1.028 + 1.028 ** 2 + 1.028 ** 3,
        ),
        (7, [1.0, 0, 0, 0.5, 0.5, 0.5], 0.5, 1 + 21 / 2 ** 7),
        (8, [0.0 for _ in range(4)], 0.5, 1.0),
        (8, [0.1 for _ in range(4)], np.sqrt(0.2), np.sqrt(1.64 / 0.68)),
        (8, [0.2, 0, 0, 0.2], 0.5, np.sqrt(1.5 / 0.75)),
        (9, [0.0 for _ in range(6)], 0.5, 0.0),
        (
            9,
            [1.0 for _ in range(6)],
            np.sqrt(2),
            np.sqrt(2 + (np.sqrt(2) - 1) / (4 - 2 * np.sqrt(2))),
        ),
        (9, [1.0 for _ in range(6)], 2.0, np.sqrt(11 / 6)),
    ]
    known_values_for_tabulated = [
        (0, [[0.01 * i, 0.0, 0.0] for i in range(100)], 0.5, (0.0, 0.0)),
        (0, [[0.01 * i, 0.02 * i, 0.0] for i in range(100)], 0.5, (1.0, 0.0)),
        (0, [[0.01 * i, 1.3, 0.01 * i] for i in range(100)], 0.5, (1.3, 0.5)),
    ]

    def test_dispersion_formula_known_values(self):
        """Analytic dispersion formulas reproduce the expected indices."""
        # Fix: dropped the unused enumerate() index and a leftover debug
        # print(cs) that cluttered the test output.
        for formula, cs, wl, result in self.known_values:
            catalog = pd.DataFrame(
                {
                    "book": "",
                    "page": "",
                    "formula": [formula],
                    "tabulated": [""],
                    "wl_n_min": [0.25],
                    "wl_n_max": [2.0],
                    "wl_k_min": [0.25],
                    "wl_k_max": [2.0],
                }
            )
            data = pd.DataFrame({"id": 0, "c": cs}).set_index("id")
            material = RiiMaterial(0, catalog, data)
            n = material.n(wl)
            npt.assert_array_almost_equal(n, np.atleast_1d(result))

    def test_dispersion_formula_for_tabulated(self):
        """Tabulated n/k data is interpolated to the expected values."""
        # Fix: dropped the unused enumerate() index and a debug print of the
        # computed values.
        for formula, wlnk, wl, result in self.known_values_for_tabulated:
            _wlnk = np.asarray(wlnk)
            wls = _wlnk[:, 0]
            ns = _wlnk[:, 1]
            ks = _wlnk[:, 2]
            catalog = pd.DataFrame(
                {
                    "book": "",
                    "page": "",
                    "formula": [formula],
                    "tabulated": ["nk"],
                    "num_n": 100,
                    "num_k": 100,
                    "wl_n_min": [0.25],
                    "wl_n_max": [2.0],
                    "wl_k_min": [0.25],
                    "wl_k_max": [2.0],
                }
            )
            data = pd.DataFrame(
                {"id": 0, "wl_n": wls, "n": ns, "wl_k": wls, "k": ks}
            ).set_index("id")
            material = RiiMaterial(0, catalog, data)
            self.assertAlmostEqual(material.n(wl).item(), result[0])
            self.assertAlmostEqual(material.k(wl).item(), result[1])

    def test_dispersion_formula_exception(self):
        """Wavelengths outside the catalog range raise ValueError."""
        catalog = pd.DataFrame(
            {
                "book": "",
                "page": "",
                "formula": [1],
                "tabulated": [""],
                "wl_n_min": [0.25],
                "wl_n_max": [2.0],
                "wl_k_min": [0.25],
                "wl_k_max": [2.0],
            }
        )
        data = pd.DataFrame({"id": 0, "c": list(range(17))}).set_index("id")
        material = RiiMaterial(0, catalog, data)
        with self.assertRaises(ValueError):
            material.n(0.1)
        with self.assertRaises(ValueError):
            material.k(0.1)
        with self.assertRaises(ValueError):
            material.n(2.1)
        with self.assertRaises(ValueError):
            material.k(2.1)
        # An array containing any out-of-range wavelength must also raise.
        with self.assertRaises(ValueError):
            material.n(np.array([0.1 * i for i in range(21)]))
        with self.assertRaises(ValueError):
            material.k(np.array([0.1 * i for i in range(21)]))
# Script entry point: run the unit tests when executed directly.
if __name__ == "__main__":
    unittest.main()
| 5,619 | 2,325 |
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import oslo_messaging
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.handlers import resources_rpc
from neutron.services.logapi.common import constants as log_const
from neutron.services.logapi.rpc import server as server_rpc
from neutron.tests import base
class LoggingApiNotificationTestCase(base.BaseTestCase):
    # Verifies that LoggingApiNotification forwards log-resource lifecycle
    # events through the resources push RPC API with the matching event type.
    def setUp(self):
        super(LoggingApiNotificationTestCase, self).setUp()
        self.test_obj = server_rpc.LoggingApiNotification()
    def test___init__(self):
        # The notification transport must be the standard push RPC proxy.
        self.assertIsInstance(self.test_obj.notification_api,
                              resources_rpc.ResourcesPushRpcApi)
    @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi."
                "push")
    def test_create_log(self, mocked_push):
        m_context = mock.Mock()
        m_log_resource = mock.Mock()
        self.test_obj.create_log(m_context, m_log_resource)
        # The resource is pushed as a one-element list with events.CREATED.
        mocked_push.assert_called_with(m_context, [m_log_resource],
                                       events.CREATED)
    @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi."
                "push")
    def test_update_log(self, mocked_push):
        m_context = mock.Mock()
        m_log_resource = mock.Mock()
        self.test_obj.update_log(m_context, m_log_resource)
        # Same shape as create, but tagged events.UPDATED.
        mocked_push.assert_called_with(m_context, [m_log_resource],
                                       events.UPDATED)
    @mock.patch("neutron.api.rpc.handlers.resources_rpc.ResourcesPushRpcApi."
                "push")
    def test_delete_log(self, mocked_push):
        m_context = mock.Mock()
        m_log_resource = mock.Mock()
        self.test_obj.delete_log(m_context, m_log_resource)
        # Same shape as create, but tagged events.DELETED.
        mocked_push.assert_called_with(m_context, [m_log_resource],
                                       events.DELETED)
class LoggingApiSkeletonTestCase(base.BaseTestCase):
    # Verifies the server-side RPC skeleton: message-bus wiring and the
    # pass-through of its two query methods to the db_api helpers.
    @mock.patch("neutron.common.rpc.get_server")
    def test___init__(self, mocked_get_server):
        test_obj = server_rpc.LoggingApiSkeleton()
        # The skeleton must listen on the logging-plugin topic of this host,
        # without fanout.
        _target = oslo_messaging.Target(
            topic=log_const.LOGGING_PLUGIN,
            server=cfg.CONF.host,
            fanout=False)
        mocked_get_server.assert_called_with(_target, [test_obj])
    @mock.patch("neutron.services.logapi.common.db_api."
                "get_sg_log_info_for_port")
    def test_get_sg_log_info_for_port(self, mock_callback):
        test_obj = server_rpc.LoggingApiSkeleton()
        m_context = mock.Mock()
        port_id = '123'
        test_obj.get_sg_log_info_for_port(m_context, port_id=port_id)
        # Thin delegation: the db_api helper gets (context, port_id).
        mock_callback.assert_called_with(m_context, port_id)
    @mock.patch("neutron.services.logapi.common.db_api."
                "get_sg_log_info_for_log_resources")
    def test_get_sg_log_info_for_log_resources(self, mock_callback):
        test_obj = server_rpc.LoggingApiSkeleton()
        m_context = mock.Mock()
        log_resources = [mock.Mock()]
        test_obj.get_sg_log_info_for_log_resources(m_context,
                                                   log_resources=log_resources)
        # Thin delegation: the db_api helper gets (context, log_resources).
        mock_callback.assert_called_with(m_context, log_resources)
| 3,832 | 1,224 |
""" Unit Tests for Service Patch
"""
# Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import ipyradiant
import rdflib
# A federated query written with the upper-case SERVICE keyword, as a user
# would type it.
LINKEDDATA_QUERY = """
SELECT DISTINCT ?s ?p ?o
WHERE {
SERVICE <http://linkeddata.uriburner.com/sparql>
{
SELECT ?s ?p ?o
WHERE {?s ?p ?o}
}
}
"""
# Expected output of the patch: identical except that the SERVICE keyword is
# lower-cased.
PATCHED_LINKEDDATA_QUERY = """
SELECT DISTINCT ?s ?p ?o
WHERE {
service <http://linkeddata.uriburner.com/sparql>
{
SELECT ?s ?p ?o
WHERE {?s ?p ?o}
}
}
"""
def test_service_fix():
    """The patch must rewrite SERVICE to lower case and change nothing else."""
    patched = ipyradiant.service_patch_rdflib(LINKEDDATA_QUERY)
    assert patched == PATCHED_LINKEDDATA_QUERY
def test_rdflib_version():
    """The SERVICE patch is only relevant for rdflib versions <= 5.0.0."""
    parts = tuple(int(piece) for piece in rdflib.__version__.split("."))
    assert parts <= (5, 0, 0)
| 942 | 338 |
import subprocess
from collections import OrderedDict
import pytest
from connect.cli.plugins.project import git
@pytest.mark.parametrize(
    ('tag', 'result'),
    (
        ('0.0.0', True),
        ('0.0.4', True),
        ('1.0.0', True),
        ('1.2.0', True),
        ('1.2.3', True),
        ('0.0', True),
        ('1.0', True),
        ('10.20', True),
        ('99999999999999999999999.999999999999999999.99999999999999999', True),
        ('v0.0.4', True),
        ('v1.2', True),
        ('v01.23', True),
        ('01.23', True),
        ('01.23a1', True),
        ('01.23b3', True),
        ('01.23rc1', False),
        ('1.1.2-prerelease+meta', False),
        ('1.v2.0', False),
        ('1.', False),
        ('v1', False),
    ),
)
def test_connect_version_tag(tag, result):
    """``plain_tag`` is None exactly when the string parses as a version tag."""
    # Fix: the parameter used to be named ``str``, shadowing the builtin.
    assert (git.ConnectVersionTag(tag).plain_tag is None) == result
@pytest.mark.parametrize(
    'value',
    (
        '0.2.0',
        'plain-tag-0.0.2a2',
    ),
)
def test_connect_version_tag_eq_comparison(value):
    """A tag compares equal to its raw string and to an equivalent tag."""
    tag = git.ConnectVersionTag(value)
    assert tag == value
    assert tag == git.ConnectVersionTag(value)
def test_connect_version_tag_invalid_comparison():
    """Comparing a tag to an unrelated type (int) is never equal."""
    # Fix: the original passed the *builtin* ``str`` type to ConnectVersionTag
    # (a leftover from the neighbouring test whose parameter was named
    # ``str``) instead of an actual version string.
    assert not git.ConnectVersionTag('0.2.0') == 10
def test_list_tags(mocker):
    """_list_tags turns ``<commit> refs/tags/<tag>`` lines into {tag: commit}."""
    run_mock = mocker.patch('connect.cli.plugins.project.git.subprocess.run')
    completed = mocker.patch(
        'connect.cli.plugins.project.git.subprocess.CompletedProcess',
    )
    completed.stdout = b"""commit1 refs/tags/21.1
commit2 refs/tags/21.10
commit3 refs/tags/21.11
commit4 refs/tags/21.9"""
    run_mock.return_value = completed
    tags = git._list_tags('dummy.repo')
    assert tags == {'21.1': 'commit1', '21.10': 'commit2', '21.11': 'commit3', '21.9': 'commit4'}
def test_list_tags_error(mocker):
    """A failing git subprocess is surfaced as GitException."""
    run_mock = mocker.patch('connect.cli.plugins.project.git.subprocess.run')
    completed = mocker.patch(
        'connect.cli.plugins.project.git.subprocess.CompletedProcess',
    )
    completed.check_returncode.side_effect = subprocess.CalledProcessError(1, [])
    run_mock.return_value = completed
    with pytest.raises(git.GitException):
        git._list_tags('dummy.repo')
@pytest.mark.parametrize(
    ('tags', 'expected'),
    (
        # Expected results are ordered by version; entries whose major
        # version does not match the requested '21' are dropped (so are
        # strings that do not parse as version tags at all).
        (
            {'v21.1': 'cmt1', 'v21.10': 'cmt2', 'v21.11': 'cmt3', 'v21.9': 'cmt4'},
            OrderedDict({'v21.1': 'cmt1', 'v21.9': 'cmt4', 'v21.10': 'cmt2', 'v21.11': 'cmt3'}),
        ),
        (
            {'21.1': 'cmt1', '21.10': 'cmt2', '21.11': 'cmt3', '21.9': 'cmt4'},
            OrderedDict({'21.1': 'cmt1', '21.9': 'cmt4', '21.10': 'cmt2', '21.11': 'cmt3'}),
        ),
        (
            {'21.1': 'cmt1', '21.10': 'cmt2', '21.9': 'cmt4', '23.0.1a2': 'cmt3'},
            OrderedDict({'21.1': 'cmt1', '21.9': 'cmt4', '21.10': 'cmt2'}),
        ),
        (
            {'21.1': 'cmt1', '21.10': 'cmt2', '21.9': 'cmt4', '21.0.1a2': 'cmt3'},
            OrderedDict({'21.0.1a2': 'cmt3', '21.1': 'cmt1', '21.9': 'cmt4', '21.10': 'cmt2'}),
        ),
        (
            {'01.1': 'cmt1', '21.10': 'cmt2', '21.11': 'cmt3', '21.9': 'cmt4'},
            OrderedDict({'21.9': 'cmt4', '21.10': 'cmt2', '21.11': 'cmt3'}),
        ),
        (
            {'v21.10not-a-tag': 'cmt5', '01.1': 'cmt1', '21.11': 'cmt3', '21.10': 'cmt2', 'v21.11': 'cmt4'},
            OrderedDict(
                {
                    '21.10': 'cmt2',
                    '21.11': 'cmt3',
                    'v21.11': 'cmt4',
                },
            ),
        ),
        (
            {
                'not-a-version-tag': 'cmt1',
                '21.1': 'cmt5',
                '21.10a1': 'cmt7alpha',
                '21.10': 'cmt7',
                'not-a-version-tag3': 'cmt3',
                '22a2': 'cmt4',
                'not-a-version-tag2': 'cmt2',
                '21.9': 'cmt6',
                '23.0.1a2': 'cmt8',
            },
            OrderedDict(
                {
                    '21.1': 'cmt5',
                    '21.9': 'cmt6',
                    '21.10a1': 'cmt7alpha',
                    '21.10': 'cmt7',
                },
            ),
        ),
        # Empty input yields an empty ordered mapping.
        ({}, OrderedDict()),
    ),
)
def test_sort_and_filter_tags(tags, expected):
    # '21' is the major version the tags are filtered against.
    sorted_tags = git._sort_and_filter_tags(tags, '21')
    assert sorted_tags == expected
@pytest.mark.parametrize(
    ('tags', 'cli_version', 'expected'),
    (
        # (raw ls-remote-style stdout, CLI version, expected (tag, commit));
        # (None, None) is expected when no usable tag exists.
        (
            b"""commit1 refs/tags/v21.1
commit2 refs/tags/v21.10
commit3 refs/tags/v21.11
commit4 refs/tags/v21.9""",
            '21.4',
            ('v21.11', 'commit3'),
        ),
        (
            b"""commit1 refs/tags/21.1
commit2 refs/tags/21.10
commit3 refs/tags/21.11
commit4 refs/tags/21.9""",
            '21.7',
            ('21.11', 'commit3'),
        ),
        (
            b"""commit4 refs/tags/22.0
commit1 refs/tags/21.3
commit2 refs/tags/21.2
commit3 refs/tags/21.1""",
            '22.1',
            ('22.0', 'commit4'),
        ),
        (
            b"""commit4 refs/tags/22.0
commit1 refs/tags/21.3
commit2 refs/tags/21.2
commit3 refs/tags/21.1""",
            '21.1',
            ('21.3', 'commit1'),
        ),
        (
            b"""commit4 refs/tags/22.0
commit1 refs/tags/21.3
commit2 refs/tags/21.2""",
            '22.4',
            ('22.0', 'commit4'),
        ),
        (
            b"""commit4 refs/tags/01.0
commit1 refs/tags/0.0""",
            '22.1',
            (None, None),
        ),
        (b"", '21.1', (None, None)),
    ),
)
def test_get_highest_version(mocker, tags, cli_version, expected):
    # NOTE(review): this patches subprocess.CalledProcessError while the
    # sibling test_list_tags patches CompletedProcess; both work because the
    # patched object is only a Mock carrying ``stdout`` — confirm which class
    # was actually intended here.
    mock_subprocess_run = mocker.patch('connect.cli.plugins.project.git.subprocess.run')
    mock_subprocess_called_process_error = mocker.patch(
        'connect.cli.plugins.project.git.subprocess.CalledProcessError',
    )
    mock_subprocess_called_process_error.stdout = tags
    mock_subprocess_run.return_value = mock_subprocess_called_process_error
    # Pin the CLI's own version so tag selection is deterministic.
    mocker.patch('connect.cli.plugins.project.git.get_version', return_value=cli_version)
    assert expected == git.get_highest_version('dummy.repo')
| 6,552 | 2,600 |
# Created by Kelvin_Clark on 3/5/2022, 6:37 PM
from typing import Optional
from src.models.entities.user import User
from src import database as db
class UserDao:
    """Static data-access helpers for :class:`User` rows."""

    @staticmethod
    def get_user_by_email(email: str) -> Optional[User]:
        """Return the first user whose email matches, or None when absent."""
        return User.query.filter_by(email=email).first()

    @staticmethod
    def check_user_exists(email: str) -> bool:
        """Return True when a user with *email* exists, False otherwise.

        :param email: str
        :return: bool
        """
        return UserDao.get_user_by_email(email=email) is not None

    @staticmethod
    def add_user(user: User) -> User:
        """Persist *user* and return it refreshed with DB-generated fields."""
        db.session.add(user)
        db.session.commit()
        db.session.refresh(user)
        return user
| 737 | 235 |
# Problem ID: dmpg18g1
# By Alexander Cai 2019-12-09
# Solved
import sys
# Chair-state sentinels: FILLED chairs can no longer be claimed, EMPTY
# chairs still need an occupant; positive values store a student number.
FILLED = -1
EMPTY = 0
data = sys.stdin.read().split('\n')
# First input line: n chairs and k (k is read but never used below —
# presumably part of the problem's input format).
n, k = map(int, data[0].split())
chairs = [FILLED for _ in range(n)]
# Second line: 1-based positions of the initially empty chairs.
for j in map(int, data[1].split()):
    chairs[j-1] = EMPTY
# Third line: each arriving student (1-based order i+1) targets chair j;
# an empty target becomes filled, otherwise the student's number is parked
# on the already-filled chair.
for i, j in enumerate(map(int, data[2].split())):
    if chairs[j-1] == EMPTY:
        chairs[j-1] = FILLED
    else:
        chairs[j-1] = i+1 # index of student
s = []
nremaining = chairs.count(EMPTY)
i = 0
# Sweep the chairs circularly with a stack of displaced students: a student
# found standing on a filled chair is pushed; each empty chair pops the most
# recently pushed student (if any) and becomes filled.
while nremaining > 0:
    if chairs[i] == FILLED:
        pass
    elif chairs[i] == EMPTY:
        if len(s) > 0:
            s.pop()
        chairs[i] = FILLED
        nremaining -= 1
    else: # student at this index -- underneath them is filled
        s.append(chairs[i]) # add them to stack
        chairs[i] = FILLED
    i = (i+1) % n
# Answer: the earliest-pushed student still on the stack.  NOTE(review):
# assumes the stack is non-empty when the loop ends (IndexError otherwise)
# — presumably guaranteed by the problem constraints; confirm.
print(s[0])
| 839 | 357 |
# -*- coding: utf-8 -*-
import unittest
import trafaret as t
from collections import Mapping as AbcMapping
from trafaret import extract_error, ignore, DataError
from trafaret.extras import KeysSubset
class TestAnyTrafaret(unittest.TestCase):
    """t.Any accepts absolutely any value."""
    def test_any(self):
        checked = (t.Any() >> ignore).check(object())
        self.assertEqual(checked, None)
class TestAtomTrafaret(unittest.TestCase):
    """t.Atom matches one exact value and rejects everything else."""
    def test_atom(self):
        self.assertEqual(t.Atom('atom').check('atom'), 'atom')
        self.assertEqual(
            extract_error(t.Atom('atom'), 'molecule'),
            "value is not exactly 'atom'",
        )
class TestBoolTrafaret(unittest.TestCase):
    """t.Bool passes real booleans through unchanged and rejects the rest."""
    def test_bool(self):
        for flag in (True, False):
            self.assertEqual(t.Bool().check(flag), flag)
        self.assertEqual(extract_error(t.Bool(), 1),
                         'value should be True or False')
class TestCallTrafaret(unittest.TestCase):
    """t.Call wraps a plain validator function into a trafaret."""
    def test_call(self):
        def validator(value):
            if value != "foo":
                return t.DataError("I want only foo!")
            return 'foo'
        trafaret = t.Call(validator)
        self.assertEqual(trafaret.check("foo"), 'foo')
        self.assertEqual(extract_error(trafaret, "bar"), 'I want only foo!')
class TestCallableTrafaret(unittest.TestCase):
    """t.Callable accepts callables and rejects plain values."""
    def test_callable(self):
        (t.Callable() >> t.ignore).check(lambda: 1)
        self.assertEqual(extract_error(t.Callable(), 1),
                         'value is not callable')
class TestDictTrafaret(unittest.TestCase):
    # Exercises t.Dict: required/optional keys, extras handling, key
    # renaming, defaults, dict merging with +, and the Mapping ABC interface.
    def test_base(self):
        trafaret = t.Dict(foo=t.Int, bar=t.String) >> t.ignore
        trafaret.check({"foo": 1, "bar": "spam"})
        res = t.extract_error(trafaret, {"foo": 1, "bar": 2})
        self.assertEqual(res, {'bar': 'value is not a string'})
        res = extract_error(trafaret, {"foo": 1})
        self.assertEqual(res, {'bar': 'is required'})
        res = extract_error(trafaret, {"foo": 1, "bar": "spam", "eggs": None})
        self.assertEqual(res, {'eggs': 'eggs is not allowed key'})
        # allow_extra mutates the trafaret in place and returns it.
        res = trafaret.allow_extra("eggs")
        self.assertEqual(repr(res), '<Dict(extras=(eggs) | bar=<String>, foo=<Int>)>')
        trafaret.check({"foo": 1, "bar": "spam", "eggs": None})
        trafaret.check({"foo": 1, "bar": "spam"})
        res = extract_error(trafaret, {"foo": 1, "bar": "spam", "ham": 100})
        self.assertEqual(res, {'ham': 'ham is not allowed key'})
        # "*" whitelists any extra key.
        trafaret.allow_extra("*")
        trafaret.check({"foo": 1, "bar": "spam", "ham": 100})
        trafaret.check({"foo": 1, "bar": "spam", "ham": 100, "baz": None})
        res = extract_error(trafaret, {"foo": 1, "ham": 100, "baz": None})
        self.assertEqual(res, {'bar': 'is required'})
    def test_old_keys(self):
        # Legacy key objects implement pop()/set_trafaret() instead of the
        # newer callable-key protocol.
        class OldKey(object):
            def pop(self, value):
                data = value.pop('testkey')
                yield 'testkey', data
            def set_trafaret(self, trafaret):
                pass
        trafaret = t.Dict({
            OldKey(): t.Any
        })
        res = trafaret.check({'testkey': 123})
        self.assertEqual(res, {'testkey': 123})
    def test_callable_key(self):
        # A bare callable key yields (name, value, trafaret-path) triples.
        def simple_key(value):
            yield 'simple', 'simple data', []
        trafaret = t.Dict(simple_key)
        res = trafaret.check({})
        self.assertEqual(res, {'simple': 'simple data'})
    def test_base2(self):
        # Optional keys may be absent, but must still validate when present.
        trafaret = t.Dict({t.Key('bar', optional=True): t.String}, foo=t.Int)
        trafaret.allow_extra('*')
        res = trafaret.check({"foo": 1, "ham": 100, "baz": None})
        self.assertEqual(res, {'baz': None, 'foo': 1, 'ham': 100})
        res = extract_error(trafaret, {"bar": 1, "ham": 100, "baz": None})
        self.assertEqual(res, {'bar': 'value is not a string', 'foo': 'is required'})
        res = extract_error(trafaret, {"foo": 1, "bar": 1, "ham": 100, "baz": None})
        self.assertEqual(res, {'bar': 'value is not a string'})
    def test_base3(self):
        # `>> 'baz'` renames the key in the output; `default=` fills in the
        # value when the source key is missing (the assertions below show it
        # also wins over a directly supplied 'baz').
        trafaret = t.Dict({t.Key('bar', default='nyanya') >> 'baz': t.String}, foo=t.Int)
        res = trafaret.check({'foo': 4})
        self.assertEqual(res, {'baz': 'nyanya', 'foo': 4})
        trafaret.allow_extra('*')
        res = trafaret.check({'baz': 'spam', 'foo': 4})
        self.assertEqual(res, {'baz': 'nyanya', 'foo': 4})
        # ignore_extra drops the named extras from the output entirely.
        trafaret.ignore_extra('fooz')
        res = trafaret.check({'foo': 4, 'fooz': 5})
        self.assertEqual(res, {'baz': 'nyanya', 'foo': 4})
        trafaret.ignore_extra('*')
        res = trafaret.check({'foo': 4, 'foor': 5})
        self.assertEqual(res, {'baz': 'nyanya', 'foo': 4})
    def test_add(self):
        # Two Dicts with disjoint keys merge into one via +.
        first = t.Dict({
            t.Key('bar', default='nyanya') >> 'baz': t.String},
            foo=t.Int)
        second = t.Dict({
            t.Key('bar1', default='nyanya') >> 'baz1': t.String},
            foo1=t.Int)
        third = first + second
        res = third.check({'foo': 4, 'foo1': 41})
        self.assertEqual(res, {'baz': 'nyanya', 'baz1': 'nyanya', 'foo': 4, 'foo1': 41})
    def test_bad_add_names(self):
        # Merging dicts with a colliding source key ('foo') must fail.
        first = t.Dict({
            t.Key('bar', default='nyanya') >> 'baz': t.String},
            foo=t.Int)
        second = t.Dict({
            t.Key('bar1', default='nyanya') >> 'baz1': t.String},
            foo=t.Int)
        with self.assertRaises(ValueError):
            first + second
    def test_bad_add_to_names(self):
        # Merging dicts with a colliding *renamed* target ('baz') fails too.
        first = t.Dict({
            t.Key('bar', default='nyanya') >> 'baz': t.String},
            foo=t.Int)
        second = t.Dict({
            t.Key('bar1', default='nyanya') >> 'baz': t.String},
            foo1=t.Int)
        with self.assertRaises(ValueError):
            first + second
    def test_add_to_names_list_of_keys(self):
        # A plain list of Keys can be added to a Dict.
        dct = t.Dict(key1=t.String)
        dct + [t.Key('a', trafaret=t.String())]
    def test_add_to_names_dict_of_keys(self):
        # A plain {name: trafaret} mapping can be added to a Dict.
        dct = t.Dict(key1=t.String)
        dct + {'a': t.String}
    def test_mapping_interface(self):
        trafaret = t.Dict({t.Key("foo"): t.String, t.Key("bar"): t.Float})
        # class with mapping interface but not subclass of dict
        class Map(AbcMapping):
            def __init__(self, data, *a, **kw):
                super(Map, self).__init__(*a, **kw)
                self._data = data
            def __getitem__(self, key):
                return self._data[key]
            def __iter__(self):
                for x in self._data:
                    yield x
            def __len__(self):
                return len(self._data)
        trafaret.check(Map({"foo": "xxx", "bar": 0.1}))
        # Non-mapping values are rejected outright.
        res = extract_error(trafaret, object())
        self.assertEqual(res, "value is not a dict")
        res = extract_error(trafaret, Map({"foo": "xxx"}))
        self.assertEqual(res, {'bar': 'is required'})
        res = extract_error(trafaret, Map({"foo": "xxx", "bar": 'str'}))
        self.assertEqual(res, {'bar': "value can't be converted to float"})
class TestDictKeys(unittest.TestCase):
    """t.DictKeys requires exactly the listed keys — no extras, none missing."""
    def test_dict_keys(self):
        self.assertEqual(t.DictKeys(['a', 'b']).check({'a': 1, 'b': 2}),
                         {'a': 1, 'b': 2})
        self.assertEqual(
            extract_error(t.DictKeys(['a', 'b']), {'a': 1, 'b': 2, 'c': 3}),
            {'c': 'c is not allowed key'},
        )
        self.assertEqual(
            extract_error(t.DictKeys(['key', 'key2']), {'key': 'val'}),
            {'key2': 'is required'},
        )
class TestEmailTrafaret(unittest.TestCase):
    """Tests for t.Email: validation, idna encoding of non-ASCII domains,
    downstream chaining of the match object, and length limits."""

    def test_email(self):
        res = t.Email().check('someone@example.net')
        self.assertEqual(res, 'someone@example.net')
        res = extract_error(t.Email(), 'someone@example')  # try without domain-part
        self.assertEqual(res, 'value is not a valid email address')
        res = str(t.Email().check('someone@пример.рф'))  # try with `idna` encoding
        self.assertEqual(res, 'someone@xn--e1afmkfd.xn--p1ai')
        # the regex match object is passed downstream, so its groups are usable
        res = (t.Email() >> (lambda m: m.groupdict()['domain'])).check('someone@example.net')
        self.assertEqual(res, 'example.net')
        res = extract_error(t.Email(), 'foo')
        self.assertEqual(res, 'value is not a valid email address')
        # an absurdly long address is rejected...
        res = extract_error(t.Email(), 'f' * 10000 + '@correct.domain.edu')
        self.assertEqual(res, 'value is not a valid email address')
        # ...but a long-yet-legal address still passes through unchanged
        res = extract_error(t.Email(), 'f' * 248 + '@x.edu') == 'f' * 248 + '@x.edu'
        self.assertEqual(res, True)
class TestEnumTrafaret(unittest.TestCase):
    """Tests for t.Enum: repr and membership checking of mixed-type variants."""

    def test_enum(self):
        trafaret = t.Enum("foo", "bar", 1) >> ignore
        self.assertEqual(repr(trafaret), "<Enum('foo', 'bar', 1)>")
        # member values pass; must not raise
        res = trafaret.check("foo")
        res = trafaret.check(1)
        res = extract_error(trafaret, 2)
        self.assertEqual(res, "value doesn't match any variant")
class TestFloat(unittest.TestCase):
    """Tests for t.Float: repr variants, gte/lte bounds, and coercion from
    ints and numeric strings."""

    def test_float_repr(self):
        res = t.Float()
        self.assertEqual(repr(res), '<Float>')
        res = t.Float(gte=1)
        self.assertEqual(repr(res), '<Float(gte=1)>')
        res = t.Float(lte=10)
        self.assertEqual(repr(res), '<Float(lte=10)>')
        res = t.Float(gte=1, lte=10)
        self.assertEqual(repr(res), '<Float(gte=1, lte=10)>')

    def test_float(self):
        res = t.Float().check(1.0)
        self.assertEqual(res, 1.0)
        res = extract_error(t.Float(), 1 + 3j)
        self.assertEqual(res, 'value is not float')
        # ints are coerced to float rather than rejected
        res = extract_error(t.Float(), 1)
        self.assertEqual(res, 1.0)
        res = t.Float(gte=2).check(3.0)
        self.assertEqual(res, 3.0)
        res = extract_error(t.Float(gte=2), 1.0)
        self.assertEqual(res, 'value is less than 2')
        res = t.Float(lte=10).check(5.0)
        self.assertEqual(res, 5.0)
        res = extract_error(t.Float(lte=3), 5.0)
        self.assertEqual(res, 'value is greater than 3')
        # numeric strings are coerced as well
        res = t.Float().check("5.0")
        self.assertEqual(res, 5.0)
class TestForwardTrafaret(unittest.TestCase):
    """Tests for t.Forward: recursive trafarets defined via the << operator."""

    def test_forward(self):
        node = t.Forward()
        # a node has a name and a list of child nodes (recursive structure)
        node << t.Dict(name=t.String, children=t.List[node])
        self.assertEqual(repr(node), '<Forward(<Dict(children=<List(<recur>)>, name=<String>)>)>')
        res = node.check({"name": "foo", "children": []}) == {'children': [], 'name': 'foo'}
        self.assertEqual(res, True)
        res = extract_error(node, {"name": "foo", "children": [1]})
        self.assertEqual(res, {'children': {0: 'value is not a dict'}})
        res = node.check({"name": "foo", "children": [{"name": "bar", "children": []}]})
        self.assertEqual(res, {'children': [{'children': [], 'name': 'bar'}], 'name': 'foo'})
        # a Forward with no trafaret assigned yet reports a readable error
        empty_node = t.Forward()
        self.assertEqual(repr(empty_node), '<Forward(None)>')
        res = extract_error(empty_node, 'something')
        self.assertEqual(res, 'trafaret not set yet')
class TestIntTrafaret(unittest.TestCase):
    """Behaviour of the t.Int trafaret."""

    def test_int(self):
        """Check repr, a successful check, and rejection of non-int numbers."""
        self.assertEqual(repr(t.Int()), '<Int>')
        self.assertEqual(t.Int().check(5), 5)
        # neither floats nor complex numbers are accepted
        for bad_value in (1.1, 1 + 1j):
            self.assertEqual(extract_error(t.Int(), bad_value),
                             'value is not int')
class TestKey(unittest.TestCase):
    """Tests for t.Key: defaults (plain and callable) and optional keys."""

    def test_key(self):
        default = lambda: 1
        res = t.Key(name='test', default=default)
        self.assertEqual(repr(res), '<Key "test">')
        # a Key is callable over the data and yields (name, value, path) tuples
        res = next(t.Key(name='test', default=default)({}))
        self.assertEqual(res, ('test', 1, ('test',)))
        res = next(t.Key(name='test', default=2)({}))
        self.assertEqual(res, ('test', 2, ('test',)))
        # a default of None (plain or returned by a callable) is honoured
        default = lambda: None
        res = next(t.Key(name='test', default=default)({}))
        self.assertEqual(res, ('test', None, ('test',)))
        res = next(t.Key(name='test', default=None)({}))
        self.assertEqual(res, ('test', None, ('test',)))
        # res = next(t.Key(name='test').pop({}))
        # self.assertEqual(res, ('test', DataError(is required)))
        # an optional key absent from the data yields nothing
        res = list(t.Key(name='test', optional=True)({}))
        self.assertEqual(res, [])
class TestList(unittest.TestCase):
    """Tests for t.List: repr, per-element checking, length bounds, and the
    t.List[...] metaclass sugar."""

    def test_list_repr(self):
        res = t.List(t.Int)
        self.assertEqual(repr(res), '<List(<Int>)>')
        res = t.List(t.Int, min_length=1)
        self.assertEqual(repr(res), '<List(min_length=1 | <Int>)>')
        res = t.List(t.Int, min_length=1, max_length=10)
        self.assertEqual(repr(res), '<List(min_length=1, max_length=10 | <Int>)>')

    def test_list(self):
        res = extract_error(t.List(t.Int), 1)
        self.assertEqual(res, 'value is not a list')
        res = t.List(t.Int).check([1, 2, 3])
        self.assertEqual(res, [1, 2, 3])
        res = t.List(t.String).check(["foo", "bar", "spam"])
        self.assertEqual(res, ['foo', 'bar', 'spam'])
        # element errors are reported keyed by index
        res = extract_error(t.List(t.Int), [1, 2, 1 + 3j])
        self.assertEqual(res, {2: 'value is not int'})
        res = t.List(t.Int, min_length=1).check([1, 2, 3])
        self.assertEqual(res, [1, 2, 3])
        res = extract_error(t.List(t.Int, min_length=1), [])
        self.assertEqual(res, 'list length is less than 1')
        res = t.List(t.Int, max_length=2).check([1, 2])
        self.assertEqual(res, [1, 2])
        res = extract_error(t.List(t.Int, max_length=2), [1, 2, 3])
        self.assertEqual(res, 'list length is greater than 2')
        res = extract_error(t.List(t.Int), ["a"])
        self.assertEqual(res, {0: "value can't be converted to int"})

    def test_list_meta(self):
        # t.List[...] sugar: slice components express min/max length
        res = t.List[t.Int]
        self.assertEqual(repr(res), '<List(<Int>)>')
        res = t.List[t.Int, 1:]
        self.assertEqual(repr(res), '<List(min_length=1 | <Int>)>')
        res = t.List[:10, t.Int]
        self.assertEqual(repr(res), '<List(max_length=10 | <Int>)>')
        # TODO
        # res = t.List[1:10]
        # self.assertEqual(res, Traceback (most recent call last):
        #     ...
        #     RuntimeError: Trafaret is required for List initialization
class TestMappingTrafaret(unittest.TestCase):
    """Tests for t.Mapping: a key trafaret and a value trafaret applied to
    every item of a dict."""

    def test_mapping(self):
        trafaret = t.Mapping(t.String, t.Int)
        self.assertEqual(repr(trafaret), '<Mapping(<String> => <Int>)>')
        res = trafaret.check({"foo": 1, "bar": 2})
        self.assertEqual(res, {'bar': 2, 'foo': 1})
        res = extract_error(trafaret, {"foo": 1, "bar": None})
        self.assertEqual(res, {'bar': {'value': 'value is not int'}})
        # both the key error and the value error are reported for a bad pair
        res = extract_error(trafaret, {"foo": 1, 2: "bar"})
        self.assertEqual(res, {2: {'key': 'value is not a string', 'value': "value can't be converted to int"}})
        res = extract_error(trafaret.check, None)
        self.assertEqual(res, 'value is not a dict')
class TestNullTrafaret(unittest.TestCase):
    """Behaviour of the t.Null trafaret."""

    def test_null(self):
        """Check repr, acceptance of None, and rejection of anything else."""
        null = t.Null()
        self.assertEqual(repr(null), '<Null>')
        null.check(None)  # must not raise
        self.assertEqual(extract_error(null, 1), 'value should be None')
class TestNumMeta(unittest.TestCase):
    """Tests for numeric metaclass sugar: slicing (gte/lte) and comparison
    operators (gt/lt) on t.Int / t.Float."""

    def test_num_meta_repr(self):
        res = t.Int[1:]
        self.assertEqual(repr(res), '<Int(gte=1)>')
        res = t.Int[1:10]
        self.assertEqual(repr(res), '<Int(gte=1, lte=10)>')
        res = t.Int[:10]
        self.assertEqual(repr(res), '<Int(lte=10)>')
        res = t.Float[1:]
        self.assertEqual(repr(res), '<Float(gte=1)>')
        res = t.Int > 3
        self.assertEqual(repr(res), '<Int(gt=3)>')
        # chained comparisons accumulate both bounds
        res = 1 < (t.Float < 10)
        self.assertEqual(repr(res), '<Float(gt=1, lt=10)>')

    def test_meta_res(self):
        res = (t.Int > 5).check(10)
        self.assertEqual(res, 10)
        res = extract_error(t.Int > 5, 1)
        self.assertEqual(res, 'value should be greater than 5')
        res = (t.Int < 3).check(1)
        self.assertEqual(res, 1)
        # bounds are strict: the boundary value itself is rejected
        res = extract_error(t.Int < 3, 3)
        self.assertEqual(res, 'value should be less than 3')
class TestOrNotToTest(unittest.TestCase):
    """Tests for t.Or: alternation between trafarets and the << sugar."""

    def test_or(self):
        nullString = t.Or(t.String, t.Null)
        self.assertEqual(repr(nullString), '<Or(<String>, <Null>)>')
        # either variant passes; None must not raise
        res = nullString.check(None)
        res = nullString.check("test")
        self.assertEqual(res, 'test')
        # on failure every variant's error is reported, keyed by position
        res = extract_error(nullString, 1)
        self.assertEqual(res, {0: 'value is not a string', 1: 'value should be None'})
        res = t.Or << t.Int << t.String
        self.assertEqual(repr(res), '<Or(<Int>, <String>)>')
class TestStrBoolTrafaret(unittest.TestCase):
    """Behaviour of t.StrBool: lenient conversion of common truthy/falsy
    spellings to real booleans."""

    def test_str_bool(self):
        """An unknown spelling errors; known spellings convert as expected."""
        self.assertEqual(extract_error(t.StrBool(), 'aloha'),
                         "value can't be converted to Bool")
        # (input, expected) pairs, checked in the original order
        cases = [
            (1, True),
            (0, False),
            ('y', True),
            ('n', False),
            (None, False),
            ('1', True),
            ('0', False),
            ('YeS', True),
            ('No', False),
            (True, True),
            (False, False),
        ]
        for value, expected in cases:
            self.assertEqual(t.StrBool().check(value), expected)
class TestStringTrafaret(unittest.TestCase):
    """Tests for t.String: blank handling, regex matching and length bounds."""

    def test_string(self):
        res = t.String()
        self.assertEqual(repr(res), '<String>')
        res = t.String(allow_blank=True)
        self.assertEqual(repr(res), '<String(blank)>')
        res = t.String().check("foo")
        self.assertEqual(res, 'foo')
        res = extract_error(t.String(), "")
        self.assertEqual(res, 'blank value is not allowed')
        res = t.String(allow_blank=True).check("")
        self.assertEqual(res, '')
        res = extract_error(t.String(), 1)
        self.assertEqual(res, 'value is not a string')
        # raw strings for regexes: '\w' in a plain literal is an invalid
        # escape sequence (DeprecationWarning on modern Python); the runtime
        # value is unchanged, so the expected error message below still holds
        res = t.String(regex=r'\w+').check('wqerwqer')
        self.assertEqual(res, 'wqerwqer')
        res = extract_error(t.String(regex=r'^\w+$'), 'wqe rwqer')
        self.assertEqual(res, "value does not match pattern: '^\\\\w+$'")
        res = t.String(min_length=2, max_length=3).check('123')
        self.assertEqual(res, '123')
        res = extract_error(t.String(min_length=2, max_length=6), '1')
        self.assertEqual(res, 'String is shorter than 2 characters')
        res = extract_error(t.String(min_length=2, max_length=6), '1234567')
        self.assertEqual(res, 'String is longer than 6 characters')
        # TODO
        # res = String(min_length=2, max_length=6, allow_blank=True)
        # self.assertEqual(res, Traceback (most recent call last):
        #     ...
        #     AssertionError: Either allow_blank or min_length should be specified, not both
        res = t.String(min_length=0, max_length=6, allow_blank=True).check('123')
        self.assertEqual(res, '123')
class TestTrafaretMeta(unittest.TestCase):
    """Tests for trafaret operators: >> chaining of converters and | sugar
    for building Or trafarets."""

    def test_meta(self):
        # >> pipes the checked value through converters, left to right
        res = (t.Int() >> (lambda x: x * 2) >> (lambda x: x * 3)).check(1)
        self.assertEqual(res, 6)
        res = (t.Int() >> float >> str).check(4)
        self.assertEqual(res, '4.0')
        # | builds an Or of the operands
        res = t.Int | t.String
        self.assertEqual(repr(res), '<Or(<Int>, <String>)>')
        res = t.Int | t.String | t.Null
        self.assertEqual(repr(res), '<Or(<Int>, <String>, <Null>)>')
        res = (t.Int >> (lambda v: v if v ** 2 > 15 else 0)).check(5)
        self.assertEqual(res, 5)
class TestTupleTrafaret(unittest.TestCase):
    """Behaviour of the t.Tuple trafaret."""

    def test_tuple(self):
        """Check repr, list-to-tuple conversion, and per-position errors."""
        trafaret = t.Tuple(t.Int, t.Int, t.String)
        self.assertEqual(repr(trafaret), '<Tuple(<Int>, <Int>, <String>)>')
        self.assertEqual(trafaret.check([3, 4, '5']), (3, 4, '5'))
        # element errors are reported keyed by position
        self.assertEqual(extract_error(trafaret, [3, 4, 5]),
                         {2: 'value is not a string'})
class TestTypeTrafaret(unittest.TestCase):
    """Tests for t.Type: strict type checks, via call or [] sugar."""

    def test_type(self):
        res = t.Type(int)
        self.assertEqual(repr(res), '<Type(int)>')
        # t.Type[int] is sugar for t.Type(int)
        c = t.Type[int]
        res = c.check(1)
        self.assertEqual(res, 1)
        res = extract_error(c, "foo")
        self.assertEqual(res, 'value is not int')
class TestSubclassTrafaret(unittest.TestCase):
    """Tests for t.Subclass: issubclass checks, via call or [] sugar."""

    def test_subclass(self):
        res = t.Subclass(type)
        self.assertEqual(repr(res), '<Subclass(type)>')
        c = t.Subclass[type]

        # a metaclass is a subclass of type, so it passes
        class Type(type):
            pass

        res = c.check(Type)
        self.assertEqual(res, Type)
        # object is not a subclass of type, so it is rejected
        res = extract_error(c, object)
        self.assertEqual(res, 'value is not subclass of type')
class TestURLTrafaret(unittest.TestCase):
    """Tests for t.URL: plain URLs pass through; non-ASCII hosts are
    idna-encoded."""

    def test_url(self):
        res = t.URL().check('http://example.net/resource/?param=value#anchor')
        self.assertEqual(res, 'http://example.net/resource/?param=value#anchor')
        res = str(t.URL().check('http://пример.рф/resource/?param=value#anchor'))
        self.assertEqual(res, 'http://xn--e1afmkfd.xn--p1ai/resource/?param=value#anchor')
class TestKeysSubset(unittest.TestCase):
    """Tests for KeysSubset: route a group of keys through one callable that
    may merge, validate or rename them."""

    def test_keys_subset(self):
        # compare two password fields, collapsing them into a single 'pwd'
        cmp_pwds = lambda x: {'pwd': x['pwd'] if x.get('pwd') == x.get('pwd1') else t.DataError('Not equal')}
        d = t.Dict({KeysSubset('pwd', 'pwd1'): cmp_pwds, 'key1': t.String})
        res = d.check({'pwd': 'a', 'pwd1': 'a', 'key1': 'b'}).keys()
        self.assertEqual(list(sorted(res)), ['key1', 'pwd'])
        res = extract_error(d.check, {'pwd': 'a', 'pwd1': 'c', 'key1': 'b'})
        self.assertEqual(res, {'pwd': 'Not equal'})
        res = extract_error(d.check, {'pwd': 'a', 'pwd1': None, 'key1': 'b'})
        self.assertEqual(res, {'pwd': 'Not equal'})
        # join two keys into one value
        get_values = (lambda d, keys: [d[k] for k in keys if k in d])
        join = (lambda d: {'name': ' '.join(get_values(d, ['name', 'last']))})
        res = t.Dict({KeysSubset('name', 'last'): join}).check({'name': 'Adam', 'last': 'Smith'})
        self.assertEqual(res, {'name': 'Adam Smith'})
        # an empty KeysSubset applies its trafaret to the whole dict
        res = t.Dict({KeysSubset(): t.Dict({'a': t.Any})}).check({'a': 3})
        self.assertEqual(res, {'a': 3})
class TestDataError(unittest.TestCase):
    """Rendering of t.DataError via as_dict, with and without the value."""

    def test_dataerror_value(self):
        error = t.DataError(error='Wait for good value', value='BAD ONE')
        # by default the offending value is omitted
        self.assertEqual(
            error.as_dict(),
            'Wait for good value'
        )
        # value=True appends the repr of the offending value
        self.assertEqual(
            error.as_dict(value=True),
            "Wait for good value, got 'BAD ONE'"
        )
# res = @guard(a=String, b=Int, c=String)
# def fn(a, b, c="default"):
# '''docstring'''
# return (a, b, c)
# res = fn.__module__ = None
# res = help(fn)
# self.assertEqual(res, Help on function fn:
# <BLANKLINE>
# fn(*args, **kwargs)
# guarded with <Dict(a=<String>, b=<Int>, c=<String>)>
# <BLANKLINE>
# docstring
# <BLANKLINE>
# **********************************************************************
# File "/Users/mkrivushin/w/trafaret/trafaret/__init__.py", line 1260, in trafaret.guard
# Failed example:
# help(fn)
# Expected:
# Help on function fn:
# <BLANKLINE>
# fn(*args, **kwargs)
# guarded with <Dict(a=<String>, b=<Int>, c=<String>)>
# <BLANKLINE>
# docstring
# <BLANKLINE>
# Got:
# Help on function fn:
# <BLANKLINE>
# fn(a, b, c='default')
# guarded with <Dict(a=<String>, b=<Int>, c=<String>)>
# <BLANKLINE>
# docstring
# <BLANKLINE>
# res = fn("foo", 1)
# self.assertEqual(res, ('foo', 1, 'default')
# res = extract_error(fn, "foo", 1, 2)
# self.assertEqual(res, {'c': 'value is not a string'}
# res = extract_error(fn, "foo")
# self.assertEqual(res, {'b': 'is required'}
# res = g = guard(Dict())
# res = c = Forward()
# res = c << Dict(name=str, children=List[c])
# res = g = guard(c)
# res = g = guard(Int())
# self.assertEqual(res, Traceback (most recent call last):
# ...
# RuntimeError: trafaret should be instance of Dict or Forward
# res = a = Int >> ignore
# res = a.check(7)
# ***Test Failed*** 2 failures.
# res = _dd(fold({'a__a': 4}))
# self.assertEqual(res, "{'a': {'a': 4}}"
# res = _dd(fold({'a__a': 4, 'a__b': 5}))
# self.assertEqual(res, "{'a': {'a': 4, 'b': 5}}"
# res = _dd(fold({'a__1': 2, 'a__0': 1, 'a__2': 3}))
# self.assertEqual(res, "{'a': [1, 2, 3]}"
# res = _dd(fold({'form__a__b': 5, 'form__a__a': 4}, 'form'))
# self.assertEqual(res, "{'a': {'a': 4, 'b': 5}}"
# res = _dd(fold({'form__a__b': 5, 'form__a__a__0': 4, 'form__a__a__1': 7}, 'form'))
# self.assertEqual(res, "{'a': {'a': [4, 7], 'b': 5}}"
# res = repr(fold({'form__1__b': 5, 'form__0__a__0': 4, 'form__0__a__1': 7}, 'form'))
# self.assertEqual(res, "[{'a': [4, 7]}, {'b': 5}]"
# res = _dd(unfold({'a': 4, 'b': 5}))
# self.assertEqual(res, "{'a': 4, 'b': 5}"
# res = _dd(unfold({'a': [1, 2, 3]}))
# self.assertEqual(res, "{'a__0': 1, 'a__1': 2, 'a__2': 3}"
# res = _dd(unfold({'a': {'a': 4, 'b': 5}}))
# self.assertEqual(res, "{'a__a': 4, 'a__b': 5}"
# res = _dd(unfold({'a': {'a': 4, 'b': 5}}, 'form'))
# self.assertEqual(res, "{'form__a__a': 4, 'form__a__b': 5}"
# res = from trafaret import Int
# res = class A(object):
# class B(object):
# d = {'a': 'word'}
# res = dict((DeepKey('B.d.a') >> 'B_a').pop(A))
# self.assertEqual(res, {'B_a': 'word'}
# res = dict((DeepKey('c.B.d.a') >> 'B_a').pop({'c': A}))
# self.assertEqual(res, {'B_a': 'word'}
# res = dict((DeepKey('B.a') >> 'B_a').pop(A))
# self.assertEqual(res, {'B.a': DataError(Unexistent key)}
# res = dict(DeepKey('c.B.d.a', to_name='B_a', trafaret=Int()).pop({'c': A}))
# self.assertEqual(res, {'B_a': DataError(value can't be converted to int)}
| 25,884 | 9,514 |
# coding: utf-8
# This file is part of libdesktop
# The MIT License (MIT)
#
# Copyright (c) 2016 Bharadwaj Raju
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import subprocess as sp
from libdesktop import system
import sys
def construct(name, exec_, terminal=False, additional_opts=None):
    '''Construct a .desktop file and return it as a string.

    Create a standards-compliant .desktop file, returning it as a string.

    Args:
        name (str) : The program's name.
        exec\_ (str) : The command.
        terminal (bool): Determine if program should be run in a terminal emulator or not. Defaults to ``False``.
        additional_opts (dict): Any additional fields. Defaults to ``None`` (no extras).

    Returns:
        str: The constructed .desktop file.
    '''
    # Normalize up front. The original used a mutable default ({}) and only
    # checked for None *after* reading from the dict, so an explicit None
    # argument crashed before reaching the check.
    if additional_opts is None:
        additional_opts = {}

    # Standard fields; a 'Comment' in additional_opts overrides the default.
    desktop_file_dict = {
        'Name': name,
        'Exec': exec_,
        'Terminal': 'true' if terminal else 'false',
        'Comment': additional_opts.get('Comment', name),
    }

    desktop_file = ('[Desktop Entry]\nName={name}\nExec={exec_}\n'
                    'Terminal={terminal}\nComment={comment}\n')
    desktop_file = desktop_file.format(name=desktop_file_dict['Name'],
                                       exec_=desktop_file_dict['Exec'],
                                       terminal=desktop_file_dict['Terminal'],
                                       comment=desktop_file_dict['Comment'])

    # Append any extra fields that are not already covered above.
    for option in additional_opts:
        if option not in desktop_file_dict:
            desktop_file += '%s=%s\n' % (option, additional_opts[option])

    return desktop_file
def execute(desktop_file, files=None, return_cmd=False, background=False):
    '''Execute a .desktop file.

    Executes a given .desktop file path properly.

    Args:
        desktop_file (str) : The path to the .desktop file.
        files (list): Any files to be launched by the .desktop. Defaults to empty list.
        return_cmd (bool): Return the command (as ``str``) instead of executing. Defaults to ``False``.
        background (bool): Run command in background. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``. Returns command instead of running it. Else returns nothing.
    '''
    # Attempt to manually parse and execute
    desktop_file_exec = parse(desktop_file)['Exec']

    # Strip Desktop Entry field codes (%f, %F, %u, ...) from the Exec line.
    # NOTE(review): replace() operates on the whole string, so a field code
    # would also erase identical substrings elsewhere in the command.
    for i in desktop_file_exec.split():
        if i.startswith('%'):
            desktop_file_exec = desktop_file_exec.replace(i, '')
    # Redundant with the loop above; kept as a belt-and-braces cleanup.
    desktop_file_exec = desktop_file_exec.replace(r'%F', '')
    desktop_file_exec = desktop_file_exec.replace(r'%f', '')

    # Append any files to be opened to the command line.
    if files:
        for i in files:
            desktop_file_exec += ' ' + i

    if parse(desktop_file)['Terminal']:
        # Use eval and __import__ to bypass a circular dependency
        # NOTE(review): eval with interpolated command text is fragile and
        # unsafe if the .desktop content is untrusted; a lazy in-function
        # import of libdesktop.applications would avoid both issues.
        desktop_file_exec = eval(
            ('__import__("libdesktop").applications.terminal(exec_="%s",'
             ' keep_open_after_cmd_exec=True, return_cmd=True)') %
            desktop_file_exec)

    if return_cmd:
        return desktop_file_exec

    # shell=True: the assembled command string is interpreted by the shell.
    desktop_file_proc = sp.Popen([desktop_file_exec], shell=True)

    if not background:
        desktop_file_proc.wait()
def locate(desktop_filename_or_name):
    '''Locate a .desktop from the standard locations.

    Find the path to the .desktop file of a given .desktop filename or application name.

    Standard locations:

    - ``~/.local/share/applications/``
    - ``/usr/share/applications``

    Args:
        desktop_filename_or_name (str): Either the filename of a .desktop file or the name of an application.

    Returns:
        list: A list of all matching .desktop files found.
    '''
    paths = [
        os.path.expanduser('~/.local/share/applications'),
        '/usr/share/applications']

    result = []

    for path in paths:
        for file in os.listdir(path):
            if desktop_filename_or_name in file.split(
                    '.') or desktop_filename_or_name == file:
                # Example: org.gnome.gedit
                result.append(os.path.join(path, file))
            else:
                # Fall back to matching the parsed Name / Exec fields.
                # NOTE(review): these branches append the bare filename, not
                # the full path, mirroring the original behaviour — confirm
                # whether callers expect full paths here.
                file_parsed = parse(os.path.join(path, file))

                try:
                    if desktop_filename_or_name.lower() == file_parsed[
                            'Name'].lower():
                        result.append(file)
                    elif desktop_filename_or_name.lower() == file_parsed[
                            'Exec'].split(' ')[0]:
                        result.append(file)
                except KeyError:
                    pass

    # Keep only .desktop files. The original removed items from `result`
    # while iterating over it, which silently skips elements; build a new
    # list instead.
    result = [res for res in result if res.endswith('.desktop')]

    # Retry with an explicit .desktop suffix, but only when the query itself
    # lacks one. (The original called .endswith() on the *list*, raising
    # AttributeError whenever nothing had been found.)
    if not result and not desktop_filename_or_name.endswith('.desktop'):
        result.extend(locate(desktop_filename_or_name + '.desktop'))

    return result
def parse(desktop_file_or_string):
    '''Parse a .desktop file.

    Parse a .desktop file or a string with its contents into an easy-to-use dict, with standard values present even if not defined in file.

    Args:
        desktop_file_or_string (str): Either the path to a .desktop file or a string with a .desktop file as its contents.

    Returns:
        dict: A dictionary of the parsed file.'''
    if os.path.isfile(desktop_file_or_string):
        with open(desktop_file_or_string) as f:
            desktop_file = f.read()
    else:
        desktop_file = desktop_file_or_string

    result = {}

    for line in desktop_file.split('\n'):
        if '=' in line:
            # Split on the *first* '=' only: values such as
            # "Exec=app --opt=1" legitimately contain '=' themselves.
            # (The original used split('=')[1], truncating such values.)
            key, _, value = line.partition('=')
            result[key] = value

    # Convert the spec's lowercase booleans to real bools.
    for key, value in result.items():
        if value == 'false':
            result[key] = False
        elif value == 'true':
            result[key] = True

    # Standard values are present even when absent from the file.
    if 'Terminal' not in result:
        result['Terminal'] = False
    if 'Hidden' not in result:
        result['Hidden'] = False

    return result
| 6,107 | 2,169 |
import os
import cv2
import keras
import numpy as np
import albumentations as A
import tensorflow as tf
from keras import backend as K
def jaccard_distance(y_true, y_pred, smooth=100):
    """Soft Jaccard (IoU) distance over the last axis, scaled by `smooth`.

    The smoothing term keeps the ratio defined (and differentiable) when
    both masks are empty.
    """
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)
    # |X ∩ Y| / |X ∪ Y|, with smoothing in numerator and denominator
    jac = (intersection + smooth) / (sum_ - intersection + smooth)
    return (1 - jac) * smooth
def iou(y_true, y_pred, label: int):
    """
    Return the Intersection over Union (IoU) for a given label.
    Args:
        y_true: the expected y values as a one-hot
        y_pred: the predicted y values as a one-hot or softmax output
        label: the label to return the IoU for
    Returns:
        the IoU for the given label
    """
    # extract the label values using the argmax operator then
    # calculate equality of the predictions and truths to the label
    y_true = K.cast(K.equal(K.argmax(y_true), label), K.floatx())
    y_pred = K.cast(K.equal(K.argmax(y_pred), label), K.floatx())
    # calculate the |intersection| (AND) of the labels
    intersection = K.sum(y_true * y_pred)
    # calculate the |union| (OR) of the labels
    union = K.sum(y_true) + K.sum(y_pred) - intersection
    # avoid divide by zero - if the union is zero, return 1
    # otherwise, return the intersection over union
    # (K.switch selects lazily, so the division is not evaluated when union == 0)
    return K.switch(K.equal(union, 0), 1.0, intersection / union)
def mean_iou(y_true, y_pred):
    """
    Return the Intersection over Union (IoU) score.
    Args:
        y_true: the expected y values as a one-hot
        y_pred: the predicted y values as a one-hot or softmax output
    Returns:
        the scalar IoU value (mean over all labels)
    """
    # get number of labels to calculate IoU for
    # (the last axis of y_pred must be the class axis with a static size)
    num_labels = K.int_shape(y_pred)[-1]
    # initialize a variable to store total IoU in
    total_iou = K.variable(0)
    # iterate over labels to calculate IoU for
    for label in range(num_labels):
        total_iou = total_iou + iou(y_true, y_pred, label)
    # divide total IoU by number of labels to get mean IoU
    return total_iou / num_labels
def IoU(y_pred, y_true):
    # Batch-mean IoU reduced over axes 1 and 2 — assumes layout
    # (batch, height, width[, ...]); TODO confirm against callers.
    # NOTE(review): no smoothing term here, so an empty union divides by zero.
    I = tf.reduce_sum(y_pred * y_true, axis=(1, 2))
    U = tf.reduce_sum(y_pred + y_true, axis=(1, 2)) - I
    return tf.reduce_mean(I / U)
# Default smoothing term shared by the IoU/Dice coefficients below.
SMOOTH = 1e-01

def iou_coef(y_true, y_pred, smooth=SMOOTH):
    """
    IoU = (|X & Y|)/ (|X or Y|)

    Smoothed soft intersection-over-union, reduced over the last axis.
    """
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    # Union = |X| + |Y| - |X & Y|. The original line summed the tuple
    # (y_true, -1) with a tensor — K.sum((y_true,-1) + K.sum(y_pred,-1)) —
    # which is a type error, not a union.
    union = K.sum(K.abs(y_true), axis=-1) + K.sum(K.abs(y_pred), axis=-1) - intersection
    return (intersection + smooth) / (union + smooth)
def dice_coef(y_true, y_pred, smooth=1e-01):
    # Soft Dice coefficient over the last axis:
    # 2|X ∩ Y| / (Σx² + Σy²), smoothed to stay defined for empty masks.
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    return (2. * intersection + smooth) / (K.sum(K.square(y_true),-1) + K.sum(K.square(y_pred),-1) + smooth)
def dice_coef_loss(y_true, y_pred):
    # Loss form of the Dice coefficient: perfect overlap -> 0, none -> ~1.
    similarity = dice_coef(y_true, y_pred)
    return 1 - similarity
#!/usr/bin/env python
#
# A minimal Python language binding for the OpsRamp REST API.
#
# monitoring.py
# Classes related to monitoring templates and similar things.
#
# (c) Copyright 2019-2021 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opsramp.api import ORapi
class Monitoring(ORapi):
    '''Wrapper for the "monitoring" subtree of the OpsRamp REST API.'''

    def __init__(self, parent):
        # Anchor this API object at <parent>/monitoring.
        super(Monitoring, self).__init__(parent.api, 'monitoring')

    def templates(self):
        '''Return a Templates object rooted below this monitoring subtree.'''
        return Templates(self)
class Templates(ORapi):
    '''Wrapper for the "templates" subtree below a Monitoring object.'''

    def __init__(self, parent):
        # Anchor this API object at <parent>/templates.
        super(Templates, self).__init__(parent.api, 'templates')
| 1,119 | 339 |
from client import exceptions as ex
from client.sources import doctest
from client.sources.doctest import models
import mock
import unittest
import os.path
class LoadTest(unittest.TestCase):
    """Unit tests for doctest.load: discovering doctests in a module.

    os.path.isfile and the module importer are patched out, so no real
    files are touched; self.mockModule stands in for the imported module.
    """

    VALID_FILE = 'valid.py'
    VALID_MODULE = os.path.splitext(VALID_FILE)[0]
    INVALID_FILE = 'invalid.ext'
    FUNCTION = 'function'
    METHOD = 'cls.method'

    def setUp(self):
        # Pretend every path exists unless a test overrides it.
        self.patcherIsFile = mock.patch('os.path.isfile')
        self.addCleanup(self.patcherIsFile.stop)
        self.mockIsFile = self.patcherIsFile.start()
        self.mockIsFile.return_value = True

        # Replace module importing so load() receives a MagicMock module.
        self.patcherLoadModule = mock.patch('client.sources.common.importing.load_module')
        self.addCleanup(self.patcherLoadModule.stop)
        self.mockLoadModule = self.patcherLoadModule.start()
        self.mockModule = mock.MagicMock()
        self.mockLoadModule.return_value = self.mockModule

        self.cls = mock.Mock()
        self.mockFunction = mock.Mock()
        self.assign = mock.Mock()

    def call_load(self, file=VALID_FILE, name=''):
        # Helper: invoke doctest.load against the mocked collaborators.
        return doctest.load(file, name, self.assign)

    def testFileDoesNotExist(self):
        self.mockIsFile.return_value = False
        self.assertRaises(ex.LoadingException, self.call_load)

    def testInvalidFileType(self):
        # Only .py files can carry doctests.
        self.assertRaises(ex.LoadingException, self.call_load,
                          file=self.INVALID_FILE)

    def testImportError(self):
        # A failing import is surfaced as a LoadingException.
        self.mockLoadModule.side_effect = Exception
        self.assertRaises(ex.LoadingException, self.call_load)

    def testNoFunctions(self):
        self.mockModule.__dir__ = lambda *args: []
        result = self.call_load()
        self.assertEqual({}, result)

    def testSpecificFunction(self):
        setattr(self.mockModule, self.FUNCTION, self.mockFunction)
        self.mockFunction.__doc__ = """
        >>> 1 + 2
        3
        """
        result = self.call_load(name=self.FUNCTION)

        self.assertIsInstance(result, dict)
        self.assertEqual(1, len(result))
        self.assertIn(self.FUNCTION, result)
        self.assertIsInstance(result[self.FUNCTION], models.Doctest)

    def testSpecificFunction_noDoctest(self):
        # A named function with no docstring still yields a Doctest entry.
        setattr(self.mockModule, self.FUNCTION, self.mockFunction)
        self.mockFunction.__doc__ = None
        result = self.call_load(name=self.FUNCTION)

        self.assertIsInstance(result, dict)
        self.assertEqual(1, len(result))
        self.assertIn(self.FUNCTION, result)
        self.assertIsInstance(result[self.FUNCTION], models.Doctest)

    def testAllFunctions(self):
        self.mockModule.__dir__ = lambda *args: [self.FUNCTION]
        setattr(self.mockModule, self.FUNCTION, self.mockFunction)
        self.mockFunction.__doc__ = """
        >>> 1 + 2
        3
        """
        # the function must appear to be defined in the loaded module
        self.mockModule.__name__ = self.mockFunction.__module__ = self.VALID_MODULE
        result = self.call_load()

        self.assertIsInstance(result, dict)
        self.assertEqual(1, len(result))
        self.assertIn(self.FUNCTION, result)
        self.assertIsInstance(result[self.FUNCTION], models.Doctest)

    def testSpecificMethod(self):
        self.mockFunction.__doc__ = """
        >>> 1 + 2
        3
        """
        # wire up mockModule.cls.method so the dotted name resolves
        setattr(self.cls, self.METHOD.split('.')[1], self.mockFunction)
        setattr(self.mockModule, self.METHOD.split('.')[0], self.cls)
        result = self.call_load(name=self.METHOD)

        self.assertIsInstance(result, dict)
        self.assertEqual(1, len(result))
        self.assertIn(self.METHOD, result)
        self.assertIsInstance(result[self.METHOD], models.Doctest)

    def _testOnlyModuleFunctions(self, filter_fn):
        # Build three mock functions; filter_fn decides which of them claim
        # to belong to the loaded module (the rest claim module 'other').
        fn_names = []
        mock_fns = []
        expected = []
        for i in range(3):
            fn_name = 'f{}'.format(i)
            fn_names.append(fn_name)

            mock_fn = mock.Mock()
            mock_fn.__doc__ = """
            >>> {0} + {1}
            {2}
            """.format(i, i*2, i*3)
            mock_fns.append(mock_fn)

            if filter_fn(i):
                mock_fn.__module__ = self.VALID_MODULE
                expected.append(fn_name)
            else:
                mock_fn.__module__ = 'other'

        self.mockModule.__name__ = self.VALID_MODULE
        self.mockModule.__dir__ = lambda *args: fn_names
        for fn_name, mock_fn in zip(fn_names, mock_fns):
            setattr(self.mockModule, fn_name, mock_fn)

        result = self.call_load()

        # only functions defined in the module itself are collected
        self.assertIsInstance(result, dict)
        self.assertEqual(len(expected), len(result))
        for fn_name in expected:
            self.assertIn(fn_name, result)
            self.assertIsInstance(result[fn_name], models.Doctest)

    def testOnlyModuleFunctions_allModuleFunctions(self):
        self._testOnlyModuleFunctions(lambda i: True)

    def testOnlyModuleFunctions_someModuleFunctions(self):
        self._testOnlyModuleFunctions(lambda i: i % 2 == 0)

    def testOnlyModuleFunctions_noModuleFunctions(self):
        self._testOnlyModuleFunctions(lambda i: False)
| 5,045 | 1,584 |
#!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_cosmosdbaccount_info
short_description: Get Azure Cosmos DB Account facts
description:
- Get facts of Azure Cosmos DB Account.
options:
resource_group:
description:
- Name of an Azure resource group.
name:
description:
- Cosmos DB database account name.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
    retrieve_keys:
        description:
            - Retrieve access keys (all keys, or the read-only keys).
type: str
choices:
- all
- readonly
retrieve_connection_strings:
description:
- Retrieve connection strings.
type: bool
extends_documentation_fragment:
- azure.azcollection.azure
author:
- Zim Kalinowski (@zikalino)
'''
# Example tasks for the module documentation.
# BUGFIX: the second example used the misspelled module name
# 'azure_rm_cosmosdbaccousnt_info'; it now uses the same fully-qualified
# collection name as the first example.
EXAMPLES = '''
  - name: Get instance of Database Account
    community.azure.azure_rm_cosmosdbaccount_info:
      resource_group: myResourceGroup
      name: testaccount
  - name: List instances of Database Account
    community.azure.azure_rm_cosmosdbaccount_info:
      resource_group: myResourceGroup
'''
RETURN = '''
accounts:
description: A list of dictionaries containing facts for Database Account.
returned: always
type: complex
contains:
id:
description:
- The unique resource identifier of the database account.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccount
s/testaccount"
resource_group:
description:
- Name of an Azure resource group.
returned: always
type: str
sample: myResourceGroup
name:
description:
- The name of the database account.
returned: always
type: str
sample: testaccount
location:
description:
- The location of the resource group to which the resource belongs.
returned: always
type: str
sample: westus
kind:
description:
- Indicates the type of database account.
returned: always
type: str
sample: global_document_db
consistency_policy:
description:
- Consistency policy.
returned: always
type: complex
contains:
default_consistency_level:
description:
- Default consistency level.
returned: always
type: str
sample: session
max_interval_in_seconds:
description:
- Maximum interval in seconds.
returned: always
type: int
sample: 5
max_staleness_prefix:
description:
- Maximum staleness prefix.
returned: always
type: int
sample: 100
failover_policies:
description:
- The list of new failover policies for the failover priority change.
returned: always
type: complex
contains:
name:
description:
- Location name.
returned: always
type: str
sample: eastus
failover_priority:
description:
- Failover priority.
returned: always
type: int
sample: 0
id:
description:
- Read location ID.
returned: always
type: str
sample: testaccount-eastus
read_locations:
description:
- Read locations.
returned: always
type: complex
contains:
name:
description:
- Location name.
returned: always
type: str
sample: eastus
failover_priority:
description:
- Failover priority.
returned: always
type: int
sample: 0
id:
description:
- Read location ID.
returned: always
type: str
sample: testaccount-eastus
document_endpoint:
description:
- Document endpoint.
returned: always
type: str
sample: https://testaccount-eastus.documents.azure.com:443/
provisioning_state:
description:
- Provisioning state.
returned: always
type: str
sample: Succeeded
write_locations:
description:
- Write locations.
returned: always
type: complex
contains:
name:
description:
- Location name.
returned: always
type: str
sample: eastus
failover_priority:
description:
- Failover priority.
returned: always
type: int
sample: 0
id:
description:
- Read location ID.
returned: always
type: str
sample: testaccount-eastus
document_endpoint:
description:
- Document endpoint.
returned: always
type: str
sample: https://testaccount-eastus.documents.azure.com:443/
provisioning_state:
description:
- Provisioning state.
returned: always
type: str
sample: Succeeded
database_account_offer_type:
description:
- Offer type.
returned: always
type: str
sample: Standard
ip_range_filter:
description:
- Enable IP range filter.
returned: always
type: str
sample: 10.10.10.10
is_virtual_network_filter_enabled:
description:
- Enable virtual network filter.
returned: always
type: bool
sample: true
enable_automatic_failover:
description:
- Enable automatic failover.
returned: always
type: bool
sample: true
enable_cassandra:
description:
- Enable Cassandra.
returned: always
type: bool
sample: true
enable_table:
description:
- Enable Table.
returned: always
type: bool
sample: true
enable_gremlin:
description:
- Enable Gremlin.
returned: always
type: bool
sample: true
virtual_network_rules:
description:
- List of Virtual Network ACL rules configured for the Cosmos DB account.
type: list
contains:
subnet:
description:
- Resource id of a subnet.
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNet
works/testvnet/subnets/testsubnet1"
ignore_missing_vnet_service_endpoint:
description:
- Create Cosmos DB account without existing virtual network service endpoint.
type: bool
enable_multiple_write_locations:
description:
- Enable multiple write locations.
returned: always
type: bool
sample: true
document_endpoint:
description:
- Document endpoint.
returned: always
type: str
sample: https://testaccount.documents.azure.com:443/
provisioning_state:
description:
- Provisioning state of Cosmos DB.
returned: always
type: str
sample: Succeeded
primary_master_key:
description:
- Primary master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
    secondary_master_key:
        description:
            - Secondary master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
    primary_readonly_master_key:
        description:
            - Primary readonly master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
    secondary_readonly_master_key:
        description:
            - Secondary readonly master key.
returned: when requested
type: str
sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
connection_strings:
description:
- List of connection strings.
type: list
returned: when requested
contains:
            connection_string:
                description:
                    - Connection string.
                type: str
                returned: always
                sample: "AccountEndpoint=https://testaccount.documents.azure.com:443/;AccountKey=fSEjathnk6ZeBTrXkud9j5kfhtSEQ
                         q3dpJxJga76h9BZkK2BJJrDzSO6DDn6yKads017OZBZ1YZWyq1cW4iuvA=="
            description:
                description:
                    - Description of connection string.
                type: str
                returned: always
                sample: Primary SQL Connection String
tags:
description:
- Tags assigned to the resource. Dictionary of "string":"string" pairs.
returned: always
type: dict
sample: { "tag1":"abc" }
'''
from ansible_collections.azure.azcollection.plugins.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _camel_to_snake
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.cosmosdb import CosmosDB
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMCosmosDBAccountInfo(AzureRMModuleBase):
    """Info module gathering facts about Azure Cosmos DB database accounts.

    Depending on the supplied arguments it returns a single account
    (resource_group + name), every account of a resource group, or every
    account in the subscription, optionally enriched with access keys and
    connection strings.
    """

    def __init__(self):
        # define user inputs into argument
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str'
            ),
            name=dict(
                type='str'
            ),
            tags=dict(
                type='list'
            ),
            retrieve_keys=dict(
                type='str',
                choices=['all', 'readonly']
            ),
            retrieve_connection_strings=dict(
                type='bool'
            )
        )
        # store the results of the module operation
        self.results = dict(
            changed=False
        )
        self.mgmt_client = None
        self.resource_group = None
        self.name = None
        self.tags = None
        self.retrieve_keys = None
        self.retrieve_connection_strings = None
        super(AzureRMCosmosDBAccountInfo, self).__init__(self.module_arg_spec, supports_tags=False)

    def exec_module(self, **kwargs):
        """Dispatch to the right fact-gathering method and return the results dict."""
        is_old_facts = self.module._name == 'azure_rm_cosmosdbaccount_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_cosmosdbaccount_facts' module has been renamed to 'azure_rm_cosmosdbaccount_info'", version='2.13')
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        self.mgmt_client = self.get_mgmt_svc_client(CosmosDB,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)
        # BUGFIX: the original routed the resource-group-only case to
        # list_all(), leaving list_by_resource_group() unreachable, and
        # returned no 'accounts' key at all when neither filter was given.
        if self.resource_group is not None and self.name is not None:
            self.results['accounts'] = self.get()
        elif self.resource_group is not None:
            self.results['accounts'] = self.list_by_resource_group()
        else:
            self.results['accounts'] = self.list_all()
        return self.results

    def get(self):
        """Return facts for the single account named by resource_group/name."""
        response = None
        results = []
        try:
            response = self.mgmt_client.database_accounts.get(resource_group_name=self.resource_group,
                                                              account_name=self.name)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            # best effort: a missing account simply yields an empty list
            self.log('Could not get facts for Database Account.')
        if response and self.has_tags(response.tags, self.tags):
            results.append(self.format_response(response))
        return results

    def list_by_resource_group(self):
        """Return facts for every account in the requested resource group."""
        response = None
        results = []
        try:
            response = self.mgmt_client.database_accounts.list_by_resource_group(resource_group_name=self.resource_group)
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Database Account.')
        if response is not None:
            for item in response:
                # honour the optional tag filter
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_response(item))
        return results

    def list_all(self):
        """Return facts for every account in the subscription."""
        response = None
        results = []
        try:
            response = self.mgmt_client.database_accounts.list()
            self.log("Response : {0}".format(response))
        except CloudError as e:
            self.log('Could not get facts for Database Account.')
        if response is not None:
            for item in response:
                if self.has_tags(item.tags, self.tags):
                    results.append(self.format_response(item))
        return results

    def format_response(self, item):
        """Flatten one SDK DatabaseAccount object into the documented dict shape."""
        d = item.as_dict()
        d = {
            'id': d.get('id'),
            'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
            'name': d.get('name', None),
            'location': d.get('location', '').replace(' ', '').lower(),
            'kind': _camel_to_snake(d.get('kind', None)),
            'consistency_policy': {'default_consistency_level': _camel_to_snake(d['consistency_policy']['default_consistency_level']),
                                   'max_interval_in_seconds': d['consistency_policy']['max_interval_in_seconds'],
                                   'max_staleness_prefix': d['consistency_policy']['max_staleness_prefix']},
            'failover_policies': [{'name': fp['location_name'].replace(' ', '').lower(),
                                   'failover_priority': fp['failover_priority'],
                                   'id': fp['id']} for fp in d['failover_policies']],
            'read_locations': [{'name': rl['location_name'].replace(' ', '').lower(),
                                'failover_priority': rl['failover_priority'],
                                'id': rl['id'],
                                'document_endpoint': rl['document_endpoint'],
                                'provisioning_state': rl['provisioning_state']} for rl in d['read_locations']],
            'write_locations': [{'name': wl['location_name'].replace(' ', '').lower(),
                                 'failover_priority': wl['failover_priority'],
                                 'id': wl['id'],
                                 'document_endpoint': wl['document_endpoint'],
                                 'provisioning_state': wl['provisioning_state']} for wl in d['write_locations']],
            'database_account_offer_type': d.get('database_account_offer_type'),
            'ip_range_filter': d['ip_range_filter'],
            'is_virtual_network_filter_enabled': d.get('is_virtual_network_filter_enabled'),
            'enable_automatic_failover': d.get('enable_automatic_failover'),
            'enable_cassandra': 'EnableCassandra' in d.get('capabilities', []),
            'enable_table': 'EnableTable' in d.get('capabilities', []),
            'enable_gremlin': 'EnableGremlin' in d.get('capabilities', []),
            'virtual_network_rules': d.get('virtual_network_rules'),
            'enable_multiple_write_locations': d.get('enable_multiple_write_locations'),
            'document_endpoint': d.get('document_endpoint'),
            'provisioning_state': d.get('provisioning_state'),
            'tags': d.get('tags', None)
        }
        # NOTE(review): the key/connection-string calls below use self.name,
        # which is only set in the single-account case — confirm before
        # combining retrieve_keys/retrieve_connection_strings with a listing.
        if self.retrieve_keys == 'all':
            keys = self.mgmt_client.database_accounts.list_keys(resource_group_name=self.resource_group,
                                                                account_name=self.name)
            d['primary_master_key'] = keys.primary_master_key
            d['secondary_master_key'] = keys.secondary_master_key
            d['primary_readonly_master_key'] = keys.primary_readonly_master_key
            d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key
        elif self.retrieve_keys == 'readonly':
            keys = self.mgmt_client.database_accounts.get_read_only_keys(resource_group_name=self.resource_group,
                                                                         account_name=self.name)
            d['primary_readonly_master_key'] = keys.primary_readonly_master_key
            d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key
        if self.retrieve_connection_strings:
            connection_strings = self.mgmt_client.database_accounts.list_connection_strings(resource_group_name=self.resource_group,
                                                                                            account_name=self.name)
            d['connection_strings'] = connection_strings.as_dict()
        return d
def main():
    # Instantiating the module kicks off argument parsing and execution
    # (handled by AzureRMModuleBase.__init__, defined outside this file).
    AzureRMCosmosDBAccountInfo()
if __name__ == '__main__':
main()
| 19,585 | 4,965 |
from rdflib import Graph
import json
import glob

# Build a title -> subject-URI index from the Project Gutenberg RDF cache
# and persist it as JSON.
books = {}
counter = 1
for rdf_path in glob.glob("gutindex/cache/epub/*/*.rdf"):
    graph = Graph()
    graph.parse(rdf_path)
    for subject, predicate, obj in graph:
        # keep any triple whose predicate mentions 'title'
        if 'title' in predicate:
            books[str(obj)] = str(subject)
            print(counter, str(obj))
            counter += 1
with open("gutindex_titles.json", "w") as f:
    json.dump(books, f)
| 382 | 160 |
## _____ _____
## | __ \| __ \ AUTHOR: Pedro Rivero
## | |__) | |__) | ---------------------------------
## | ___/| _ / DATE: May 18, 2021
## | | | | \ \ ---------------------------------
## |_| |_| \_\ https://github.com/pedrorrivero
##
## Copyright 2021 Pedro Rivero
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from abc import ABC, abstractmethod
###############################################################################
## BIT CACHE INTERFACE
###############################################################################
class BitCache(ABC):
    """
    Abstract FIFO (first in, first out) cache of bits.

    Implementations hold bitstrings in insertion order: `push` appends to
    the back of the cache and `pop` removes from the front.

    Attributes
    ----------
    size: int
        The number of bits currently stored in the BitCache.

    Methods
    -------
    dump() -> str
        Outputs all the contents in the cache without erasing.
    flush() -> None:
        Erases the cache.
    pop(n: int) -> str:
        Returns a size `n` bitstring removing it from the top of the cache.
    push(bitstring: str) -> None:
        Inserts bitstring at the end of the cache.
    """

    ############################### PUBLIC API ###############################
    @property
    @abstractmethod
    def size(self) -> int:
        """Number of bits currently held in the cache."""

    @abstractmethod
    def dump(self) -> str:
        """Return every bit in the cache without removing any.

        Returns
        -------
        out: str
            The complete cache contents as a bitstring.
        """

    @abstractmethod
    def flush(self) -> None:
        """Discard all cached bits."""

    @abstractmethod
    def pop(self, num_bits: int) -> str:
        """Remove and return the first `num_bits` bits of the cache.

        Parameters
        ----------
        num_bits: int
            Number of bits to retrieve.

        Returns
        -------
        out: str
            Bitstring of length `num_bits`.

        Raises
        ------
        TypeError
            If input is not int.
        ValueError
            If input is less than one.
        RuntimeError
            If input is greater than cache size.
        """

    @abstractmethod
    def push(self, bitstring: str) -> None:
        """Append `bitstring` to the back of the cache.

        Parameters
        ----------
        bitstring: str
            The bitstring to insert.

        Raises
        ------
        TypeError
            If input bitstring is not str.
        ValueError
            If input bitstring is not a valid bitstring.
        """
| 3,188 | 870 |
"""
Log multiple instances to same file.
"""
def log(msg):
    """Append *msg* plus a trailing newline to error.log.

    Opening in append mode on every call lets multiple instances share the
    same log file.
    """
    # 'with' guarantees the handle is closed even if write() raises,
    # unlike the previous open()/close() pair.
    with open('error.log', 'a') as f:
        f.write(msg + '\n')
def clear():
    """Truncate error.log to zero length (creating it if absent)."""
    # Mode 'w' already truncates, so no explicit write('') is needed;
    # 'with' guarantees the handle is closed.
    with open('error.log', 'w'):
        pass
| 203 | 85 |
from Stream.Stream import Stream
class TransportStream(Stream):
    """
    Transport-layer stream object.

    Represents one transport-layer flow: all of its packets plus the basic
    flow information.

    Attributes:
        protocol:  concrete transport protocol, 'udp' or 'tcp'
        side_a:    IP address of side A (src of the flow's first packet)
        side_b:    IP address of side B (dst of the flow's first packet)
        port_a:    port number on side A
        port_b:    port number on side B
        packets:   list of packets belonging to this flow
        stream_id: Wireshark stream index
        direct:    direction of the flow
            - None    no direction yet (primary stream, may be followed)
            - 'A->B'  side_a to side_b, cannot be followed further
            - 'A<-B'  side_b to side_a, cannot be followed further
            - 'A<->B' both directions, cannot be followed further

    The object itself returns nothing; calling follow() returns the
    sub-stream object for the followed direction.
    """

    def __init__(self, protocol, side_a, side_b, port_a, port_b, plist, stream_id, direct=None):
        Stream.__init__(self, stream_id, side_a, side_b, plist, direct)
        self.protocol = protocol
        self.port_a = port_a
        self.port_b = port_b
        if not self.direct:
            # only a primary (direction-less) stream needs chronological sorting
            self.packets.sort()
        self.start_time = self.packets[0].time
        self.end_time = self.packets[-1].time
        self.num = len(self.packets)
        self.size = 0
        for p in self.packets:
            self.size += int(p.length)
        # cache of already-followed sub-streams, keyed by direction
        self.__buffer = {}

    def __repr__(self):
        # BUGFIX: the original condition was inverted and printed the
        # direction only when it was None.
        if self.direct is not None:
            return '<%sStream #%d(%s)>' % (self.protocol.upper(), self.stream_id, self.direct)
        return '<%sStream #%d>' % (self.protocol.upper(), self.stream_id)

    def follow(self, direct):
        """
        Follow the stream in one direction (like Wireshark's "follow stream").

        Parameters:
            direct: (str) direction to follow, one of
                - 'A->B'
                - 'A<-B'
                - 'A<->B'

        Returns:
            A TransportStream for the followed direction (same structure as
            this object, but not followable again), or None for an invalid
            direction or when called on an already-directional stream.
        """
        if self.direct:
            # directional sub-streams cannot be followed again
            return None
        direct = direct.upper()
        if direct == 'A->B':
            follow_list = [p for p in self.packets
                           if p.ip.src == self.side_a and p.ip.dst == self.side_b]
        elif direct == 'A<-B':
            # BUGFIX: the reverse direction must match src == side_b;
            # the original reused the A->B condition here.
            follow_list = [p for p in self.packets
                           if p.ip.src == self.side_b and p.ip.dst == self.side_a]
        elif direct == 'A<->B':
            follow_list = self.packets
        else:
            return None
        if direct in self.__buffer:
            return self.__buffer[direct]
        # BUGFIX: the original dropped stream_id and passed the direction
        # string in its place, so sub-streams had a bogus id, direct=None,
        # and were (incorrectly) followable; 'A<->B' also never returned.
        self.__buffer[direct] = TransportStream(self.protocol,
                                                self.side_a,
                                                self.side_b,
                                                self.port_a,
                                                self.port_b,
                                                follow_list,
                                                self.stream_id,
                                                direct)
        return self.__buffer[direct]
| 3,959 | 1,290 |
"""
This tool compares measured data (observed) with model outputs (predicted), used in procedures of calibration and validation
"""
from __future__ import division
from __future__ import print_function
import os
from math import sqrt
import pandas as pd
from sklearn.metrics import mean_squared_error as calc_mean_squared_error
import cea.config
import cea.inputlocator
from cea_calibration.global_variables import *
# from cea.constants import MONTHS_IN_YEAR_NAMES
# import cea.examples.global_variables as global_variables
# def outputdatafolder(self):
# return self._ensure_folder(self.scenario, 'outputs', 'data')
#
#
# def get_calibrationresults(self):
# """scenario/outputs/data/calibration_results/calibrationresults.csv"""
# return os.path.join(self.scenario, 'outputs', 'data', 'calibration_results', 'calibrationresults.csv')
#
#
# def get_project_calibrationresults(self):
# """project/outputs/calibration_results/calibrationresults.csv"""
# return os.path.join(self.project, 'outputs', 'calibration_results', 'calibrationresults.csv')
#
#
# def get_totaloccupancy(self):
# """scenario/outputs/data/totaloccupancy.csv"""
# return os.path.join(self.scenario, "outputs", "data", "totaloccupancy.csv")
#
#
# def get_measurements_folder(self):
# return self._ensure_folder(self.scenario, 'inputs', 'measurements')
#
#
# def get_annual_measurements(self):
# return os.path.join(self.get_measurements_folder(), 'annual_measurements.csv')
#
#
# def get_monthly_measurements(self):
# return os.path.join(self.get_measurements_folder(), 'monthly_measurements.csv')
#
#
# def get_global_monthly_measurements(self):
# return os.path.join(self.get_measurements_folder(), 'monthly_measurements.csv')
# global_validation_n_calibrated = []
# global_validation_percentage = []
MONTHS_IN_YEAR_NAMES = ['JANUARY', 'FEBRUARY', 'MARCH', 'APRIL',
'MAY', 'JUNE', 'JULY', 'AUGUST', 'SEPTEMBER',
'OCTOBER', 'NOVEMBER', 'DECEMBER']
__author__ = "Luis Santos"
__copyright__ = "Copyright 2020, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Luis Santos, Jimeno Fonseca, Daren Thomas"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def validation(scenario_list,
               locators_of_scenarios,
               measured_building_names_of_scenarios,
               monthly=True,
               load='GRID',
               ):
    """
    This tool compares observed (real life measured data) and predicted (output of the model data) values.
    Monthly data is compared in terms of NMBE and CvRMSE (following ASHRAE Guideline 14-2014).
    A new input folder with measurements has to be created, with a csv each for monthly data provided as input for this tool.
    The input file contains: Name (CEA ID)| ZipCode (optional) | Monthly Data (JAN - DEC) | Type of equivalent variable in CEA (GRID_kWh is the default for total electricity consumption)
    The script prints the NBME and CvRMSE for each building. It also outputs the number of calibrated buildings and a score metric (calibrated buildings weighted by their energy consumption)

    :param scenario_list: scenario paths, one per scenario to validate
    :param locators_of_scenarios: InputLocator instances matching scenario_list
    :param measured_building_names_of_scenarios: per-scenario lists of building names that have measurements
    :param monthly: if True run the monthly comparison (the only mode implemented here)
    :param load: CEA demand column prefix to compare, e.g. 'GRID' -> GRID_kWh
    :return: total score summed over all scenarios
    """
    ## monthly validation
    if monthly:
        number_of_buildings = 0
        print("monthly validation")
        validation_output = pd.DataFrame(columns=['scenario', 'calibrated_buildings', 'score'])
        for scenario, locator, measured_building_names in zip(scenario_list, locators_of_scenarios,
                                                              measured_building_names_of_scenarios):
            list_of_scores = []
            number_of_calibrated = []
            number_of_buildings = number_of_buildings + len(measured_building_names)
            # get measured data for buildings in this scenario
            monthly_measured_data = pd.read_csv(locator.get_monthly_measurements())
            # loop in the measured buildings of this scenario
            for building_name in measured_building_names:  # number of buildings that have real data available
                # extract measured data for this building as a Month/measurements frame
                print('For building', building_name, 'the errors are')
                fields_to_extract = ['Name'] + MONTHS_IN_YEAR_NAMES
                monthly_measured_demand = monthly_measured_data[fields_to_extract].set_index('Name')
                monthly_measured_demand = monthly_measured_demand.loc[building_name]
                monthly_measured_demand = pd.DataFrame({'Month': monthly_measured_demand.index.values,
                                                        'measurements': monthly_measured_demand.values})
                # extract model output (hourly demand for the chosen load)
                hourly_modelled_data = pd.read_csv(locator.get_demand_results_file(building_name),
                                                   usecols=['DATE', load + '_kWh'])
                hourly_modelled_data['DATE'] = pd.to_datetime(hourly_modelled_data['DATE'])
                # month number -> month name, to match the measurements frame
                look_up = {1: 'JANUARY', 2: 'FEBRUARY', 3: 'MARCH', 4: 'APRIL', 5: 'MAY',
                           6: 'JUNE', 7: 'JULY', 8: 'AUGUST', 9: 'SEPTEMBER', 10: 'OCTOBER', 11: 'NOVEMBER',
                           12: 'DECEMBER'}
                # this step is required to allow the conversion from hourly to monthly data
                monthly_modelled_data = hourly_modelled_data.resample('M', on='DATE').sum()  # because data is in kWh
                monthly_modelled_data['Month'] = monthly_modelled_data.index.month
                monthly_modelled_data['Month'] = monthly_modelled_data.apply(lambda x: look_up[x['Month']], axis=1)
                monthly_data = monthly_modelled_data.merge(monthly_measured_demand, on='Month')
                # calculate ASHRAE-14 errors for this building
                cv_root_mean_squared_error, normalized_mean_biased_error = calc_errors_per_building(load, monthly_data)
                ind_calib_building, ind_score_building = calc_building_score(cv_root_mean_squared_error, monthly_data,
                                                                             normalized_mean_biased_error)
                # appending list of variables for later use
                number_of_calibrated.append(ind_calib_building)
                list_of_scores.append(ind_score_building)
            n_scenario_calib = sum(number_of_calibrated)
            scenario_score = sum(list_of_scores)
            scenario_name = os.path.basename(scenario)
            validation_output = validation_output.append(
                {'scenario': scenario_name, 'calibrated_buildings': n_scenario_calib, 'score': scenario_score},
                ignore_index=True)
        n_calib = validation_output['calibrated_buildings'].sum()
        score = validation_output['score'].sum()
        # NOTE(review): these lists appear to come from the star import of
        # cea_calibration.global_variables and accumulate across calls —
        # confirm that module-level mutation is intended.
        global_validation_n_calibrated.append(n_calib)
        global_validation_percentage.append((n_calib / number_of_buildings) * 100)
        print('The number of calibrated buildings is', n_calib)
        print('The final score is', score)
    return score
def calc_errors_per_building(load, monthly_data):
    """Compute CV(RMSE) and NMBE (both in %) between measured and modelled monthly demand.

    `monthly_data` must provide a 'measurements' column and a '<load>_kWh'
    column; NMBE is normalised over the 12 months of a year.
    """
    measured = monthly_data['measurements']
    modelled = monthly_data[load + '_kWh']
    biased_error = measured - modelled
    normalized_mean_biased_error = ((biased_error.sum() / 12) / measured.mean()) * 100  # %
    print('NMBE:', round(normalized_mean_biased_error, 1))
    root_mean_squared_error = sqrt(calc_mean_squared_error(measured, modelled))
    cv_root_mean_squared_error = root_mean_squared_error * 100 / measured.mean()
    print('CVRMSE:', round(cv_root_mean_squared_error, 1))
    return cv_root_mean_squared_error, normalized_mean_biased_error
def calc_building_score(cv_root_mean_squared_error, monthly_data, normalized_mean_biased_error):
    """Return (calibrated_flag, weighted_score) for one building.

    ASHRAE Guideline 14 monthly criteria: a building counts as calibrated
    when |NMBE| < 5 and CV(RMSE) < 15. The score weights the flag by the
    building's total measured consumption.
    """
    is_calibrated = abs(normalized_mean_biased_error) < 5 and cv_root_mean_squared_error < 15
    ind_calib_building = 1 if is_calibrated else 0
    # weights the calibration by building energy consumption
    ind_score_building = ind_calib_building * sum(monthly_data['measurements'])
    return ind_calib_building, ind_score_building
def get_measured_building_names(locator):
    """Return the list of building names present in the monthly measurements CSV."""
    measurements = pd.read_csv(locator.get_monthly_measurements())
    return list(measurements.Name.values)
def main(config):
    """
    This is the main entry point to your script. Any parameters used by your script must be present in the ``config``
    parameter. The CLI will call this ``main`` function passing in a ``config`` object after adjusting the configuration
    to reflect parameters passed on the command line - this is how the ArcGIS interface interacts with the scripts
    BTW.

    :param config:
    :type config: cea.config.Configuration
    :return:
    """
    assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario
    locator = cea.inputlocator.InputLocator(config.scenario, config.plugins)
    # single-scenario run: wrap everything in one-element lists for validation()
    measured_names = get_measured_building_names(locator)
    validation([config.scenario],
               [locator],
               [measured_names],
               monthly=True,
               load='GRID',
               )
if __name__ == '__main__':
main(cea.config.Configuration())
| 9,775 | 3,011 |
try:
    from public_config import *
except ImportError:
    # fall back to the local defaults below when no public config exists
    pass

HOST = '0.0.0.0'
PORT = 9038
SERVICE_NAME = 'jobs'
SERVER_ENV = 'prod'
SQLALCHEMY_POOL_SIZE = 10
SQLALCHEMY_POOL_RECYCLE = 3600

# APScheduler job definitions.
JOBS = [
    {  # Daily credit check, Mon-Fri at 11:30.
        # Checks whether each device loan is overdue: sends a reminder
        # e-mail and deducts 1 credit point.
        'id': 'credit-check-daily',  # job id, unique
        'func': 'apps.jobs.business.jobs:JobsBusiness.credit_check_daily',  # dotted path
        'args': None,
        'trigger': 'cron',
        'day_of_week': 'mon-fri',
        'hour': 11,
        'minute': 30,
    },
    {
        # CI data refresh: update Jenkins data every 10 hours.
        'id': 'cijob_update',
        'func': 'apps.extention.business.cidata:CiJobBusiness.update_jenkins_data',
        'args': None,
        'trigger': 'interval',
        'hours': 10
    },
    {
        # Periodically persist per-route API call statistics from redis
        # into the database, Mon-Fri at 03:00.
        # BUGFIX: this job declared trigger 'interval' while using the
        # cron-only fields day_of_week/hour, which the scheduler rejects;
        # the intended schedule requires the 'cron' trigger.
        'id': 'get_statistics_route_job',
        'func': 'apps.public.daos.public:get_statistics_route_job',
        'args': None,
        'trigger': 'cron',
        'day_of_week': 'mon-fri',
        'hour': 3,
    }
]
| 1,337 | 732 |
import pyautogui, pygetwindow, time
screen = pygetwindow.getWindowsWithTitle('Old School RuneScape')[0]
centerRatio = (2.8,2.8)
def move(direction, spaces):
    """Click the game minimap so the character walks `spaces` tiles in `direction`."""
    step = {
        "up": (0, 0.32),
        "down": (0, -0.32),
        "left": (0.3, 0),
        "right": (-0.3, 0),
    }
    vel = list(step.get(direction, (0, 0)))
    print(vel)
    # shift the click target away from the screen-centre ratio by the
    # per-tile velocity times the number of tiles
    target = (centerRatio[0] + (vel[0] * spaces), centerRatio[1] + (vel[1] * spaces))
    pyautogui.click(screen.left + (screen.width / target[0]),
                    screen.top + (screen.height / target[1]))
    time.sleep(1.5)
#
# Main bot loop: click the middle minimap tree icon, wait, then sidestep.
while True:
    boxs = list(pyautogui.locateAllOnScreen("miniTree.png"))
    # BUGFIX: boxs may be empty, in which case indexing the middle element
    # raised IndexError before the truthiness check could run.
    if boxs:
        box = boxs[len(boxs) // 2]
        pyautogui.click(box)
    time.sleep(7)
    move("left", 1)
    time.sleep(3)
| 918 | 368 |
#!/usr/bin/python
'''
Produces POSIX commands to setup the environment variables for Geant4.
Required command line arguments:
1: Location of geant4.sh script.
2: Version of Geant4.
'''
import os
import sys
import re
import subprocess as subp
from codecs import encode,decode
# geant4_version is accepted for interface stability but not used below.
geant4_sh, geant4_version = sys.argv[1:]
# vars and standard directory names
geant4_vars = {
    "G4ABLADATA" : "G4ABLA",
    "G4LEDATA" : "G4EMLOW",
    "G4LEVELGAMMADATA" : "PhotonEvaporation",
    "G4NEUTRONHPDATA" : "G4NDL",
    "G4NEUTRONXSDATA" : "G4NEUTRONXS",
    "G4PIIDATA" : "G4PII",
    "G4RADIOACTIVEDATA": "RadioactiveDecay",
    "G4REALSURFACEDATA": "RealSurface",
    "G4ENSDFSTATEDATA" : "G4ENSDFSTATE2.2",
    "G4SAIDXSDATA" : "G4SAIDDATA1.1"
}
geant4_env = {}
# try to get vars from geant4.sh script
if os.path.isfile(geant4_sh):
    # Source geant4.sh inside a clean bash (env={}) and dump the resulting
    # environment; encode/decode keep the pipe bytes portable across
    # Python 2 and 3.
    p = subp.Popen("/bin/bash",
                   stdin=subp.PIPE,
                   stdout=subp.PIPE,
                   cwd=os.path.dirname(geant4_sh),
                   env={})
    penv = decode(p.communicate(encode("source geant4.sh && env"))[0].strip())
    # NOTE(review): line-by-line parsing assumes no multi-line environment
    # values; geant4.sh is assumed not to export any.
    for line in penv.split("\n"):
        # split on the first '=' only: values may themselves contain '='
        sep = line.index("=")
        var = line[:sep]
        value = line[sep+1:]
        if var in geant4_vars:
            geant4_env[var] = value
# Resolve each variable: an existing environment value wins over the value
# detected from geant4.sh; report variables that could not be resolved.
formatted_pairs = []
for var in geant4_vars:
    value = None
    if var in os.environ:
        # warn user that existing environment variables override this script,
        # but don't complain if we are just running inside an env-shell.sh
        value = os.environ[var]
        if not "I3_SHELL" in os.environ:
            sys.stderr.write(("Warning: Geant4 environment variable already set {0}={1}, "
                              "this overrides automatic detection\n")
                             .format(var, value))
    elif var in geant4_env:
        value = geant4_env[var]
    if value is None:
        sys.stderr.write(("Warning: Geant4 environment variable {0} could not be set, "
                          "g4-based modules may crash\n").format(var))
    else:
        formatted_pairs.append("{0}={1}".format(var, value))
# extra formatting for env-shell.sh
sys.stdout.write(" \\\n\t".join(formatted_pairs))
#! /usr/bin/env python
from __future__ import absolute_import
import os
import sys
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(PROJECT_DIR)
sys.path.append(os.path.abspath(os.path.join(PROJECT_DIR, "app")))
if __name__ == "__main__":
from app.main import Main
aws_bucket_name = None
if len(sys.argv) > 1:
aws_bucket_name = sys.argv[1]
Main().load_images(aws_bucket_name)
| 454 | 178 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
PixelExtractor
"""
__all__ = ["PixelExtractor"]
from ....entrypoints.transforms_imagepixelextractor import \
transforms_imagepixelextractor
from ....utils.utils import trace
from ...base_pipeline_item import BasePipelineItem, DefaultSignature
class PixelExtractor(BasePipelineItem, DefaultSignature):
    """
    Extracts the pixel values from an image.
    .. remarks::
        ``PixelExtractor`` extracts the pixel values from an image. The input
        variables
        are images of the same size, typically the output of a ``Resizer``
        transform. The
        output are pixel data in vector form that are typically used as
        features for a learner.
    :param use_alpha: Specifies whether to use alpha channel. The default
        value is ``False``.
    :param use_red: Specifies whether to use red channel. The default value
        is ``True``.
    :param use_green: Specifies whether to use green channel. The default
        value is ``True``.
    :param use_blue: Specifies whether to use blue channel. The default value
        is ``True``.
    :param interleave_argb: Whether to separate each channel or
        interleave in ARGB order. This might be important, for example, if
        you are training
        a convolutional neural network, since this would affect the shape of
        the kernel, stride etc.
    :param convert: Whether to convert to floating point. The default value
        is ``True``.
    :param offset: Specifies the offset (pre-scale). This requires ``convert
        = True``.
        The default value is `None`.
    :param scale: Specifies the scale factor. This requires ``convert =
        True``.
        The default value is `None`.
    :param params: Additional arguments sent to compute engine.
    .. seealso::
        :py:class:`Loader <nimbusml.feature_extraction.image.Loader>`,
        :py:class:`Resizer <nimbusml.feature_extraction.image.Resizer>`.
    .. index:: transform, image
    Example:
       .. literalinclude:: /../nimbusml/examples/Image.py
              :language: python
    """
    @trace
    def __init__(
            self,
            use_alpha=False,
            use_red=True,
            use_green=True,
            use_blue=True,
            interleave_argb=False,
            convert=True,
            offset=None,
            scale=None,
            **params):
        BasePipelineItem.__init__(
            self, type='transform', **params)
        self.use_alpha = use_alpha
        self.use_red = use_red
        self.use_green = use_green
        self.use_blue = use_blue
        self.interleave_argb = interleave_argb
        self.convert = convert
        self.offset = offset
        self.scale = scale
    @property
    def _entrypoint(self):
        # Entrypoint function that builds the engine-side graph node.
        return transforms_imagepixelextractor
    @trace
    def _get_node(self, **all_args):
        # Pull input/output column lists either from this item or from the
        # arguments supplied by the pipeline, removing them from all_args so
        # they are not passed twice.
        input_columns = self.input
        if input_columns is None and 'input' in all_args:
            input_columns = all_args['input']
        if 'input' in all_args:
            all_args.pop('input')
        output_columns = self.output
        if output_columns is None and 'output' in all_args:
            output_columns = all_args['output']
        if 'output' in all_args:
            all_args.pop('output')
        # validate input
        if input_columns is None:
            raise ValueError(
                "'None' input passed when it cannot be none.")
        if not isinstance(input_columns, list):
            raise ValueError(
                "input has to be a list of strings, instead got %s" %
                type(input_columns))
        # validate output
        if output_columns is None:
            output_columns = input_columns
        if not isinstance(output_columns, list):
            raise ValueError(
                "output has to be a list of strings, instead got %s" %
                type(output_columns))
        # Map each input column onto its output name (Source -> Name pairs)
        # and forward the channel/scaling options to the entrypoint.
        algo_args = dict(
            column=[
                dict(
                    Source=i,
                    Name=o) for i,
                o in zip(
                    input_columns,
                    output_columns)] if input_columns else None,
            use_alpha=self.use_alpha,
            use_red=self.use_red,
            use_green=self.use_green,
            use_blue=self.use_blue,
            interleave_argb=self.interleave_argb,
            convert=self.convert,
            offset=self.offset,
            scale=self.scale)
        all_args.update(algo_args)
        return self._entrypoint(**all_args)
| 4,906 | 1,301 |
#
# @lc app=leetcode.cn id=17 lang=python3
#
# [17] 电话号码的字母组合
#
# https://leetcode-cn.com/problems/letter-combinations-of-a-phone-number/description/
#
# algorithms
# Medium (47.70%)
# Total Accepted: 18K
# Total Submissions: 37.5K
# Testcase Example: '"23"'
#
# 给定一个仅包含数字 2-9 的字符串,返回所有它能表示的字母组合。
#
# 给出数字到字母的映射如下(与电话按键相同)。注意 1 不对应任何字母。
#
#
#
# 示例:
#
# 输入:"23"
# 输出:["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
#
#
# 说明:
# 尽管上面的答案是按字典序排列的,但是你可以任意选择答案输出的顺序。
#
#
from itertools import product
class Solution:
    def letterCombinations(self, digits: str) -> list:
        """Return every letter string the phone-keypad digits can spell.

        Recursive: the combinations for digits[1:] are prefixed with each
        letter mapped to digits[0]. Digits are assumed to be in '2'-'9'.
        """
        # Phone keypad: digit -> candidate letters.
        keypad = {2: ['a', 'b', 'c'],
                  3: ['d', 'e', 'f'],
                  4: ['g', 'h', 'i'],
                  5: ['j', 'k', 'l'],
                  6: ['m', 'n', 'o'],
                  7: ['p', 'q', 'r', 's'],
                  8: ['t', 'u', 'v'],
                  9: ['w', 'x', 'y', 'z'],
                  }
        if not digits:
            return []
        # Base case: a single digit maps straight to its letters.
        if len(digits) == 1:
            return keypad[int(digits[0])]
        # Recurse on the remaining digits, then prefix each suffix with every
        # letter of the leading digit (outer loop over suffixes preserves the
        # original output order).
        suffixes = self.letterCombinations(digits[1:])
        return [letter + suffix
                for suffix in suffixes
                for letter in keypad[int(digits[0])]]
# Quick manual check: expect the 9 combinations for "23".
S = Solution()
print(S.letterCombinations("23"))
| 2,019 | 965 |
import random
import argparse
from ast import Global
from dis import dis
from glob import glob
from itertools import count
from math import dist
from logger import *
import json
import gym
from matplotlib.pyplot import axis
import scipy.optimize
import pdb
import torch
from torch.autograd import Variable
from jax_rl.agents import AWACLearner, SACLearner
from jax_rl.datasets import ReplayBuffer
from jax_rl.evaluation import evaluate
from jax_rl.utils import make_env
import numpy as np
import pickle
import random
import copy
from sklearn.cluster import KMeans# import ant
# import swimmer
# import reacher
# import walker
# import halfcheetah
# import inverted_double_pendulum
import sys
sys.path.append('../all_envs')
import swimmer
import walker
# Legacy torch compatibility warnings plus float64 as the default tensor type.
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--env-name', default="Reacher-v1", metavar='G',
                    help='name of the environment to run')
parser.add_argument('--seed', type=int, default=543, metavar='N',
                    help='random seed (default: 1)')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
                    help='random seed (default: 1)')
parser.add_argument('--render', action='store_true',
                    help='render the environment')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
                    help='interval between training status logs (default: 10)')
parser.add_argument('--save_path', type=str, default= 'temp', metavar='N',
                    help='path to save demonstrations on')
parser.add_argument('--xml', type=str, default= None, metavar='N',
                    help='For diffent dynamics')
parser.add_argument('--demo_files', nargs='+')
parser.add_argument('--test_demo_files', nargs='+')
parser.add_argument('--ratio', type=float, nargs='+')
parser.add_argument('--eval-interval', type=int, default=1000)
parser.add_argument('--restore_model', default=None)
parser.add_argument('--mode')
parser.add_argument('--discount', type=float, default=0.9)
parser.add_argument('--discount_train', action='store_true')
parser.add_argument('--fixed_train', action='store_true')
parser.add_argument('--algo', default='sac', help='the algorithm of RL')
parser.add_argument('--max_steps', type=int, default=int(1e6), help='the maximum number of steps')
parser.add_argument('--start_training', type=int, default=int(1e4), help='Number of training steps to start training.')
args = parser.parse_args()
# Log under log/<env>/<xml-stem>resplit. Requires --xml to be given
# (os.path.splitext(None) raises). NOTE(review): `os` and CompleteLogger are
# presumably provided by `from logger import *` above — confirm.
logger = CompleteLogger('log/'+ args.env_name + '/'+ os.path.splitext(args.xml)[0] + 'resplit')
# re-define datasets
# Persist the run configuration alongside the logs for reproducibility.
json.dump(vars(args), logger.get_args_file(), sort_keys=True, indent=4)
target_env_name = os.path.splitext(args.xml)[0]
def load_demos(demo_files, ratio):
    """Load demonstration trajectories from pickled files.

    :param demo_files: list of pickle paths; each file holds a dict with key
        'obs' (list of trajectories) and optionally 'init_obs'.
    :param ratio: sequence whose first element is the fraction of
        trajectories to keep from every file (same ratio for each domain).
    :return: tuple (all_demos, all_init_obs) concatenated across files.
    """
    all_demos = []
    all_init_obs = []
    for demo_file in demo_files:
        # Use a context manager so the file handle is always closed
        # (the original `pickle.load(open(...))` leaked it).
        with open(demo_file, 'rb') as f:
            raw_demos = pickle.load(f)
        # use fix ratio for every domain
        use_num = int(len(raw_demos['obs']) * ratio[0])
        all_demos = all_demos + raw_demos['obs'][:use_num]
        if 'init_obs' in raw_demos:
            all_init_obs = all_init_obs + raw_demos['init_obs'][:use_num]
    return all_demos, all_init_obs
def load_pairs(demo_files, ratio):
    """Load (obs, next_obs) transition pairs from pickled trajectory files.

    :param demo_files: list of pickle paths; each file holds a dict with
        keys 'obs' and 'next_obs' (lists of per-trajectory step arrays).
    :param ratio: fraction of trajectories to use from each file.
    :return: np.ndarray of shape (total_steps, 2, obs_dim); index 0 of the
        middle axis is obs, index 1 is next_obs.
    """
    all_pairs = []
    for demo_file in demo_files:
        # Context manager closes the handle (the original leaked it).
        with open(demo_file, 'rb') as f:
            raw_demos = pickle.load(f)
        for i in range(int(len(raw_demos['obs']) * ratio)):
            obs = np.array(raw_demos['obs'][i])
            next_obs = np.array(raw_demos['next_obs'][i])
            # Stack each step's obs/next_obs side by side, then fold them
            # into a (steps, 2, obs_dim) block.
            all_pairs.append(np.reshape(
                np.concatenate([obs, next_obs], axis=1), (obs.shape[0], 2, -1)))
    return np.concatenate(all_pairs, axis=0)
global iters  # no-op at module level; documents that `iters` is shared below
def Max_Clique(t, size):
    """Branch-and-bound search for the maximum clique of `graph_matrix`.

    Explores include/exclude decisions for node t onward, using module
    globals: x (current 0/1 selection), bestx/bestn (best clique found),
    iters (recursion counter used as a convergence cut-off),
    graph_matrix (adjacency matrix).
    """
    global bestx  # 0/1 membership list of the best clique found so far
    global bestn  # number of nodes in the best clique
    global iters  # recursion counter; reset whenever a better clique is found
    if iters > 9000000:  # can be seen as converged — stop exploring
        return
    iters += 1
    if t >= size:
        # Leaf: count selected nodes and keep the selection if it is larger.
        # NOTE(review): `sum` shadows the builtin of the same name here.
        sum = 0
        for value in x:
            sum = sum + value
        if sum > bestn:
            bestx = copy.deepcopy(x)
            bestn = sum
            print("iters: ", iters)
            iters = 0
        # pdb.set_trace()
        # if bestn > int(0.5 * size):
        #     return
    else:
        # Does node t form a clique with the nodes already selected in x?
        flag = 1
        for i in range(0,t):
            if x[i] == 1 and graph_matrix[t][i] == 0:
                flag = 0
        if flag == 0:# not a clique: node t cannot be included, recurse without it
            x[t] = 0
            Max_Clique(t+1, size)
        else: # forms a clique: branch on both including and excluding node t
            for j in range(0,2):
                x[t] = j
                Max_Clique(t+1, size)
def update_graph(G, nodes_to_delete):
    """Detach every listed node from adjacency matrix G, in place.

    Zeroes out both the row and the column of each node so it no longer
    participates in any edge. Returns None.
    """
    for node in nodes_to_delete:
        G[node, :] = 0
        G[:, node] = 0
def is_all_clear(Graph):
    """Return True when the graph has no remaining edges (all entries zero)."""
    return not np.any(Graph)
def greedy_independ(Graph):
    """Greedily add one node to the independent set under construction.

    Picks the node of minimum remaining degree and records it through the
    module globals: x (chosen node index), bestn (set size so far),
    bestx (0/1 membership vector).
    """
    global x      # index of the node chosen this round
    global bestn  # running size of the independent set
    global bestx  # 0/1 membership vector of the independent set
    degrees = np.sum(Graph, axis=-1)
    # Already-detached (degree-0) nodes get a huge sentinel degree so argmin
    # only ever selects a node that still has edges.
    degrees = np.where(degrees == 0, 10000, degrees)
    # pdb.set_trace()
    x = degrees.argmin()
    bestn += 1
    bestx[x] = 1
# if __name__ == '__main__':
# #输入一个图,用二维数组存储
# #输入节点数量
# print('图中节点个数为:')
# n = 4
# G_list = [[1,1,1,0], [1,1,1,0], [1,1,1,1], [0,0,1,1]]
# print('图的邻接矩阵为:')
# # for i in range(n):
# # G_list.append(input().split(','))
# x = [0 for i in range(n)]
# G_list = np.array(G_list)
# while(not is_all_clear(G_list)):
# print(G_list)
# global bestn
# bestn = 0
# Max_Clique(0)
# print(bestx,bestn)
# pdb.set_trace()
# update_graph(G=G_list, nodes_to_delete=bestx)
# def re_split_demos(demos_all):
# size = len(demos_all)
# traj_len = len(demos_all[0])
# pdb.set_trace()
# dist_matrix = np.zeros((size, size)) # 200 * 200
# look_1 = np.expand_dims(np.array(demos_all), axis=0) # 1 * 200 * 1000 * 18
# look_2 = np.expand_dims(np.array(demos_all), axis=1) # 200 * 1 * 1000 * 18
# dist_matrix = np.sum(abs(look_1 - look_2), axis=-1) # 200 * 200 * 1000
# # dist_matrix = np.linalg.norm(look_1 - look_2, axis=-1)
# dist_matrix = np.mean(dist_matrix, dim=-1)
# # for i in range(size):
# # for j in range(size):
# # dist_matrix[i][j] = calculate_traj_dist(demos_all[i], demos_all[j])
# global graph_matrix
# # # clique
# # graph_matrix = dist_matrix < (dist_matrix.mean() * 1.1)
# # independent
# graph_matrix = dist_matrix > (dist_matrix.mean() * 0.9)
# print("sample graph:", graph_matrix[0])
# graph_matrix = graph_matrix.astype(int)
# split_done = False
# split_clique=[]
# while(not split_done):
# global x
# # print(G_list)
# global bestn
# global iters
# x = [0 for i in range(size)]
# bestn = 0
# iters = 0
# # pdb.set_trace()
# Max_Clique(0, size=size)
# print(bestx, bestn)
# update_graph(G=graph_matrix, nodes_to_delete=bestx)
# # pdb.set_trace()
# clique = [i for i, x in enumerate(bestx) if x == 1]
# if len(clique) > int(0.1 * size):
# split_clique.append(clique)
# split_done = is_all_clear(graph_matrix)
# print('re_cluster id:', split_clique)
# pdb.set_trace()
# # save new demo clique
# raw_demos = {}
# for i in range(len(split_clique)):
# save_demo_path = '../demo/walker2d/re_split_{}_batch_00.pkl'.format(i)
# raw_demos['obs'] = [demos_all[idx] for idx in split_clique[i]]
# pickle.dump(raw_demos, open(save_demo_path, 'wb'))
def re_split_demos(demos_all):
    """Cluster trajectories into groups of mutually close demos and save each
    cluster to its own pickle under ../demo/<target_env_name>/.

    Builds a "near" graph (pairwise mean distance below the mean), repeatedly
    extracts a greedy independent-style set via greedy_independ/update_graph,
    and relaxes the distance threshold each round until every trajectory is
    covered. NOTE(review): distances are computed from the module-level
    `demos_all_normed`, not the `demos_all` argument — only the saved
    trajectories use the argument; confirm this is intentional.
    Mutates globals graph_matrix, bestx, bestn, iters, x.
    """
    size = len(demos_all)
    traj_len = len(demos_all[0])
    # pdb.set_trace()
    dist_matrix = np.zeros((size, size)) # 200 * 200
    look_1 = np.expand_dims(np.array(demos_all_normed), axis=0) # 1 * 200 * 1000 * 18
    look_2 = np.expand_dims(np.array(demos_all_normed), axis=1) # 200 * 1 * 1000 * 18
    # dist_matrix = np.sum(abs(look_1 - look_2), axis=-1) # 200 * 200 * 1000
    dist_matrix = np.linalg.norm(look_1 - look_2, axis=-1)
    dist_matrix = np.mean(dist_matrix, axis=-1)
    # Cache the pairwise distances so later runs can reuse them.
    save_dist_path = os.path.join('../demo', target_env_name,'dist_matrix.pkl')
    pickle.dump(dist_matrix, open(save_dist_path, 'wb'))
    # for i in range(size):
    #     for j in range(size):
    #         dist_matrix[i][j] = calculate_traj_dist(demos_all[i], demos_all[j])
    global graph_matrix
    # # clique
    # graph_matrix = dist_matrix < (dist_matrix.mean() * 1.1)
    # independent
    graph_matrix = dist_matrix < (dist_matrix.mean() * 1.)
    print("sample graph:", graph_matrix[0])
    graph_matrix = graph_matrix.astype(int)
    split_clique=[]
    global bestx
    bestx = [0 for i in range(size)]
    all_clear = False
    # remember the true graph
    graph_memory = copy.deepcopy(graph_matrix)
    decay_step = 0
    while(not all_clear):
        decay_step += 1
        bestx = [0 for i in range(size)]
        graph_matrix = copy.deepcopy(graph_memory)
        split_done = False
        # look for independent set for one time
        while(not split_done):
            global x
            # print(G_list)
            global bestn
            global iters
            bestn = 0
            # pdb.set_trace()
            # Max_Clique(0, size=size)
            greedy_independ(graph_matrix) # set bestx = bestx U x
            # Remove the chosen node and all its neighbours from the working
            # graph so the next pick is non-adjacent to it.
            update_graph(G=graph_matrix, nodes_to_delete=find_neighbor_x(node_id=x, Graph=graph_matrix))
            split_done = is_all_clear(graph_matrix)
        # print(bestx, bestn)
        # find one independent set(approximately)
        clique = [i for i, x in enumerate(bestx) if x == 1]
        # Keep only sufficiently large groups (at least 20% of all demos).
        if len(clique) > int(0.2 * size):
            split_clique.append(clique)
            print('re_cluster id:', split_clique)
        # to contain more nodes: tighten the distance threshold each round
        graph_memory = (dist_matrix < (dist_matrix.mean() * (1. - 0.05 * decay_step))).astype(int)
        update_graph(G=graph_memory, nodes_to_delete=[x for clique in split_clique for x in clique])
        # check if all the demos have been selected
        all_clear = is_all_clear(graph_memory)
        # pdb.set_trace()
    print("total rounds:{}".format(decay_step))
    # pdb.set_trace()
    # save new demo clique
    # evaluate
    temp = []
    for i in range(len(split_clique)):
        temp.append([demos_all_normed[idx] for idx in split_clique[i]])
    evaluate_cluster_within(temp)
    # evaluate cluster 0 & 1
    evaluate_cluster_between([demos_all_normed[idx] for idx in split_clique[0]], [demos_all_normed[idx] for idx in split_clique[1]])
    raw_demos = {}
    for i in range(len(split_clique)):
        save_demo_path = '../demo/' + target_env_name + '/re_split_far_{}_batch_00.pkl'.format(i)
        raw_demos['obs'] = [demos_all[idx] for idx in split_clique[i]]
        pickle.dump(raw_demos, open(save_demo_path, 'wb'))
        print("=>save cluster {}: {} traj".format(i, len(raw_demos['obs'])))
def random_split_demo(demos, cluster_num=4):
    """Shuffle the trajectories and split them into `cluster_num` equal
    clusters, saving each cluster as a pickle under ../demo/<target_env_name>/.

    Bug fix: the original indexed the module-level `demos_all` instead of the
    shuffled `demos` argument, so the shuffle had no effect on what was saved.
    Also closes the output file via a context manager (previously leaked).

    :param demos: list of trajectories; shuffled in place.
    :param cluster_num: number of equal-size clusters (remainder dropped).
    """
    size = len(demos) // cluster_num
    random.shuffle(demos)
    for i in range(cluster_num):
        save_demo_path = '../demo/' + target_env_name + '/re_split_random_{}_batch_00.pkl'.format(i)
        raw_demos = {'obs': demos[size * i: size * (i + 1)]}
        with open(save_demo_path, 'wb') as f:
            pickle.dump(raw_demos, f)
        print("=>save cluster {}: {} traj".format(i, len(raw_demos['obs'])))
# return a list of neighbors of x (including self)
def find_neighbor_x(node_id, Graph):
    """Return node_id followed by all of its neighbours in Graph.

    Graph is an adjacency matrix where 1 marks an edge; the node itself is
    always the first element of the returned list.
    """
    neighbours = [j for j in range(len(Graph)) if Graph[node_id][j] == 1]
    return [node_id] + neighbours
def calculate_traj_dist(traj1, traj2):
    """Return the smallest per-step L2 distance between two trajectories.

    Both trajectories must have the same number of steps; the distance is the
    minimum (not the mean) over all time steps.
    """
    steps = len(traj1)
    assert steps == len(traj2)
    per_step = np.array([np.linalg.norm(traj1[k] - traj2[k], ord=2)
                         for k in range(steps)])
    return np.min(per_step)
def evaluate_cluster_within(trajs): # shape: n_domain * x * 1000 * 18
    """Print, for every cluster, the min and max mean pairwise trajectory
    distance between its members (diagnostic only; returns None)."""
    min_within_traj = []
    max_within_traj = []
    # pdb.set_trace()
    for domain_trajs in trajs:
        look_1 = np.expand_dims(np.array(domain_trajs), axis=0) # 1 * x * 1000 * 18
        look_2 = np.expand_dims(np.array(domain_trajs), axis=1) # x * 1 * 1000 * 18
        dist_matrix = np.linalg.norm(look_1 - look_2, axis=-1) #
        dist_matrix = np.mean(dist_matrix, axis=-1) # x * x
        # Mask the zero diagonal (self-distance) so it cannot win the min.
        dist_matrix[np.arange(dist_matrix.shape[0]), np.arange(dist_matrix.shape[0])] = 10000
        min_within_traj.append(np.min(dist_matrix)) # to get min of traj i&j
        # Restore the diagonal to zero before taking the max.
        dist_matrix[np.arange(dist_matrix.shape[0]), np.arange(dist_matrix.shape[0])] = 0
        max_within_traj.append(np.max(dist_matrix)) # to get max of traj i&j
    print('min_within_traj: ', min_within_traj)
    print('max_within_traj: ', max_within_traj)
def evaluate_cluster_between(trajs1, trajs2):
    """Print the min and max mean pairwise trajectory distance between two
    clusters (diagnostic only; returns None)."""
    look_1 = np.expand_dims(np.array(trajs1), axis=0) # 1 * a * 1000 * 18
    look_2 = np.expand_dims(np.array(trajs2), axis=1) # b * 1 * 1000 * 18
    dist_matrix = np.linalg.norm(look_1 - look_2, axis=-1) #
    dist_matrix = np.mean(dist_matrix, axis=-1) # b * a
    # No diagonal masking needed: the clusters are disjoint.
    # dist_matrix[np.arange(dist_matrix.shape[0]), np.arange(dist_matrix.shape[0])] = 10000
    print('min_between_traj: ', np.min(dist_matrix))
    print('max_between_traj: ', np.max(dist_matrix))
# main
# Dispatch on --mode: 'pair' loads transition pairs per test file; 'traj'
# loads whole trajectories, drops short (non-expert) ones and re-clusters.
if args.mode == 'pair':
    demos = [load_pairs(args.demo_files[i:i+1], args.ratio[i]) for i in range(len(args.test_demo_files))]
elif args.mode == 'traj':
    # load all demos
    demos_all, init_obs_all = load_demos(args.demo_files, args.ratio)
    test_demos = []
    test_init_obs = []
    # clean dataset: trajectories shorter than 1000 steps are discarded
    not_expert = []
    for i in range(len(demos_all)):
        if len(demos_all[i]) < 1000:
            not_expert.append(i) # not expert traj?
        if i % 5 == 0:
            print("len demos {}:{}".format(i, len(demos_all[i])))
    # pdb.set_trace()
    # Delete in reverse order so earlier indices stay valid.
    for i in reversed(not_expert):
        del demos_all[i]
    demos_all = np.array(demos_all)
    # norm
    # NOTE(review): the norm is taken over axis 1 of the
    # (n_traj, traj_len, obs_dim) array — confirm this is the intended
    # normalisation axis.
    demos_all_normed = (demos_all / np.expand_dims(np.linalg.norm(demos_all,axis=1), axis=1))
    re_split_demos(demos_all=demos_all)
    # random_split_demo(demos=demos_all)
    # kmeans = KMeans(n_clusters=4, random_state=0).fit(demos_all)
    # print(kmeans.labels_)
    # pdb.set_trace()
    # for i in range(len(args.test_demo_files)):
    #     demos_single, init_obs_single = load_demos(args.test_demo_files[i:i+1], args.ratio)
    #     test_demos.append(demos_single) # 4 * 50 * 1000 * 18
    #     test_init_obs.append(init_obs_single) # 4 * 0?
    # pdb.set_trace()
| 15,031 | 5,780 |
#!/usr/bin/env python
#
# tpycl.py is the python support code to allow calling of python-wrapped
# vtk code from tcl scripts
#
# the main class is tpycl, and scripts can
#
import sys
import os
import Tkinter
from __main__ import slicer
import qt
class tpycl(object):
    """Embed a Tcl interpreter (via Tkinter) and expose python/VTK
    functionality to Tcl scripts through the py_* commands registered in
    __init__. Python 2 only (print statements, dict.has_key, old-style
    except syntax).
    """
    def __init__(self):
        self.verbose = False
        # when python is initialized inside slicer there is no argv
        # so create a fake one if needed
        try:
            argv0 = sys.argv[0]
        except AttributeError:
            sys.argv = []
            sys.argv.append("")
        # Embedded Tcl interpreter; the py_* commands are callable from Tcl
        # and dispatch back into the methods of this instance.
        self.tcl = Tkinter.Tcl()
        self.tcl.createcommand("py_eval", self.py_eval)
        self.tcl.createcommand("py_package", self.py_package)
        self.tcl.createcommand("py_type", self.py_type)
        self.tcl.createcommand("py_del", self.py_del)
        self.tcl.createcommand("py_puts", self.py_puts)
        self.tcl.createcommand("py_after", self.py_after)
        self.tcl.createcommand("py_vtkInstanceName", self.py_vtkInstanceName)
        # Single-shot zero-delay timer used to service Tcl 'after' callbacks
        # from the Qt event loop.
        self.timer = qt.QTimer()
        self.timer.setSingleShot(True)
        self.timer.setInterval(0)
        self.timer.connect('timeout()', self.after_callback)
        if sys.platform == 'win32':
            # Update environment variables set by application - unlike other platforms,
            # on windows this does not happen automatically so we do it here
            # Note that subsquent env changes will not be reflected
            for key in os.environ.keys():
                self.tcl_putenv(key, os.environ[key])
        # This path is Slicer-specific
        self.tcl.eval('source "%s/bin/Python/tpycl/tpycl.tcl"' % slicer.app.slicerHome)
    def usage(self):
        """Print command-line help and exit the process."""
        print "tpycl [options] [file.tcl] [arg] [arg]"
        print "-v --verbose : debugging info while parsing"
        print "-h --help : extra help info"
        print ""
        print "tpycl is a tcl shell implemented in python that"
        print "allows you to import and execute python code from"
        print "inside tcl (hence the name - an homage to jcw's typcl which"
        print "allows you to call tcl from python)."
        print "Not all python constructs supported, but tpycl should be"
        print "adequate to call many packages."
        exit()
    def dprint(self, *args):
        """ debug print (only when self.verbose is set) """
        if self.verbose:
            for arg in args:
                print arg,
            print ""
    def py_package(self, packageName):
        """ imports a vtk-wrapped python package
        and registers each of its classes with the Tcl interpreter
        """
        self.dprint ("importing %s as a package" % packageName)
        if packageName == 'vtk':
            import vtk
            globals()[packageName] = vtk
            for name in dir(vtk):
                self.tcl.eval("::tpycl::registerClass %s %s.%s" % (name, packageName, name) )
            return
        package = globals()[packageName] = __import__(packageName)
        for name in dir(package):
            self.tcl.eval("::tpycl::registerClass %s %s.%s" % (name, packageName, name) )
    def py_type(self,string):
        """ return true if the string represents a valid python type
        such as an int or an instanced variable
        """
        # Evaluating type(<string>) raises for anything that is not a valid
        # python expression in the module globals.
        try:
            exec( "type(%s)"%string, globals() )
        except:
            return 0
        return 1
    def py_vtkInstanceName(self,instance):
        """ make a unique name for an instance using the classname and
        pointer in hex
        - assumes the string form of the instance will end with hex
        encoding of the pointer, for example: '(vtkImageData)0x2a9a750'
        """
        # used to work with vtk 5.6
        #return "%s%s" % (instance.GetClassName(), repr(instance).split()[-1][:-1])
        # now just strip off the parens
        return repr(instance).replace('(','').replace(')','')
    def py_del(self,instanceName):
        """ deletes a named instance
        """
        # only delete if the instanceName exists
        if globals().has_key(instanceName):
            exec( "del(%s)"%instanceName, globals() )
        return None
    def py_puts(self, noNewLine, message):
        """ print into the python shell
        (noNewLine arrives from Tcl as the string "0" or "1")
        """
        print(message)
        if noNewLine == "0":
            print("\n")
    def py_after(self):
        """ sets the QTimer to call the callback
        """
        self.timer.start()
    def after_callback(self):
        """ what gets called when the after timeout happens
        """
        self.tcl.eval('::after_callback')
        self.timer.stop()
    def py_eval(self,cmd):
        """ evaluated the python command string and returns the result
        - if the result is a vtk object instance, it is registered in the tcl interp
        - if the result is a tuple, it is converted to a tcl-style list
        """
        # Bind the expression's value to a well-known global so it can be
        # retrieved after exec().
        cmd = "__tpycl_result = " + cmd
        try:
            exec( cmd, globals() )
        except:
            print( "Error executing %s" % cmd )
            print( sys.exc_info() )
            raise
        evalResult = globals()["__tpycl_result"]
        try:
            if evalResult.IsA("vtkObject"):
                instanceName = self.py_vtkInstanceName(evalResult)
                # Register a Tcl proc for the instance only once.
                if self.tcl_eval("info command %s" % instanceName) == "":
                    exec ("%s = globals()['__tpycl_result']" % instanceName, globals())
                    self.tcl_eval( "proc ::%s {args} {tpycl::methodCaller %s %s $args}" % (instanceName, instanceName, instanceName) )
                return( instanceName )
        except AttributeError:
            pass
        try:
            # Tuples become space-separated Tcl-style lists.
            if evalResult.__class__.__name__ == 'tuple':
                returnValue = evalResult[0]
                for element in evalResult[1:]:
                    returnValue = "%s %s" % (returnValue, element)
                return( returnValue )
        except AttributeError:
            pass
        return( repr(evalResult) )
    def tcl_callback(self, cmd):
        """ evaluate tcl code string but don't return the result
        (only prints error messages)
        """
        self.dprint("callback command is <%s>" % cmd)
        try:
            result = self.tcl.eval(cmd)
        except Tkinter.TclError,error:
            print (error)
            errorInfo = self.tcl.eval("set ::errorInfo")
            print (errorInfo)
    def tcl_eval(self, cmd):
        """ evaluate tcl code string and return the result
        - py_package is a special string to import python code into tcl
        - py_eval goes back from tcl into python
        """
        self.dprint("command is <%s>" % cmd)
        if cmd == 'exit':
            exit()
        if cmd.startswith("py_package "):
            # NOTE(review): cmd[10:] keeps the space after "py_package", so
            # the argument passed starts with a space — compare with the
            # len()-based slice in the py_eval branch below; confirm intended.
            self.py_package( cmd[10:] )
            return()
        if cmd.startswith("py_eval "):
            self.py_eval( cmd[len("py_eval "):] )
            return()
        try:
            result = self.tcl.eval(cmd)
        except Tkinter.TclError,error:
            print (error)
            errorInfo = self.tcl.eval("set ::errorInfo")
            print (errorInfo)
            return(None)
        return(result)
    def tcl_putenv(self, key, value):
        """ Set environment variable in the Tcl interpreter's env array """
        import re
        self.tcl.eval("global env; set env(%s) \"%s\""%(key, re.escape(value)))
    def main(self, argv):
        """ main loop for the interpreter shell
        """
        # parse command line options: first non-flag arg is the script file,
        # the rest are passed through as script arguments
        self.file = ""
        self.args = []
        while argv != []:
            arg = argv.pop(0)
            if arg == "-v" or arg == "--verbose":
                self.verbose = True
                continue
            if arg == "-h" or arg == "--help":
                self.usage()
            if not self.file:
                self.file = arg
            else:
                self.args.append(arg)
        self.dprint("file", self.file)
        self.dprint("args", self.args)
        # if given a file, run it
        if self.file != "":
            fp = open(self.file)
            while 1:
                cmd = fp.readline()
                if cmd == "":
                    break
                # strip the trailing newline before evaluating
                self.tcl_eval( cmd[:-1] )
        # evaluate stdin until eof
        while 1:
            sys.stdout.write( "% " )
            cmd = sys.stdin.readline()[:-1]
            if cmd != "":
                result = self.tcl_eval( cmd )
                if result != None:
                    print result
if __name__ == "__main__":
tp = tpycl()
tp.main(sys.argv[1:])
| 7,500 | 2,461 |
# 8x8 one-bit sprites: each string holds 8 hex bytes, one byte per display
# row (the most-significant bit is rendered leftmost by hex_data_to_image).
hex_strings = [
    "FF 81 BD A5 A5 BD 81 FF",
    "AA 55 AA 55 AA 55 AA 55",
    "3E 7F FC F8 F8 FC 7F 3E",
    "93 93 93 F3 F3 93 93 93",
]
def hex_data_to_image(hex_data):
    """Render one hex byte per row as an 8-pixel-wide bitmap ('X' = set bit).

    Bug fix: the bit count was taken from len(hex_data) (the number of rows),
    which only rendered correctly for exactly 8-row sprites; a byte always
    has 8 bits regardless of how many rows the sprite has.

    :param hex_data: list of two-character hex strings, one per row.
    """
    for hex_pair in hex_data:
        row = int(hex_pair, 16)
        # Walk bits from MSB (leftmost pixel) down to LSB.
        for bit in reversed(range(8)):
            print("X" if (row >> bit) & 1 == 1 else " ", end="")
        print()
if __name__ == "__main__":
for x in range(len(hex_strings)):
hex_data = hex_strings[x].split(' ')
hex_data_to_image(hex_data)
| 473 | 231 |
import click
from kryptos.scripts import build_strategy, stress_worker, kill_strat
@click.group(name="strat")
def cli():
pass
cli.add_command(build_strategy.run, "build")
cli.add_command(stress_worker.run, "stress")
cli.add_command(kill_strat.run, "kill")
| 264 | 99 |
#!/usr/bin/env python3
"""Radio scheduling program.
Usage:
album_times.py [--host=HOST] PORT
Options:
--host=HOST Hostname of MPD [default: localhost]
-h --help Show this text
Prints out the last scheduling time of every album.
"""
from datetime import datetime
from docopt import docopt
from mpd import MPDClient
def album_sticker_get(client, album, sticker):
    """Gets a sticker associated with an album.

    MPD only implements stickers for songs, so the sticker is attached to the
    first song in the album. Returns None when the album has no tracks.
    """
    tracks = client.find("album", album)
    if len(tracks) == 0:
        return
    return client.sticker_get("song", tracks[0]["file"], "album_" + sticker)
def list_albums(client):
    """Lists albums grouped by last-scheduled timestamp, oldest first.

    Bug fixes vs. the original:
    - the timestamp list got one entry *per album*, so buckets holding
      several albums were printed once per member; each bucket now prints
      exactly once;
    - int(None) (album with no tracks) raises TypeError, which the
      ValueError-only handler missed; both now default to "never" (0).
    """
    # Get all albums, dropping the empty tag and the transitions pseudo-album
    albums = client.list("album")
    all_albums = list(
        filter(lambda a: a not in ["", "Lainchan Radio Transitions"], albums)
    )
    # Group albums by when they were last scheduled
    albums_by_last_scheduled = {}
    for album in all_albums:
        # Get the last scheduled time, defaulting to 0.
        # NOTE(review): a genuinely missing sticker may surface as
        # mpd.CommandError — confirm and widen the except if needed.
        try:
            last_scheduled = int(album_sticker_get(client, album, "last_scheduled"))
        except (ValueError, TypeError):
            last_scheduled = 0
        albums_by_last_scheduled.setdefault(last_scheduled, []).append(album)
    # Print each timestamp bucket once, in ascending (oldest-first) order.
    for last_scheduled in sorted(albums_by_last_scheduled):
        dt = datetime.utcfromtimestamp(last_scheduled)
        albums = albums_by_last_scheduled[last_scheduled]
        print("{}: {}".format(dt.strftime("%Y-%m-%d %H:%M:%S"), albums))
if __name__ == "__main__":
args = docopt(__doc__)
try:
args["PORT"] = int(args["PORT"])
except ValueError:
print("PORT must be an integer")
exit(1)
try:
client = MPDClient()
client.connect(args["--host"], args["PORT"])
except Exception as e:
print(f"could not connect to MPD: {e.args[0]}")
exit(2)
list_albums(client)
| 2,334 | 733 |
#!/usr/bin/env python3
#
# Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# sudo pip3 install pyusb
import usb.core
import usb.util
# find our device
# NOTE(review): VID/PID 0x0000:0x0001 matches the pico-examples
# dev_lowlevel test device — adjust for other hardware.
dev = usb.core.find(idVendor=0x0000, idProduct=0x0001)
# was it found?
if dev is None:
    raise ValueError('Device not found')
# get an endpoint instance from the first interface of the active config
cfg = dev.get_active_configuration()
intf = cfg[(0, 0)]
outep = usb.util.find_descriptor(
    intf,
    # match the first OUT endpoint
    custom_match= \
    lambda e: \
        usb.util.endpoint_direction(e.bEndpointAddress) == \
        usb.util.ENDPOINT_OUT)
inep = usb.util.find_descriptor(
    intf,
    # match the first IN endpoint
    custom_match= \
    lambda e: \
        usb.util.endpoint_direction(e.bEndpointAddress) == \
        usb.util.ENDPOINT_IN)
assert inep is not None
assert outep is not None
# Echo test: write a string to the OUT endpoint and read it back from IN.
test_string = "Hello World!"
outep.write(test_string)
from_device = inep.read(len(test_string))
# read() returns an array of byte values; turn it back into a string.
print("Device Says: {}".format(''.join([chr(x) for x in from_device])))
| 1,116 | 437 |
import ipywidgets as ipw
import traitlets
from IPython.display import clear_output
class SummaryWidget(ipw.VBox):
    """output the convergence summary"""
    # Workflow outputs keyed by pseudopotential label; the rendered table is
    # refreshed whenever this trait changes.
    selected_pseudos = traitlets.Dict(allow_none=True)
    def __init__(self):
        # Output area that print_summary writes the table into.
        self.output = ipw.Output()
        super().__init__(
            children=[
                self.output,
            ],
        )
    @traitlets.observe("selected_pseudos")
    def _on_pseudos_change(self, change):
        # Re-render the summary table when a non-empty selection arrives.
        if change["new"]:
            with self.output:
                clear_output(wait=True)
                print_summary(change["new"])
def print_summary(pseudos: dict):
    """Print a convergence-summary table, one row per pseudopotential.

    Each row shows the converged (wavefunction cutoff, charge-density cutoff)
    pair from the cohesive-energy, phonon-frequencies and pressure workflows.

    :param pseudos: mapping of label -> workflow outputs; each value must hold
        ``convergence_cohesive_energy``, ``convergence_phonon_frequencies``
        and ``convergence_pressure``, each with ``final_output_parameters``.
    :raises KeyError: when an expected key is missing. (The original wrapped
        the body in ``except Exception as e: raise e`` — a no-op handler that
        only obscured the traceback; it has been removed.)
    """
    print("Label\t\t\t|Cohesive energy|\t|Phonon frequencies|\t|Pressure|")
    for label, output in pseudos.items():
        res_coh = output["convergence_cohesive_energy"]["final_output_parameters"]
        res_phonon = output["convergence_phonon_frequencies"][
            "final_output_parameters"
        ]
        res_pressure = output["convergence_pressure"]["final_output_parameters"]
        print(
            f'{label}\t({res_coh["wfc_cutoff"]}, {res_coh["rho_cutoff"]:.2f})'
            f'\t({res_phonon["wfc_cutoff"]}, {res_phonon["rho_cutoff"]:.2f})'
            f'\t({res_pressure["wfc_cutoff"]}, {res_pressure["rho_cutoff"]:.2f})'
        )
| 1,426 | 456 |
# -*- coding: utf-8 -*-
#
# This file is part of the SKA PST LMC project
#
# Distributed under the terms of the BSD 3-clause new license.
# See LICENSE for more info.
"""This module implements the PstManagement device."""
from __future__ import annotations
from typing import Optional
from ska_tango_base.csp.controller_device import CspSubElementController
from tango.server import device_property, run
# PyTango imports
# from tango import AttrQuality, AttrWriteType, DebugIt, DevState, DispLevel, PipeWriteType
# from tango.server import Device, attribute, command, device_property, run
__all__ = ["PstManagement", "main"]
class PstManagement(CspSubElementController):
    """An implementation of a Maser Tango device for PST.LMC.
    **Properties:**
    - Device Property
        BeamFQDN
            - Address of the Beam capability TANGO device
            - Type:'DevString'
        BeamServerFQDN
            - Address of the BeamServer TANGO device
            - Type:'DevString'
    """
    # -----------------
    # Device Properties
    # -----------------
    BeamFQDN = device_property(
        dtype="DevString",
    )
    BeamServerFQDN = device_property(
        dtype="DevString",
    )
    # ---------------
    # General methods
    # ---------------
    def init_device(self: PstManagement) -> None:
        """Intialise the attributes and properties of the PstManagement device.
        This overrides the :py:class:`CspSubElementController`.
        """
        CspSubElementController.init_device(self)
        # Push change/archive events for adminMode and the long-running
        # command attributes so clients can subscribe instead of polling.
        self.set_change_event("adminMode", True, True)
        self.set_archive_event("adminMode", True, True)
        self.set_change_event("longRunningCommandsInQueue", True, True)
        self.set_change_event("longRunningCommandStatus", True, True)
        self.set_change_event("longRunningCommandProgress", True, True)
        self.set_change_event("longRunningCommandResult", True, True)
    def always_executed_hook(self: PstManagement) -> None:
        """Execute call before any TANGO command is executed."""
    def delete_device(self: PstManagement) -> None:
        """Delete resources allocated in init_device.
        This method allows for any memory or other resources allocated in the
        init_device method to be released. This method is called by the device
        destructor and by the device Init command.
        """
    # ----------
    # Attributes
    # ----------
    # --------
    # Commands
    # --------
def main(args: Optional[list] = None, **kwargs: dict) -> int:
    """
    Entry point for module.
    Starts the Tango device server hosting :py:class:`PstManagement`.
    :param args: positional arguments
    :param kwargs: named arguments
    :return: exit code
    :rtype: int
    """
    return run((PstManagement,), args=args, **kwargs)
if __name__ == "__main__":
    main()
| 2,846 | 829 |
import sys,os
import argparse as arg
import nmap
import urllib2
# CLI: the target address (any IP on the /24 to ping-scan) and the local
# network interface whose MAC/IP will be rewritten.
parser = arg.ArgumentParser()
parser.add_argument("-a", "--address", help="IP address", required=True)
parser.add_argument("-i", "--interface", help="Interface", required=True)
argument = parser.parse_args()
def scan_(ip):
    """Ping-scan the /24 network around *ip* and collect IP/MAC pairs.

    :param ip: any IPv4 address on the target network (string)
    :return: list of ``{'ip': ..., 'mac': ...}`` dicts; 'mac' may be None
             when nmap could not resolve a MAC address
    """
    hosts = []
    # -sn: ping scan only (no port scan); sudo is required for MAC discovery.
    scan = nmap.PortScanner().scan(hosts=ip + '/24', arguments='-sn ', sudo=True).get('scan')
    for host_ip in scan.keys():
        # BUG FIX: `scan.keys()[scan.keys().index(i)]` was a no-op equal to `i`
        # and is a TypeError on Python 3 (dict views are not indexable).
        mac = scan.get(host_ip).get('addresses').get('mac')
        hosts.append({'ip': host_ip, 'mac': mac})
    return hosts
def change(ip, mac, iface):
    """Clone the given IP/MAC identity onto *iface* and test connectivity.

    :param ip: IPv4 address to assume (the gateway is assumed to be .1)
    :param mac: MAC address to assume, or None to skip reconfiguration
    :param iface: network interface name
    :return: True if the internet is reachable with this identity

    NOTE(review): shells out via os.system and needs root; ip/mac come
    from the local nmap scan, not remote user input.
    """
    if mac is not None:
        # Derive the gateway address x.y.z.1 on the /24.
        gw_arr = ip.split(".")
        gw_arr[3] = "1"
        gw_address = ".".join(gw_arr)
        os.system("ip link set "+iface+" down")
        os.system("ip link set dev "+iface+" address "+mac)
        os.system("ip link set "+iface+" up")
        os.system("ip addr flush dev "+iface)
        os.system("ifconfig "+iface+" "+ip+" netmask 255.255.255.0 up")
        os.system("route add default gw "+gw_address)
        print ("Testing IP %s and MAC %s"%(ip,mac))
        os.system("ping -c 2 8.8.8.8")
    try:
        urllib2.urlopen('http://216.58.192.142', timeout=1)
        return True
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit and KeyboardInterrupt.
        return False
def main():
    """Scan the LAN, list discovered hosts, then try each IP/MAC identity
    until one of them gets internet access."""
    res = scan_(argument.address)
    # BUG FIX: `print "..."` is a Python-2-only statement and inconsistent
    # with the parenthesised print calls elsewhere in this file.
    print("IP and Mac list")
    for j in sorted(res, key=lambda i: i['ip']):
        print("IP :%s / MAC: %s" % (j['ip'], j['mac']))
    for j in sorted(res, key=lambda i: i['ip']):
        internet_connection = change(j['ip'], j['mac'], argument.interface)
        if internet_connection:
            # Keep the first working identity.
            break


main()
| 1,469 | 606 |
import argparse
from cartografo import DEFAULT_OBJECT, DEFAULT_TARGET
def __get_argparser():
    # Attach this tool's arguments to the module-level parser.
    # NOTE(review): relies on the global `__parser` being created (just
    # below this definition) before this function is called — confirm the
    # ordering if this module is refactored.
    __parser.add_argument('--k8s-object', help='Output Kubernetes objet: secrets or configmap')
    __parser.add_argument('--target', help='Target file. If it exists, it will be modified')
    __parser.add_argument('files_folder', help='Folder where the actual files are.')
# Build the parser, register the arguments, and parse once at import time
# so get_arguments() can be called repeatedly without re-parsing.
__parser = argparse.ArgumentParser()
__get_argparser()
__arguments = __parser.parse_args()
def get_arguments():
    """Return the parsed CLI arguments as a dict, applying module defaults
    for the optional Kubernetes object kind and target file."""
    return {
        "object": __arguments.k8s_object or DEFAULT_OBJECT,
        "target": __arguments.target or DEFAULT_TARGET,
        "source": __arguments.files_folder,
    }
L = ['michael', 'sarah', 'tracy', 'bob', 'jack']
# Take the first N elements (manual loop version).
r = []
n = 3
for i in range(n):
    r.append(L[i])
print(r)
# Python's slice operator simplifies this.
m = 0
print(L[m:n], L[:n], L[-n:-1])
L = list(range(100))
# First 10, last 10, elements 10-19, every 2nd of the first 10, every 5th.
print(L[:10], '\r', L[-10:], '\r', L[10:20], '\r', L[:10:2], '\r', L[::5])
# Slicing also works on tuples and strings.
print((0, 1, 2, 3, 4, 5)[:3])
print('ABCDEFG'[:3], 'ABCDEFG'[::2])
| 343 | 211 |
'''
Created on Feb 17, 2014
@author: magus0219
'''
import unittest,datetime
from util.dateutil import DateUtil
from core.timematcher import TimeMatcher
from core.timepattern import TimePattern
class TimeMatcherTest(unittest.TestCase):
    """Unit tests for TimeMatcher's cron-like pattern matching.

    The `expectedFailure` cases assert that invalid values/patterns make
    matchOneUnit fail.  (The 'Unvaid' typos in the method names are kept
    as-is so existing test ids remain stable.)
    """
    @unittest.expectedFailure
    def testUnvaidValueNotInt(self):
        # Non-integer value must be rejected.
        TimeMatcher.matchOneUnit("*", "dsf")
    @unittest.expectedFailure
    def testUnvaidValueNegitiveInt(self):
        # Negative value must be rejected.
        TimeMatcher.matchOneUnit("*", -2)
    @unittest.expectedFailure
    def testUnvaidValuePattern1(self):
        # Malformed step pattern must be rejected.
        TimeMatcher.matchOneUnit("fjf/2", 1)
    @unittest.expectedFailure
    def testUnvaidValuePattern2(self):
        TimeMatcher.matchOneUnit("sdf", 1)
    @unittest.expectedFailure
    def testUnvaidValuePattern3(self):
        TimeMatcher.matchOneUnit("*/sd", 1)
    def testMatchOnePattern(self):
        # "*" matches anything; "*/k" matches multiples of k; "n" matches n.
        self.assertEqual(True, TimeMatcher.matchOneUnit("*", 1))
        self.assertEqual(True, TimeMatcher.matchOneUnit("*", 24))
        self.assertEqual(True, TimeMatcher.matchOneUnit("*/2", 22))
        self.assertEqual(False, TimeMatcher.matchOneUnit("*/2", 13))
        self.assertEqual(True, TimeMatcher.matchOneUnit("*/5", 15))
        self.assertEqual(False, TimeMatcher.matchOneUnit("*/5", 13))
        self.assertEqual(True, TimeMatcher.matchOneUnit("23", 23))
        self.assertEqual(False, TimeMatcher.matchOneUnit("23", 13))
    def testMatchTimePattern(self):
        # Five fields against 2014-02-17 20:28:35 (a Monday).
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("* * * * *"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("28 * * * *"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("* 20 * * *"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("* * 17 * *"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("* * * 2 *"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("* * * * 1"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("28 20 17 2 1"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(True, TimeMatcher.matchTimePattern(TimePattern("*/2 * * * *"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
        self.assertEqual(False, TimeMatcher.matchTimePattern(TimePattern("*/3 * * * *"),
                                                           DateUtil.datetime("2014-02-17 20:28:35")))
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testUnvaidValue']
    # Run all tests in this module via the standard unittest runner.
    unittest.main()
| 3,330 | 1,076 |
from django.conf.urls import url
from api import views
# REST API routes: list endpoints for pets, cities and states.
urlpatterns = [
    url(r'^pets/$', views.ListPets.as_view(), name='list_pets'),
    url(r'^cities/$', views.CityList.as_view(), name='city-list'),
    url(r'^states/$', views.StateList.as_view(), name='state-list'),
]
| 276 | 104 |
#!/usr/bin/env python
# This is not an officially supported Google product, though support
# will be provided on a best-effort basis.
# Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ujson
import webapp2
import utilities
class main(webapp2.RequestHandler):
    """Handler for /toggleIndex: marks a meeting as indexed (beenIndexed = 1)
    by its global id and returns the DB result as JSON."""
    def get(self):
        # JSON response that must never be cached by clients or proxies.
        self.response.headers["Content-Type"] = "application/json"
        self.response.headers.add_header(
            "Cache-Control",
            "no-cache, no-store, must-revalidate, max-age=0"
        )
        self.response.headers.add_header(
            "Expires",
            "0"
        )
        try:
            globalId = self.request.get("gId")
            # Parameterized query: gId comes from the request and is untrusted.
            sqlCmd = "update meetingRegistry set beenIndexed = %s where globalId = %s"
            sqlData = (1, globalId)
            resultList = utilities.dbExecution(sqlCmd, sqlData)
            outputStr = str(resultList)
        except Exception:
            # BUG FIX: narrowed from a bare `except:` (which also caught
            # SystemExit/KeyboardInterrupt).  Best-effort: response carries null.
            outputStr = None
        resultObj = {}
        resultObj["response"] = outputStr
        self.response.out.write(ujson.dumps(resultObj))
# WSGI application: route /toggleIndex to the handler above.
app = webapp2.WSGIApplication([
    ("/toggleIndex", main)], debug = True
)
| 1,503 | 516 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""=================================================
@Author :蒋虎成
@Date :2021/9/22 17:04
@Desc :主接口
=================================================="""
from colors import ColorMultiImage
import settings
from model import training
import csv
if __name__ == '__main__':
    generate_color = ColorMultiImage()
    stickers = settings.module  # configured sticker modules (mouse and cattle are built in)
    if settings.train:
        # Train (or retrain) the colour model from the configured data path.
        color_model_path = training(settings.color_data_path)
        print("颜色模型生成路径:" + color_model_path)
    if settings.color_style == 1:
        # Count the colours available to the "artist style" model.
        # BUG FIX: the file was opened (in "r+" despite read-only use) and
        # never closed; open read-only via a context manager instead.
        with open(settings.color_model_path, "r", encoding="utf-8-sig") as f:
            reader = csv.reader(f)
            colors_max = len(list(reader))
        print(f"当前为艺术家风格,当前模型可用颜色数为{colors_max}个")
    for amount in range(0, settings.n):  # number of images to generate
        pixel = generate_color.merges(stickers)
        colors_number = generate_color.colors_number
        generate_color.generate(pixel, settings.color_output_name, str(amount), settings.color_model_path, settings.color_style, colors_number)
        print(f"INFO:生成第{str(amount)}个{settings.color_output_name}")
#! /usr/bin/env python
"""
usage: demoselsim.py outfilename popn h pct
"""
import numpy
import sys
def pnext(AC, N, Nout, h, s):
    """Binomially sample the next-generation allele count after selection.

    AC: current allele count; N: current diploid population size;
    Nout: diploid size of the next generation; h: dominance; s: selection
    coefficient.  Returns the sampled count (numpy integer).
    """
    AC, N, h, s = float(AC), float(N), float(h), float(s)
    p = AC / (2 * N)
    # Genotype fitnesses: derived homozygote, heterozygote, ancestral homozygote.
    w11, w12, w22 = 1 + s, 1 + h * s, 1
    # Mean fitness and the deterministic post-selection frequency.
    wbar = ((p ** 2) * w11) + (2 * p * (1 - p) * w12) + (((1 - p) ** 2) * w22)
    pdet = p * (p * w11 + (1 - p) * w12) / wbar
    return numpy.random.binomial(2 * Nout, pdet)
def trajecvec(h, s, popn, pct):
    """Simulate one allele-count trajectory through a piecewise-constant
    demographic model (ancestral size -> bottleneck -> recovery -> admixture).

    :param h: dominance coefficient
    :param s: selection coefficient
    :param popn: population model, "EUR" or "ASN"
    :param pct: starting allele frequency, in percent
    :return: list of per-generation allele counts; if the allele fixes or is
             lost, the last entry becomes the string 'FIXED:<count>'
    :raises ValueError: for an unrecognised popn
    """
    if popn == "EUR":  # initialize EUR
        tNcurr = 620
        tB1 = 720
        tAdmix = 1900
        N = 10085
        NB1 = 549
        Nfinal = 85
    elif popn == "ASN":  # initialize ASN
        N = 10063
        tNcurr = 540
        NB1 = 407
        tAdmix = 1900
        tB1 = 640
        Nfinal = 97
    else:
        # BUG FIX: an unknown popn previously fell through to a NameError.
        raise ValueError("popn must be 'EUR' or 'ASN', got %r" % (popn,))
    # Generation indices at which the population size changes.
    first_point = tAdmix - tB1
    second_point = tAdmix - tNcurr
    AC = [int(round(N * 2 * (pct / 100.)))]
    for t in range(0, first_point):  # constant ancestral size N
        AC.append(pnext(AC[t], N, N, h, s))
        if (AC[t+1] == 0) or (AC[t+1] == 2*N):
            AC[-1] = 'FIXED:{0}'.format(AC[-1])
            return AC
    # Size change N -> NB1 (entering the bottleneck).
    AC.append(pnext(AC[first_point], N, NB1, h, s))
    if (AC[first_point+1] == 0) or (AC[first_point+1] == 2*NB1):
        AC[-1] = 'FIXED:{0}'.format(AC[-1])
        return AC
    for t in range(first_point+1, second_point):  # constant bottleneck size
        AC.append(pnext(AC[t], NB1, NB1, h, s))
        if (AC[t+1] == 0) or (AC[t+1] == 2*NB1):
            AC[-1] = 'FIXED:{0}'.format(AC[-1])
            return AC
    # Size change NB1 -> N (recovery).
    AC.append(pnext(AC[second_point], NB1, N, h, s))
    if (AC[second_point+1] == 0) or (AC[second_point+1] == 2*N):
        AC[-1] = 'FIXED:{0}'.format(AC[-1])
        return AC
    for t in range(second_point+1, tAdmix):  # constant size N up to admixture
        AC.append(pnext(AC[t], N, N, h, s))
        if (AC[t+1] == 0) or (AC[t+1] == 2*N):
            AC[-1] = 'FIXED:{0}'.format(AC[-1])
            return AC
    # Final admixture step N -> Nfinal, sampled from the latest generation.
    # BUG FIX: previously sampled from AC[t] (the second-to-last count).
    AC.append(pnext(AC[tAdmix], N, Nfinal, h, s))
    # BUG FIX: the fixation test previously mixed AC[-1] and AC[t+1];
    # both sides now consistently check the newly appended count.
    if (AC[-1] == 0) or (AC[-1] == 2*Nfinal):
        AC[-1] = 'FIXED:{0}'.format(AC[-1])
    # BUG FIX: the still-segregating case previously fell off the end of the
    # function, returning None and crashing summary().
    return AC
def checkint(value):
    """Return True if *value* can be parsed as an int (or already is one).

    The parameter was renamed from `str`, which shadowed the builtin.
    """
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        # TypeError added so non-numeric, non-string inputs report False
        # instead of propagating.
        return False
def summary(AC):
    """Condense a trajectory into 'SEG:<count>', 'LOST:<time>' or 'FIXED:<time>'.

    A trajectory still segregating ends in an int; one that fixed at zero
    copies ('FIXED:0') was actually lost.
    """
    last = AC[-1]
    if checkint(last):
        return 'SEG:{0}'.format(last)
    generations = len(AC)
    if last == 'FIXED:0':
        return 'LOST:{0}'.format(generations)
    return 'FIXED:{0}'.format(generations)
def main():
    """Run `niter` trajectories with gamma-distributed selection coefficients
    and write the mean whole-sample and segregating-only final frequencies.

    Usage: demoselsim.py outfilename popn h pct
    """
    outfilename = sys.argv[1]
    popn = sys.argv[2]
    h = float(sys.argv[3])
    pct = int(sys.argv[4])
    niter = 1000000
    results_vec = []
    for i in range(niter):  # BUG FIX: xrange is Python-2-only; range works in both
        # Selection coefficient drawn from a gamma DFE, rescaled by 2*N_anc.
        s = numpy.random.gamma(0.184, 8200)
        s = s/(2.*25636)
        results_vec.append(summary(trajecvec(h, s, popn, pct)))
        sys.stdout.write('{0}\r'.format(i))
    # Tally outcomes and keep the per-outcome counts/times.
    lost_count = 0; fixed_count = 0; seg_count = 0
    seg_vec = []; lost_vec = []; fixed_vec = []
    for result in results_vec:
        state, count = result.split(':')
        if state == 'LOST':
            lost_count += 1
            lost_vec.append(int(count))
        elif state == 'SEG':
            seg_count += 1
            seg_vec.append(int(count))
        elif state == 'FIXED':
            fixed_count += 1
            fixed_vec.append(int(count))
    # Final (post-admixture) population size used to convert counts to frequencies.
    if popn == "EUR":  # initialize EUR
        N = 85
    elif popn == "ASN":  # initialize ASN
        N = 97
    # NOTE(review): fixed replicates contribute 1 (not 2N) to the mean before
    # dividing by 2N — confirm this is the intended weighting.
    wfreq = numpy.mean([0]*lost_count + seg_vec + [1]*fixed_count)/(2.*N)
    if seg_vec != []:
        segfreq = numpy.mean(seg_vec)/(2.*N)
    else:
        segfreq = 0
    s = "dist"  # label: s was drawn from a distribution, not fixed
    outstr = '{0} {1} {2} {3} {4}'.format(popn, h, s, wfreq, segfreq)
    # BUG FIX: the output file was held open for the entire (long) run; open
    # it only when writing, via a context manager.
    with open(outfilename, 'w') as outfile:
        outfile.write(outstr)
if __name__ == "__main__":
    main()
| 3,345 | 1,756 |
import math
import torch
import numpy as np
import torch.nn as nn
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding added to sqrt(d_model)-scaled inputs.

    NOTE(review): the exponents use (2*i)/d_model with i already stepping
    by 2 — twice the exponent of the canonical Vaswani et al. formula.
    Kept as-is to stay numerically compatible with trained weights.
    """
    def __init__(self, d_model, seq_len) -> None:
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        pe = torch.zeros(seq_len, d_model)
        for pos in range(seq_len):
            for i in range(0, d_model, 2):
                pe[pos, i] = math.sin(pos / (10000 ** ((2 * i) / d_model)))
                # BUG FIX: guard the cosine slot so an odd d_model no longer
                # raises an index-out-of-range on the last column.
                if i + 1 < d_model:
                    pe[pos, i+1] = math.cos(pos / (10000 ** ((2 * (i+1)) / d_model)))
        pe = pe.unsqueeze(0)  # [1, seq_len, d_model], broadcasts over the batch
        self.register_buffer("pe", pe)
    def forward(self, x) -> torch.Tensor:
        """Scale *x* by sqrt(d_model) and add the non-trainable encoding."""
        seq_len = x.shape[1]
        x = math.sqrt(self.d_model) * x
        x = x + self.pe[:, :seq_len].requires_grad_(False)
        return x
class ResidualBlock(nn.Module):
    """Wraps a sublayer with dropout followed by a residual add + LayerNorm.

    When the wrapped layer is nn.MultiheadAttention it is applied as
    self-attention, and the attention weights are stashed on
    ``self.attn_weights`` for later inspection.
    """
    def __init__(self, layer: nn.Module, embed_dim: int, p=0.1) -> None:
        super(ResidualBlock, self).__init__()
        self.layer = layer
        self.dropout = nn.Dropout(p=p)
        self.norm = nn.LayerNorm(embed_dim)
        self.attn_weights = None
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: [N, seq_len, features]
        :return: [N, seq_len, features]
        """
        if isinstance(self.layer, nn.MultiheadAttention):
            # nn.MultiheadAttention expects [seq_len, N, features].
            query = x.transpose(0, 1)
            attended, self.attn_weights = self.layer(query, query, query)
            sublayer_out = attended.transpose(0, 1)
        else:
            sublayer_out = self.layer(x)
        return self.norm(x + self.dropout(sublayer_out))
class PositionWiseFeedForward(nn.Module):
    """Per-position feed-forward network built from 1x1 convolutions:
    expand to 2x hidden size, ReLU, project back."""
    def __init__(self, hidden_size: int) -> None:
        super(PositionWiseFeedForward, self).__init__()
        self.hidden_size = hidden_size
        self.conv = nn.Sequential(
            nn.Conv1d(hidden_size, hidden_size * 2, 1),
            nn.ReLU(),
            nn.Conv1d(hidden_size * 2, hidden_size, 1)
        )
    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        # Conv1d wants channels in dim 1, so swap (seq, features) around the conv.
        return self.conv(tensor.transpose(1, 2)).transpose(1, 2)
class EncoderBlock(nn.Module):
    """One transformer encoder layer: multi-head self-attention followed by a
    position-wise FFN, each wrapped in a ResidualBlock (dropout + add & norm)."""
    def __init__(self, embed_dim: int, num_head: int, dropout_rate=0.1) -> None:
        super(EncoderBlock, self).__init__()
        self.attention = ResidualBlock(
            nn.MultiheadAttention(embed_dim, num_head), embed_dim, p=dropout_rate
        )
        self.ffn = ResidualBlock(PositionWiseFeedForward(embed_dim), embed_dim, p=dropout_rate)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Attention sublayer feeds directly into the feed-forward sublayer.
        return self.ffn(self.attention(x))
class DenseInterpolation(nn.Module):
    def __init__(self, seq_len: int, factor: int) -> None:
        """Dense interpolation: reduce a length-`seq_len` sequence to `factor`
        weighted summary positions using a fixed weight matrix.

        :param seq_len: sequence length
        :param factor: factor M (number of interpolation points)
        """
        super(DenseInterpolation, self).__init__()
        # Precompute the fixed dense-interpolation weights W[m, t].
        W = np.zeros((factor, seq_len), dtype=np.float32)
        for t in range(seq_len):
            s = np.array((factor * (t + 1)) / seq_len, dtype=np.float32)
            for m in range(factor):
                tmp = np.array(1 - (np.abs(s - (1 + m)) / factor), dtype=np.float32)
                W[m, t] = np.power(tmp, 2, dtype=np.float32)
        self.register_buffer("W", torch.tensor(W).float().unsqueeze(0))
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Broadcast W over the batch, contract over time, then put the
        # feature axis before the factor axis: [N, features, factor].
        weights = self.W.repeat(x.shape[0], 1, 1).requires_grad_(False)
        return torch.bmm(weights, x).transpose_(1, 2)
class ClassificationModule(nn.Module):
    """Flattens [N, d_model, factor] interpolated features and projects them
    to `num_class` logits with a single linear layer."""
    def __init__(self, d_model: int, factor: int, num_class: int) -> None:
        super(ClassificationModule, self).__init__()
        self.d_model = d_model
        self.factor = factor
        self.num_class = num_class
        self.fc = nn.Linear(int(d_model * factor), num_class)
        nn.init.normal_(self.fc.weight, std=0.02)
        # BUG FIX: `nn.init.normal_(self.fc.bias, 0)` passed 0 as the *mean*
        # and left std at its default of 1.0, producing a random bias.  The
        # std=0.02 weight init indicates a GPT-style scheme with zero bias.
        nn.init.zeros_(self.fc.bias)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Flatten the (factor, d_model) block per sample before projecting.
        x = x.contiguous().view(-1, int(self.factor * self.d_model))
        return self.fc(x)
class RegressionModule(nn.Module):
    """Flattens [N, d_model, factor] interpolated features and projects them
    to `output_size` regression targets with one linear layer."""
    def __init__(self, d_model: int, factor: int, output_size: int) -> None:
        super(RegressionModule, self).__init__()
        self.d_model = d_model
        self.factor = factor
        self.output_size = output_size
        self.fc = nn.Linear(int(d_model * factor), output_size)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Flatten the (factor, d_model) block per sample before projecting.
        flat = x.contiguous().view(-1, int(self.factor * self.d_model))
        return self.fc(flat)
| 4,692 | 1,682 |
"""
Common CLI options for invoke command
"""
import click
from samcli.commands._utils.options import template_click_option, docker_click_options, parameter_override_click_option
try:
from pathlib import Path
except ImportError:
from pathlib2 import Path
def get_application_dir():
    """
    Returns
    -------
    Path
        Path representing the application config directory
    """
    # TODO: Get the config directory directly from `GlobalConfig`
    app_dir = click.get_app_dir("AWS SAM", force_posix=True)
    return Path(app_dir)
def get_default_layer_cache_dir():
    """Return the default layer cache directory (a `layers-pkg` subfolder of
    the application config directory) as a string."""
    return str(get_application_dir().joinpath("layers-pkg"))
def service_common_options(port):
    """Build a decorator adding the --host/--port options shared by the
    service commands, with *port* as the default port number."""
    def construct_options(f):
        """
        Common CLI Options that are shared for service related commands ('start-api' and 'start_lambda')
        Parameters
        ----------
        f function
            Callback passed by Click
        port int
            port number to use
        Returns
        -------
        function
            The callback function
        """
        host_option = click.option(
            "--host", default="127.0.0.1", help="Local hostname or IP address to bind to (default: '127.0.0.1')"
        )
        port_option = click.option(
            "--port", "-p", default=port, help="Local port number to listen on (default: '{}')".format(str(port))
        )
        # Apply port first and host last so --help lists host before port.
        port_option(f)
        host_option(f)
        return f
    return construct_options
def invoke_common_options(f):
    """
    Common CLI options shared by "local invoke" and "local start-api" commands
    :param f: Callback passed by Click
    """
    # Options are declared in the order they should appear in --help, then
    # applied in reverse below (Click applies decorators inside-out).
    invoke_options = (
        [
            template_click_option(),
            click.option(
                "--env-vars",
                "-n",
                type=click.Path(exists=True),
                help="JSON file containing values for Lambda function's environment variables.",
            ),
            parameter_override_click_option(),
            click.option(
                "--debug-port",
                "-d",
                help="When specified, Lambda function container will start in debug mode and will expose this "
                "port on localhost.",
                envvar="SAM_DEBUG_PORT",
            ),
            click.option(
                "--debugger-path", help="Host path to a debugger that will be mounted into the Lambda container."
            ),
            click.option(
                "--debug-args", help="Additional arguments to be passed to the debugger.", envvar="DEBUGGER_ARGS"
            ),
            click.option(
                "--docker-volume-basedir",
                "-v",
                envvar="SAM_DOCKER_VOLUME_BASEDIR",
                help="Specifies the location basedir where the SAM file exists. If the Docker is running on "
                "a remote machine, you must mount the path where the SAM file exists on the docker machine "
                "and modify this value to match the remote machine.",
            ),
            click.option("--log-file", "-l", help="logfile to send runtime logs to."),
            click.option(
                "--layer-cache-basedir",
                type=click.Path(exists=False, file_okay=False),
                envvar="SAM_LAYER_CACHE_BASEDIR",
                help="Specifies the location basedir where the Layers your template uses will be downloaded to.",
                default=get_default_layer_cache_dir(),
            ),
        ]
        # Docker-related options are shared with other command groups.
        + docker_click_options()
        + [
            click.option(
                "--force-image-build",
                is_flag=True,
                help="Specify whether CLI should rebuild the image used for invoking functions with layers.",
                envvar="SAM_FORCE_IMAGE_BUILD",
                default=False,
            )
        ]
    )
    # Reverse the list to maintain ordering of options in help text printed with --help
    for option in reversed(invoke_options):
        option(f)
    return f
| 4,350 | 1,101 |
import threading
def merge_sort(arr):
    """Return a sorted copy of *arr* using top-down merge sort.

    The input list is not modified.  The merge prefers the left element on
    ties, which makes the sort stable.
    """
    def _merge(left, right):
        # Merge two already-sorted lists into one sorted list.
        merged = [0] * (len(left) + len(right))
        i = j = idx = 0
        while i < len(left) and j < len(right):
            # BUG FIX: `<=` (was `<`) keeps the left element first on ties,
            # making the sort stable.
            if left[i] <= right[j]:
                merged[idx] = left[i]
                i += 1
            else:
                merged[idx] = right[j]
                j += 1
            idx += 1
        # Copy whichever side still has elements.
        while i < len(left):
            merged[idx] = left[i]
            i += 1
            idx += 1
        while j < len(right):
            merged[idx] = right[j]
            j += 1
            idx += 1
        return merged
    def _recursive_sort(sub):
        # BUG FIX: base case was `len(sub) == 1`, so an empty input recursed
        # forever (RecursionError).  `<= 1` handles [] as well.
        if len(sub) <= 1:
            return sub
        mid = len(sub) // 2
        return _merge(_recursive_sort(sub[:mid]), _recursive_sort(sub[mid:]))
    return _recursive_sort(arr)
def merge_sort_inplace(arr):
    """Sort *arr* in place with merge sort; merging rotates elements inside
    the list instead of using an auxiliary buffer (O(1) extra space,
    O(n^2) worst-case merge)."""
    def _shift_merge(data, lo, mid, hi):
        # Merge the sorted runs data[lo:mid+1] and data[mid+1:hi+1].
        right = mid + 1
        while lo <= mid and right <= hi:
            if data[lo] <= data[right]:
                # Element already in the right place.
                lo += 1
            else:
                # Rotate data[lo:right+1] one step right: equivalent to the
                # classic element-by-element shift, moving data[right] to lo.
                data.insert(lo, data.pop(right))
                lo += 1
                mid += 1
                right += 1
    def _sort(data, lo, hi):
        if lo < hi:
            mid = lo + (hi - lo) // 2
            _sort(data, lo, mid)
            _sort(data, mid + 1, hi)
            _shift_merge(data, lo, mid, hi)
    _sort(arr, 0, len(arr) - 1)
def _merge(arr, start, mid, end):
start2 = mid + 1
while start <= mid and start2 <= end:
if arr[start] <= arr[start2]: # elem in right place
start += 1
else:
orig_start2 = arr[start2]
idx = start2
# shift all elements to the right by one place
while idx != start:
arr[idx] = arr[idx - 1]
idx -= 1
arr[start] = orig_start2
start += 1
mid += 1
start2 += 1
def _recursive_sort(arr, left, right):
    """Recursively merge-sort arr[left:right+1] in place, using the
    module-level in-place _merge."""
    if left >= right:
        return
    mid = left + (right - left) // 2
    _recursive_sort(arr, left, mid)
    _recursive_sort(arr, mid + 1, right)
    _merge(arr, left, mid, right)
# _recursive_sort(arr, 0, len(arr) - 1)
if __name__ == "__main__":
    # Demo: sort each half of the list on its own thread, then merge.
    ar = [2, 4, 1, 2, 4, 5, 8, 2, 351, 2, 0]
    # The two threads operate on disjoint index ranges, so no lock is needed.
    thread1 = threading.Thread(
        target=_recursive_sort, args=(ar, 0, len(ar) // 2),)
    thread2 = threading.Thread(
        target=_recursive_sort, args=(ar, (len(ar) // 2) + 1, len(ar) - 1,))
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
    # Final single-threaded merge of the two sorted halves.
    _merge(ar, 0, len(ar) // 2, len(ar) - 1)
    print(ar)
| 3,083 | 1,066 |
"""
Deploy NodeJs application
https://github.com/react-boilerplate/react-boilerplate
"""
from py_mina import *
from py_mina.subtasks import git_clone, create_shared_paths, link_shared_paths, rollback_release
# Settings - shared
set('verbose', True)
# Paths kept between releases (symlinked into each new release directory).
set('shared_dirs', ['node_modules', 'tmp'])
set('shared_files', [])
# Tasks
@task
def restart():
    """
    Restarts application on remote server
    """
    # read README.md
    # Restart every process in the `nodejs_app_prod` monit group.
    run('sudo monit restart -g nodejs_app_prod')
@deploy_task(on_success=restart)
def deploy():
    """
    Runs deploy process on remote server
    """
    # Fetch the code, wire in shared dirs/files, then install and build;
    # on success, the restart task above is triggered.
    git_clone()
    link_shared_paths()
    run('npm install')
    run('npm run build')
@setup_task
def setup():
    """
    Runs setup process on remote server
    """
    # Create the shared dirs/files declared in the deploy settings.
    create_shared_paths()
@task
def rollback():
    """
    Rollbacks to previous release
    """
    # Repoint the `current` symlink at the previous release.
    rollback_release()
| 830 | 308 |
import abc
import json
from typing import Any, Dict, IO, Iterable, List, Optional, Union
import altair as alt
from altair_saver.types import Mimebundle, MimebundleContent, JSONDict
from altair_saver._utils import (
extract_format,
fmt_to_mimetype,
infer_mode_from_spec,
maybe_open,
)
class Saver(metaclass=abc.ABCMeta):
    """
    Base class for saving Altair charts.
    Subclasses should:
    - specify the valid_formats class attribute
    - override the _serialize() method
    """
    # list of supported formats, or (mode, format) pairs.
    valid_formats: Dict[str, List[str]] = {"vega": [], "vega-lite": []}
    # Instance state: the chart spec, its mode ("vega"/"vega-lite"), the
    # vega-embed options, and the renderer package versions.
    _spec: JSONDict
    _mode: str
    _embed_options: JSONDict
    _package_versions: Dict[str, str]
    def __init__(
        self,
        spec: JSONDict,
        mode: Optional[str] = None,
        embed_options: Optional[JSONDict] = None,
        vega_version: str = alt.VEGA_VERSION,
        vegalite_version: str = alt.VEGALITE_VERSION,
        vegaembed_version: str = alt.VEGAEMBED_VERSION,
        **kwargs: Any,
    ):
        # When no mode is given, infer "vega" vs "vega-lite" from the spec.
        if mode is None:
            mode = infer_mode_from_spec(spec)
        if mode not in ["vega", "vega-lite"]:
            raise ValueError("mode must be either 'vega' or 'vega-lite'")
        self._spec = spec
        self._mode = mode
        self._embed_options = embed_options or {}
        self._package_versions = {
            "vega": vega_version,
            "vega-lite": vegalite_version,
            "vega-embed": vegaembed_version,
        }
    @abc.abstractmethod
    def _serialize(self, fmt: str, content_type: str) -> MimebundleContent:
        # Subclasses produce the serialized chart for `fmt`; `content_type`
        # distinguishes "mimebundle" callers from "save" callers.
        ...
    @classmethod
    def enabled(cls) -> bool:
        """Return true if this saver is enabled on the current system."""
        return True
    def mimebundle(self, fmts: Union[str, Iterable[str]]) -> Mimebundle:
        """Return a mimebundle representation of the chart.
        Parameters
        ----------
        fmts : list of strings
            A list of formats to include in the results.
        Returns
        -------
        mimebundle : dict
            The chart's mimebundle representation.
        """
        if isinstance(fmts, str):
            fmts = [fmts]
        bundle: Mimebundle = {}
        for fmt in fmts:
            # Only formats this saver supports for the current mode are valid.
            if fmt not in self.valid_formats[self._mode]:
                raise ValueError(
                    f"invalid fmt={fmt!r}; must be one of {self.valid_formats[self._mode]}."
                )
            mimetype = fmt_to_mimetype(
                fmt,
                vega_version=self._package_versions["vega"],
                vegalite_version=self._package_versions["vega-lite"],
            )
            bundle[mimetype] = self._serialize(fmt, "mimebundle")
        return bundle
    def save(
        self, fp: Optional[Union[IO, str]] = None, fmt: Optional[str] = None
    ) -> Optional[Union[str, bytes]]:
        """Save a chart to file
        Parameters
        ----------
        fp : file or filename (optional)
            Location to save the result. For fmt in ["png", "pdf"], file must be binary.
            For fmt in ["svg", "vega", "vega-lite"], file must be text. If not specified,
            the serialized chart will be returned.
        fmt : string (optional)
            The format in which to save the chart. If not specified and fp is a string,
            fmt will be determined from the file extension.
        Returns
        -------
        chart : string, bytes, or None
            If fp is None, the serialized chart is returned.
            If fp is specified, the return value is None.
        """
        if fmt is None:
            if fp is None:
                raise ValueError("Must specify either `fp` or `fmt` when saving chart")
            fmt = extract_format(fp)
        if fmt not in self.valid_formats[self._mode]:
            raise ValueError(f"Got fmt={fmt}; expected one of {self.valid_formats}")
        content = self._serialize(fmt, "save")
        if fp is None:
            # No target given: return the serialization (dicts as JSON text).
            if isinstance(content, dict):
                return json.dumps(content)
            return content
        # Write to fp, choosing text vs binary mode from the content type.
        if isinstance(content, dict):
            with maybe_open(fp, "w") as f:
                json.dump(content, f, indent=2)
        elif isinstance(content, str):
            with maybe_open(fp, "w") as f:
                f.write(content)
        elif isinstance(content, bytes):
            with maybe_open(fp, "wb") as f:
                f.write(content)
        else:
            raise ValueError(
                f"Unrecognized content type: {type(content)} for fmt={fmt!r}"
            )
        return None
| 4,653 | 1,349 |
# Import our project so that our custom logging gets setup early enough
from __future__ import annotations
import mcookbook # noqa: F401 # pylint: disable=unused-import
| 172 | 50 |
from .exceptions import AccessDenied
class AccessControlMixin(object):
    """Mixin adding simple per-user permission checks.

    Subclasses are expected to carry a `user` attribute (the owner) and
    to provide `user_can_view`, which `check_read_privileges` relies on.
    """
    def user_can_edit(self, user):
        # Superusers may edit anything; otherwise only the owner may.
        return user.is_superuser or self.user == user
    def check_write_privileges(self, user):
        if self.user_can_edit(user):
            return
        raise AccessDenied()
    def check_read_privileges(self, user):
        if self.user_can_view(user):
            return
        raise AccessDenied()
| 401 | 126 |
import os
from bs4 import BeautifulSoup
import html2text
import pandas
# Output locations: scraped text files go under <data_dir>/text and the
# index CSV is written to the working directory.
data_dir = 'co2-coalition'
data_text_dir = os.path.join(data_dir, 'text')
data_file_name = 'co2-coalition.csv'
def make_file_name(index):
    """Return *index* zero-padded to at least two digits (e.g. 3 -> '03')."""
    return '%02d' % index
def save_text(data_dir, file_path, content):
    """Write *content* to data_dir/file_path.

    BUG FIX: uses a context manager so the handle is closed even if the
    write raises (previously a bare open/close pair with no try/finally).
    """
    with open(os.path.join(data_dir, file_path), 'w') as f:
        f.write(content)
def get_text(soup, tag, tag_class, do_strip = False):
    """Find the first `tag` with CSS class `tag_class` in *soup* and return
    it rendered as text via the module-level html_converter, optionally
    stripped of surrounding whitespace."""
    markup = str(soup.find(tag, tag_class))
    text = html_converter.handle(markup)
    return text.strip() if do_strip else text
# Shared converter used by get_text(): keep full line width, drop images.
html_converter = html2text.HTML2Text()
html_converter.body_width = 0
html_converter.ignore_images = True
f = open('html/faq.html', 'r')
content = f.read()
f.close()
faq_soup = BeautifulSoup(content, 'html.parser')
# Accumulators for the index CSV (one row per FAQ entry).
entries = {
    'id' : [],
    'title' : [],
    'text_file_name' : [],
}
entry_index = 0
# Entry 0 is the page intro (title + first paragraph), outside the FAQ panels.
title = html_converter.handle(str(faq_soup.find('span', 'span-title2'))).strip()
content = html_converter.handle(str(faq_soup.find('p', 'p1')))
text_file_name = make_file_name(entry_index) + '.txt'
save_text(data_text_dir, text_file_name, content)
entries['id'].append(entry_index)
entries['title'].append(title)
entries['text_file_name'].append(text_file_name)
entry_index += 1
# Each FAQ question/answer lives in its own vc_tta-panel div.
faq_entries_container = faq_soup.find('div', 'vc_tta-panels-container')
faq_entries = faq_entries_container.find_all('div', 'vc_tta-panel')
print(f'Found {len(faq_entries)} entries')
for entry in faq_entries:
    title = get_text(entry, 'span', 'vc_tta-title-text', do_strip = True).capitalize()
    print(f' Entry {entry_index} : {title}')
    content = get_text(entry.find('div', 'vc_tta-panel-body'), 'div', 'wpb_wrapper')
    text_file_name = make_file_name(entry_index) + '.txt'
    save_text(data_text_dir, text_file_name, content)
    entries['id'].append(entry_index)
    entries['title'].append(title)
    entries['text_file_name'].append(text_file_name)
    entry_index += 1
# Write the index CSV mapping entry ids and titles to their text files.
d = pandas.DataFrame(entries)
d.to_csv(data_file_name, index = False)
| 1,976 | 779 |
import random
def rand_proc(num_proc):
    """Generate *num_proc* random processes as [pid, arrival, burst] triples.

    NOTE(review): random.randint's upper bound is inclusive, so values are
    drawn from 1..11 here; the `10+1` spelling suggests 1..10 may have been
    intended — confirm before changing.
    """
    processes = []
    for pid in range(num_proc):
        arrival = random.randint(1, 10 + 1)
        burst = random.randint(1, 10 + 1)
        processes.append([pid, arrival, burst])
    return processes
| 218 | 90 |
"""Test functions for FOOOF analysis."""
import numpy as np
from fooof.analysis import *
###################################################################################################
###################################################################################################
def test_get_band_peak_fm(tfm):
    # tfm: FOOOF model fixture (presumably from conftest — confirm); query alpha band.
    assert np.all(get_band_peak_fm(tfm, (8, 12)))
def test_get_band_peaks_fg(tfg):
    # tfg: FOOOFGroup fixture; same alpha-band query across the group.
    assert np.all(get_band_peaks_fg(tfg, (8, 12)))
def test_get_band_peaks_group():
    # Rows are [center_freq, power, bandwidth, group_index].
    dat = np.array([[10, 1, 1.8, 0], [13, 1, 2, 2], [14, 2, 4, 2]])
    out1 = get_band_peaks_group(dat, [8, 12], 3)
    assert out1.shape == (3, 3)
    assert np.array_equal(out1[0, :], [10, 1, 1.8])
    out2 = get_band_peaks_group(dat, [12, 16], 3)
    assert out2.shape == (3, 3)
    assert np.array_equal(out2[2, :], [14, 2, 4])
def test_get_band_peak():
    # Rows are [center_freq, power, bandwidth].
    dat = np.array([[10, 1, 1.8], [14, 2, 4]])
    # Test single result
    assert np.array_equal(get_band_peak(dat, [10, 12]), [10, 1, 1.8])
    # Test no results - returns nan
    assert np.all(np.isnan(get_band_peak(dat, [4, 8])))
    # Test muliple results - return all
    assert np.array_equal(get_band_peak(dat, [10, 15], ret_one=False), [[10, 1, 1.8], [14, 2, 4]])
    # Test multiple results - return one
    assert np.array_equal(get_band_peak(dat, [10, 15], ret_one=True), [14, 2, 4])
def test_get_highest_peak():
    # Highest peak is selected by power (column 1).
    dat = np.array([[10, 1, 1.8], [14, 2, 4], [12, 3, 2]])
    assert np.array_equal(get_highest_peak(dat), [12, 3, 2])
def test_empty_inputs():
    # Empty inputs should not raise; np.all on the empty result is True.
    dat = np.empty(shape=[0, 3])
    assert np.all(get_band_peak(dat, [8, 12]))
    assert np.all(get_highest_peak(dat))
    dat = np.empty(shape=[0, 4])
    assert np.all(get_band_peaks_group(dat, [8, 12], 0))
| 1,753 | 775 |
import math
# The code is based on from http://www.cs.cmu.edu/~ckingsf/class/02713-s13/src/mst.py
# Heap item
class HeapItem(object):
    """Represents an item in the heap: a sortable key, its payload value,
    and the item's current position in the heap array."""
    def __init__(self, key, value):
        self.key = key
        self.pos = None  # maintained by Heap so decreasekey can find the item
        self.value = value
# d-ary Heap
class Heap():
    """Min-heap with configurable arity, supporting decrease-key.

    Children of position p live at d*p+1 .. d*(p+1); parent of p is (p-1)//d.
    """
    def __init__(self, dary=2):
        self.heap = []
        self.dary = dary
    def siftdown(self, node, pos):
        """ Move node down in the tree; restore heap condition after deletion
        or replacement. """
        c = self.minchild(pos)
        while c is not None and self.heap[c].key < node.key:
            self.heap[pos] = self.heap[c]
            self.heap[pos].pos = pos
            pos = c
            c = self.minchild(c)
        self.heap[pos] = node
        node.pos = pos
    def siftup(self, node, pos):
        """Move node up in the heap until its parent's key is not larger."""
        p = self.parent(pos)
        while p is not None and self.heap[p].key > node.key:
            self.heap[pos] = self.heap[p]
            self.heap[pos].pos = pos
            pos = p
            p = self.parent(p)
        self.heap[pos] = node
        node.pos = pos
    def findmin(self):
        """Return element with smallest key, or None if heap is empty"""
        return self.heap[0] if len(self.heap) > 0 else None
    def extractmin(self):
        """Delete and return the smallest item (None if the heap is empty)."""
        if len(self.heap) == 0:
            return None
        i = self.heap[0]
        # Move the last element to the root and restore the heap property.
        last = self.heap[-1]
        del self.heap[-1]
        if len(self.heap) > 0:
            self.siftdown(last, 0)
        return i
    def insert(self, key, value):
        """Insert an item into the heap and return its HeapItem handle."""
        self.heap.append(None)
        hi = HeapItem(key, value)
        self.siftup(hi, len(self.heap) - 1)
        return hi
    def decreasekey(self, node, newkey):
        """Decrease the key of node to newkey (must not increase it)."""
        node.key = newkey
        self.siftup(node, node.pos)
    def parent(self, pos):
        """Return the position of the parent of pos (None for the root)."""
        if pos == 0:
            return None
        # BUG FIX: replaced math.ceil(pos / dary) - 1 with exact integer
        # arithmetic; equivalent for valid positions, with no float rounding.
        return (pos - 1) // self.dary
    def children(self, pos):
        """Return a list of children of pos"""
        return range(self.dary * pos + 1, min(self.dary * (pos + 1) + 1, len(self.heap)))
    def minchild(self, pos):
        """Return the child of pos with the smallest key (None if a leaf)."""
        minpos = minkey = None
        for c in self.children(pos):
            # Idiom fix: `is None` rather than `== None`.
            if minkey is None or self.heap[c].key < minkey:
                minkey, minpos = self.heap[c].key, c
        return minpos
| 2,677 | 892 |
#!/usr/bin/env python
# coding: utf-8
# In[5]:
import cv2
import numpy as np
imagen = cv2.imread('wheel.png')
gray = cv2.cvtColor(imagen,cv2.COLOR_BGR2GRAY)
# Binarise: pixels above 100 become 255.
_,th = cv2.threshold(gray,100,255,cv2.THRESH_BINARY)
# For OpenCV 3 versions (OpenCV 4's findContours returns only 2 values):
img1,contornos1,hierarchy1 = cv2.findContours(th, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
img2,contornos2,hierarchy2 = cv2.findContours(th, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(imagen, contornos1, -1, (0,0,255), 2)
# Compare the point count of the third contour: full (NONE) vs compressed (SIMPLE).
print ('len(contornos1[2])=',len(contornos1[2]))
print ('len(contornos2[2])=',len(contornos2[2]))
cv2.imshow('imagen',imagen)
cv2.imshow('th',th)
cv2.waitKey(0)
cv2.destroyAllWindows()
# In[ ]:
| 679 | 344 |
# -*- coding: utf-8 -*-
#############################################################
# Copyright (c) 2020-2021 Maurice Karrenbrock #
# #
# This software is open-source and is distributed under the #
# BSD 3-Clause "New" or "Revised" License #
#############################################################
"""functions to combine bound and unbound works
"""
import numpy as np
def combine_non_correlated_works(works_1, works_2):
    """combines 2 non correlated sets of work values

    If you have 2 sets of work values (for example bound and unbound in
    the case of vDSSB) that are uncorrelated you can combine them in
    order to get N * M resulting works. It is equivalent to convoluting
    the 2 probability distributions.

    Parameters
    ------------
    works_1 : numpy.array
        the first set of works values to combine
    works_2 : numpy.array
        the second set of works values to combine

    Returns
    -----------
    numpy.array :
        a 1-D array N * M long containing the combined work values,
        ordered as works_1[0]+works_2[...], works_1[1]+works_2[...], ...

    Notes
    ---------
    for more information check out this paper:
    Virtual Double-System Single-Box: A Nonequilibrium
    Alchemical Technique for Absolute Binding Free Energy
    Calculations: Application to Ligands of the SARS-CoV-2 Main Protease
    Marina Macchiagodena, Marco Pagliai, Maurice Karrenbrock,
    Guido Guarnieri, Francesco Iannone, and Piero Procacci
    Journal of Chemical Theory and Computation 2020 16 (11), 7160-7172
    DOI: 10.1021/acs.jctc.0c00634
    section 2 "THEORETICAL BACKGROUND"
    """
    works_1 = np.asarray(works_1)
    works_2 = np.asarray(works_2)
    # Outer sum via broadcasting replaces the explicit Python loop: row i is
    # works_1[i] + works_2, and ravel() flattens in the same row-major order
    # the old loop produced.
    return (works_1[:, np.newaxis] + works_2[np.newaxis, :]).ravel()
| 2,014 | 623 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import pointnet2_utils
import pytorch_utils as pt_utils
from typing import List
class PointSpatioTemporalCorrelation(nn.Module):
    """Correlate query points P1 (features X1) with a neighborhood of support
    points P2 (states S2) and produce a new per-point state for P1.

    Pipeline: ball-query neighbors of each P1 point among P2, group neighbor
    states and positions, concatenate [S2, X1, xyz-displacement], apply one
    shared Conv2d (the only learned parameters), then max-pool over the
    neighborhood dimension.
    """
    def __init__(
        self,
        radius: float,
        nsamples: int,
        in_channels: int,
        out_channels: int
    ):
        super().__init__()
        self.radius = radius      # ball-query search radius
        self.nsamples = nsamples  # neighbors gathered per query point
        self.in_channels = in_channels
        # +3 accounts for the xyz displacement channels concatenated in forward()
        self.fc = pt_utils.Conv2d(in_size=in_channels+out_channels+3, out_size=out_channels, activation=None, bn=None)
    def forward(self, P1: torch.Tensor, P2: torch.Tensor, X1: torch.Tensor, S2: torch.Tensor) -> (torch.Tensor):
        r"""
        Parameters
        ----------
        P1: (B, N, 3)
        P2: (B, N, 3)
        X1: (B, C, N) current-frame point features; ignored when in_channels == 0
        S2: (B, C, N) previous state
        Returns
        -------
        S1: (B, C, N)
        """
        # 1. Sample points
        idx = pointnet2_utils.ball_query(self.radius, self.nsamples, P2, P1)  # (B, npoint, nsample)
        # 2.1 Group P2 points
        P2_flipped = P2.transpose(1, 2).contiguous()  # (B, 3, npoint)
        P2_grouped = pointnet2_utils.grouping_operation(P2_flipped, idx)  # (B, 3, npoint, nsample)
        # 2.2 Group P2 states
        S2_grouped = pointnet2_utils.grouping_operation(S2, idx)  # (B, C, npoint, nsample)
        # 3. Calcaulate displacements
        P1_flipped = P1.transpose(1, 2).contiguous()  # (B, 3, npoint)
        P1_expanded = torch.unsqueeze(P1_flipped, 3)  # (B, 3, npoint, 1)
        displacement = P2_grouped - P1_expanded  # (B, 3, npoint, nsample)
        # 4. Concatenate X1, S2 and displacement
        if self.in_channels != 0:
            X1_expanded = torch.unsqueeze(X1, 3)  # (B, C, npoint, 1)
            X1_repeated = X1_expanded.repeat(1, 1, 1, self.nsamples)
            correlation = torch.cat(tensors=(S2_grouped, X1_repeated, displacement), dim=1)
        else:
            correlation = torch.cat(tensors=(S2_grouped, displacement), dim=1)
        # 5. Fully-connected layer (the only parameters)
        S1 = self.fc(correlation)
        # 6. Pooling
        S1 = torch.max(input=S1, dim=-1, keepdim=False)[0]
        return S1
class PointRNNCell(nn.Module):
    """Vanilla point-RNN cell: the new state is a single spatio-temporal
    correlation of the current frame with the previous state (no gating)."""
    def __init__(
        self,
        radius: float,
        nsamples: int,
        in_channels: int,
        out_channels: int
    ):
        super().__init__()
        self.out_channels = out_channels
        self.corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
    def init_state(self, inputs: (torch.Tensor, torch.Tensor)):
        """Return a zero (P, S) state shaped after `inputs`.

        NOTE(review): torch.Tensor.get_device() returns -1 on CPU tensors,
        so this presumably expects CUDA inputs — confirm before CPU use.
        """
        P, _ = inputs
        inferred_batch_size = P.size(0)
        inferred_npoints = P.size(1)
        device = P.get_device()
        P = torch.zeros([inferred_batch_size, inferred_npoints, 3], dtype=torch.float32, device=device)
        S = torch.zeros([inferred_batch_size, self.out_channels, inferred_npoints], dtype=torch.float32, device=device)
        return P, S
    def forward(self, inputs: (torch.Tensor, torch.Tensor), states: (torch.Tensor, torch.Tensor)=None) -> (torch.Tensor, torch.Tensor):
        """One step: correlate (P1, X1) with previous (P2, S2) -> (P1, S1)."""
        if states is None:
            states = self.init_state(inputs)
        P1, X1 = inputs
        P2, S2 = states
        S1 = self.corr(P1, P2, X1, S2)
        return P1, S1
class PointGRUCell(nn.Module):
    """Point-based GRU cell: update/reset gates and the candidate state are
    computed by spatio-temporal correlation between the current frame
    (P1, X1) and the previous state (P2, S2)."""
    def __init__(
        self,
        radius: float,
        nsamples: int,
        in_channels: int,
        out_channels: int
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # update gate, reset gate, and old-state correlations
        self.z_corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        self.r_corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        self.s_corr = PointSpatioTemporalCorrelation(radius, nsamples, 0, out_channels)
        self.sigmoid = nn.Sigmoid()
        # projects the concatenated [X1, R*S_old] (in+out channels) back to out channels
        self.fc = pt_utils.Conv1d(in_size=in_channels+out_channels, out_size=out_channels, activation=None, bn=None)
        self.tanh = nn.Tanh()
    def init_state(self, inputs: (torch.Tensor, torch.Tensor)):
        """Return a zero (P, S) state shaped after `inputs`.

        NOTE(review): P.get_device() returns -1 for CPU tensors, so this
        presumably expects CUDA inputs — confirm before CPU use.
        """
        P, _ = inputs
        inferred_batch_size = P.size(0)
        inferred_npoints = P.size(1)
        device = P.get_device()
        P = torch.zeros([inferred_batch_size, inferred_npoints, 3], dtype=torch.float32, device=device)
        S = torch.zeros([inferred_batch_size, self.out_channels, inferred_npoints], dtype=torch.float32, device=device)
        return P, S
    def forward(self, inputs: (torch.Tensor, torch.Tensor), states: (torch.Tensor, torch.Tensor)=None) -> (torch.Tensor, torch.Tensor):
        """One GRU step: (P1, X1) + previous (P2, S2) -> (P1, S1)."""
        if states is None:
            states = self.init_state(inputs)
        P1, X1 = inputs
        P2, S2 = states
        Z = self.z_corr(P1, P2, X1, S2)  # update gate logits
        R = self.r_corr(P1, P2, X1, S2)  # reset gate logits
        Z = self.sigmoid(Z)
        R = self.sigmoid(R)
        S_old = self.s_corr(P1, P2, None, S2)
        if self.in_channels == 0:
            S_new = R * S_old
        else:
            # BUG FIX: self.fc was constructed but never applied, which left
            # S_new with in+out channels and made the gated sum below fail on
            # a shape mismatch against Z/S_old (out channels) whenever
            # in_channels != 0.
            S_new = self.fc(torch.cat(tensors=[X1, R * S_old], dim=1))
        S_new = self.tanh(S_new)
        S1 = Z * S_old + (1 - Z) * S_new
        return P1, S1
class PointLSTMCell(nn.Module):
    """Point-based LSTM cell: input/forget/output gates and the candidate
    cell state are each computed by spatio-temporal correlation against the
    previous hidden state H2 (the previous cell state C2 for the old-cell
    term)."""
    def __init__(
        self,
        radius: float,
        nsamples: int,
        in_channels: int,
        out_channels: int
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.i_corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        self.f_corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        self.o_corr = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        self.c_corr_new = PointSpatioTemporalCorrelation(radius, nsamples, in_channels, out_channels)
        # the old-cell correlation takes no point features (in_channels=0)
        self.c_corr_old = PointSpatioTemporalCorrelation(radius, nsamples, 0, out_channels)
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()
    def init_state(self, inputs: (torch.Tensor, torch.Tensor)):
        """Return a zero (P, H, C) state shaped after `inputs`.

        NOTE(review): P.get_device() returns -1 for CPU tensors, so this
        presumably expects CUDA inputs — confirm before CPU use.
        """
        P, _ = inputs
        inferred_batch_size = P.size(0)
        inferred_npoints = P.size(1)
        device = P.get_device()
        P = torch.zeros([inferred_batch_size, inferred_npoints, 3], dtype=torch.float32, device=device)
        H = torch.zeros([inferred_batch_size, self.out_channels, inferred_npoints], dtype=torch.float32, device=device)
        C = torch.zeros([inferred_batch_size, self.out_channels, inferred_npoints], dtype=torch.float32, device=device)
        return P, H, C
    def forward(self, inputs: (torch.Tensor, torch.Tensor), states: (torch.Tensor, torch.Tensor, torch.Tensor)=None) -> (torch.Tensor, torch.Tensor, torch.Tensor):
        """One LSTM step: (P1, X1) + previous (P2, H2, C2) -> (P1, H1, C1)."""
        if states is None:
            states = self.init_state(inputs)
        P1, X1 = inputs
        P2, H2, C2 = states
        I = self.i_corr(P1, P2, X1, H2)  # input gate logits
        F = self.f_corr(P1, P2, X1, H2)  # forget gate logits
        O = self.o_corr(P1, P2, X1, H2)  # output gate logits
        C_new = self.c_corr_new(P1, P2, X1, H2)    # candidate cell state
        C_old = self.c_corr_old(P1, P2, None, C2)  # previous cell re-sampled at P1
        I = self.sigmoid(I)
        F = self.sigmoid(F)
        O = self.sigmoid(O)
        C_new = self.tanh(C_new)
        # standard LSTM state update
        C1 = F * C_old + I * C_new
        H1 = O * self.tanh(C1)
        return P1, H1, C1
if __name__ == '__main__':
    # Smoke test: run one PointLSTMCell step on zero tensors and print the
    # resulting shapes (requires a CUDA device).
    radius = 1
    nsamples = 4
    in_channels = 128
    out_channels = 256
    lstm = PointLSTMCell(radius, nsamples, in_channels, out_channels).to('cuda')
    batch_size = 32
    npoints = 1024
    # current frame: positions + features; previous state: positions, hidden, cell
    P1 = torch.zeros([batch_size, npoints, 3], dtype=torch.float32).to('cuda')
    X1 = torch.zeros([batch_size, in_channels, npoints], dtype=torch.float32).to('cuda')
    P2 = torch.zeros([batch_size, npoints, 3], dtype=torch.float32).to('cuda')
    H2 = torch.zeros([batch_size, out_channels, npoints], dtype=torch.float32).to('cuda')
    C2 = torch.zeros([batch_size, out_channels, npoints], dtype=torch.float32).to('cuda')
    P1, H1, C1 = lstm((P1, X1), (P2, H2, C2))
    print(P1.shape)
    print(H1.shape)
    print(C1.shape)
| 8,622 | 3,199 |
from django import forms
class URLForm(forms.Form):
    """Single-field Django form asking the user for a website address."""
    # Free-text field; no URL validation is performed here (forms.URLField
    # would validate the scheme/host if that were wanted).
    siteUrl = forms.CharField(label='Website Address', max_length=100,required=True)
    '''
    javascriptChoices = ((2,"Keep Javascript",),(1,"Remove Some Javascript"),(0,"Remove All Javascript"))
    keepJavascript = forms.ChoiceField(choices=javascriptChoices,label=" Website Javascript")
    '''
| 347 | 98 |
# WSGI entry point: build the FlexMeasures app once at import time and expose
# it under the name `application`, which WSGI servers look for by convention.
from flexmeasures.app import create as create_app
application = create_app()
| 78 | 22 |
from ctypes import Structure, Union, c_int, c_byte, c_char_p
# Inner union
# Inner union
class MBusAddressInternal(Union):
    """ctypes union holding either a primary (int) or a secondary
    (NUL-terminated string) M-Bus address; only one member is valid at a
    time — the enclosing MBusAddress.is_primary flag discriminates."""
    _fields_ = [
        ('primary', c_int),
        ('secondary', c_char_p),
    ]
class MBusAddress(Structure):
    """ctypes struct pairing an address union with a discriminator flag
    (`is_primary`) that records which union member is currently valid.
    The pri_address/sec_address properties keep the flag and the union
    member in sync."""
    _fields_ = [
        ('is_primary', c_byte),
        ('_address', MBusAddressInternal),
    ]
    @property
    def pri_address(self):
        # Returns None implicitly when the secondary address is active.
        if self.is_primary:
            return self._address.primary
    @pri_address.setter
    def pri_address(self, address):
        self._address.primary = address
        self.is_primary = 1
    @property
    def sec_address(self):
        # Returns None implicitly when the primary address is active.
        if not self.is_primary:
            return self._address.secondary
    @sec_address.setter
    def sec_address(self, address):
        # NOTE(review): assigning a Python bytes object to a c_char_p field
        # does not necessarily keep the underlying buffer alive if the struct
        # is later handed to C code — confirm callers retain a reference.
        self._address.secondary = address
        self.is_primary = 0
| 839 | 263 |
# -*- coding: utf-8 -*-
# Copy this file and renamed it settings.py and change the values for your own project
# The csv file containing the information about the member.
# There is three columns: The name, the email and the member type: 0 regular, 1 life time
# The csv file containing the information about the member.
# There is three columns: The name, the email and the member type: 0 regular, 1 life time
CSV_FILE = "path to csv file"
# The svg file for regular member. {name} and {email} are going to be replaced with the corresponding values from the
# csv file
SVG_FILE_REGULAR = "path to svg regular member file"
# Same as SVG_FILE_REGULAR but for life time member
SVG_FILE_LIFE_TIME = "path to svg life time member file"
# Destination folder where the member cards will be generated. If the folder does not exist yet it will be created.
DEST_GENERATED_FOLDER = "path to folder that will contain the generated files"
# The message file used as the text body for the email message. UTF-8.
MSG_FILE = "/Users/pierre/Documents/LPA/CA/carte_membre_msg"
# SMTP configuration
# NOTE: "SMPT_*" is a long-standing typo for "SMTP_*". The misspelled names
# are kept so existing code keeps working; correctly-spelled aliases are
# defined right below — prefer those in new code.
SMPT_HOST = "myserver.com"
SMPT_PORT = 587
SMTP_USER = "user_name"
SMTP_PASSWORD = "password"
SMTP_HOST = SMPT_HOST
SMTP_PORT = SMPT_PORT
# Email configuration
EMAIL_FROM = "some_email@something.com"
EMAIL_SUBJECT = "subject"
EMAIL_PDF = "name of attachment file.pdf"
| 1,162 | 364 |
"""the names of companies which filed fields with certain words in them"""
from df_wrappers import facts_stringquery
def main():
    """Print the unique names of companies that filed matching fields."""
    # this one is easier in the script language
    query_string = """
filingsource:"Korea FSS" AND
fieldname:(hedge OR (foreign AND exchange) OR (interest AND rate))
"""
    # send off the query
    resp_data = facts_stringquery(query_string, False)
    # a set comprehension keeps only the unique company names
    name_list = {x['source']['companyname'] for x in resp_data['hits']}
    for name in name_list:
        print(name)


# Only run when executed as a script. Previously main() was called
# unconditionally, which fired a network query as a side effect of import.
if __name__ == "__main__":
    main()
# eof
| 633 | 192 |
DEBUG = False
from typing import List, Tuple
class LRU:
    """Tracks recency of the `ways` slots of one cache set.

    Index 0 of the internal list is the most recently used slot and the
    last index is the least recently used one.
    """
    _values: List
    _lastIndex: int

    def __init__(self, ways):
        # Start as [ways-1, ..., 1, 0] so slot 0 is handed out first.
        self._values = list(reversed(range(ways)))
        self._lastIndex = ways - 1

    def getLeastRecentlyUsed(self):
        """Return the slot that was touched longest ago."""
        return self._values[self._lastIndex]

    def pushToTop(self, element):
        """Mark `element` as the most recently used slot."""
        self._values.remove(element)
        self._values.insert(0, element)
class CacheSimulator:
    """Set-associative cache simulator with per-set LRU replacement.

    blocks    -- number of cache sets
    blockSize -- bytes per block (maps an address to a memory block)
    ways      -- associativity (slots per set)
    """
    debug = False
    # private fields (forward refs quoted so the class compiles standalone)
    _lrus: "List[LRU]"
    _cacheBlocks: "List[List]"
    _blockSize: int
    _blocks: int
    _ways: int

    def __init__(self, blocks=8, blockSize=32, ways=1):
        self._blocks = blocks
        self._blockSize = blockSize
        self._ways = ways
        # one row of `ways` slots per cache set, all initially empty (-1)
        self._cacheBlocks = [[-1] * ways for _ in range(blocks)]
        # BUG FIX: there must be exactly one LRU tracker per cache set
        # (`blocks`). The old code allocated `blockSize` trackers, which
        # wasted memory and raised IndexError in find() whenever
        # blocks > blockSize.
        self._lrus = [LRU(ways) for _ in range(blocks)]

    def find(self, address: int) -> Tuple:
        """Look up `address`; return (hit, memBlock, setIndex, wayUsed)."""
        # calculating memory block and cache block index
        memBlock = self.calculateMemBlock(address)
        index = self.calculateCacheBlock(memBlock=memBlock)
        cacheBlock = self._cacheBlocks[index]
        # searching the memory block inside the cache block
        hit = memBlock in cacheBlock
        if hit:
            lru = cacheBlock.index(memBlock)
            if self.debug: print(f"{address}: HIT (MEMBLOCK {memBlock} IS IN CACHEBLOCK {index}[{lru}])")
        else:
            # miss: evict the least recently used way of this set
            lru = self._lrus[index].getLeastRecentlyUsed()
            cacheBlock[lru] = memBlock
            if self.debug: print(f"{address}: MISS!!! MEMBLOCK {memBlock} -> CACHEBLOCK {index}[{lru}]")
        # updating LRU
        self._lrus[index].pushToTop(lru)
        if self.debug: print(f"LAST USED BLOCK FOR CACHEBLOCK {index}: {lru}")
        return hit, memBlock, index, lru

    def hitOrMiss(self, address: int) -> bool:
        """True when `address` is already cached (the lookup also updates state)."""
        hit, _, _, _ = self.find(address)
        return hit

    def calculateMemBlock(self, address: int) -> int:
        """Memory block that contains `address`."""
        return address // self._blockSize

    def calculateCacheBlock(self, address: int = -1, memBlock: int = -1) -> int:
        """Cache set for `address` (or directly for `memBlock` when given)."""
        if address != -1:
            memBlock = self.calculateMemBlock(address)
        return memBlock % self._blocks
| 2,413 | 778 |
"""
db_triggers.py
~~~~~~~~~~~~~~
:aciklama:
Veritabanina veri girisi ve gerekli triggerlar icin
:yazar: github.com/serong
"""
import sqlite3
import db as saydb
class SayisalDBT(object):
    """Inserts one week of lottery results into sayisal.db and keeps all
    derived statistics tables in sync (the "triggers" are done in Python)."""

    def __init__(self, week, the_date, numbers):
        """Record a week and update every statistics table.

        :param week: week number
        :type week: int
        :param the_date: week date, e.g. "2014-12-31"
        :type the_date: str
        :param numbers: the six drawn numbers
        :type numbers: tuple
        """
        self.db_name = "sayisal.db"
        self.weeks(week, the_date, numbers)
        self.update_picked_table(numbers)
        self.update_numbers_weeks_table(week, numbers)
        self.update_numbers_group_table(week, numbers)
        self.update_last_pick_table(week, numbers)

    def _connect(self):
        # Single place to open the database so every method behaves the same.
        return sqlite3.connect(self.db_name)

    def weeks(self, week, the_date, numbers):
        """Insert the week row.

        :returns: True on success, False when the week was entered before.
        :rtype: bool
        """
        connection = self._connect()
        try:
            q = "INSERT INTO weeks VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
            connection.execute(q, (week, the_date) + tuple(numbers))
            connection.commit()
            return True
        except sqlite3.IntegrityError:
            # Duplicate week: report, don't raise (original behavior).
            return False
        finally:
            # BUG FIX: the success path used to leak the connection; the old
            # code only closed it in the IntegrityError branch.
            connection.close()

    def update_picked_table(self, numbers):
        """Increment the pick counter of each drawn number."""
        connection = self._connect()
        try:
            q = "UPDATE numbers_picked SET picked = picked + 1 WHERE number = ?"
            for number in numbers:
                connection.execute(q, (number,))
            connection.commit()
        finally:
            connection.close()
        return True

    def update_numbers_weeks_table(self, week, numbers):
        """Record one (week, number) row per drawn number."""
        connection = self._connect()
        try:
            q = "INSERT INTO numbers_weeks VALUES(NULL, ?, ?)"
            for number in numbers:
                connection.execute(q, (week, number))
            connection.commit()
        finally:
            connection.close()
        return True

    def update_numbers_group_table(self, week, numbers):
        """Store every run of three consecutive drawn numbers for the week.

        For six numbers this produces the four windows (0,1,2), (1,2,3),
        (2,3,4), (3,4,5) — identical to the old copy-pasted inserts.
        """
        connection = self._connect()
        try:
            q = "INSERT INTO numbers_group VALUES(NULL, ?, ?, ?, ?)"
            for i in range(len(numbers) - 2):
                connection.execute(q, (week, numbers[i], numbers[i + 1], numbers[i + 2]))
            connection.commit()
        finally:
            connection.close()
        return True

    def update_last_pick_table(self, week, numbers):
        """Remember this week as the most recent pick of each drawn number."""
        connection = self._connect()
        try:
            q = "UPDATE numbers_last_pick SET last_pick = ? WHERE number = ?"
            for number in numbers:
                connection.execute(q, (week, number))
            connection.commit()
        finally:
            connection.close()
        return True
| 4,015 | 1,231 |
import contextlib
import os
import pickle
import numpy as np
from itertools import product, starmap
import multiprocessing
import tqdm
import sys
if sys.platform.startswith("linux"):
is_linux = True
import tempfile
else:
is_linux = False
import mmap
# import pyina.launchers
# from pyina.ez_map import ez_map
# TODO: Convert utils.py module to use pathlib module
def make_dir(path):
    """Make the directory at `path` (with parents) if it doesn't exist.

    Parameters
    ----------
    path : str
        The path of the directory to make

    Returns
    -------
    str
        absolute path of the (possibly pre-existing) directory
    """
    # exist_ok avoids the check-then-create race of the old
    # `if not os.path.exists(path): os.makedirs(path)` pattern.
    os.makedirs(path, exist_ok=True)
    return os.path.abspath(path)
def files_in_dir(path):
    """Return the sorted entries of `path`.

    Parameters
    ----------
    path : str
        The directory path to check for files

    Returns
    -------
    list
        all files and subdirectories in `path` (excluding . and ..)
    """
    entries = os.listdir(path)
    entries.sort()
    return entries
def tifs_in_dir(path):
    """Search `path` for tif files.

    Parameters
    ----------
    path : str
        path of the directory to check for tif images

    Returns
    -------
    tif_paths : list
        list of absolute paths to tiffs in `path`, in sorted order
    tif_filenames : list
        list of tiff filenames (with the extension) in `path`
    """
    abspath = os.path.abspath(path)
    tif_paths = []
    tif_filenames = []
    for f in files_in_dir(abspath):
        # str.endswith accepts a tuple: one call covers both extensions
        if f.endswith(('.tif', '.tiff')):
            tif_paths.append(os.path.join(abspath, f))
            tif_filenames.append(f)
    return tif_paths, tif_filenames
def load_metadata(path):
    """Load the 'metadata.pkl' file stored inside directory `path`.

    Parameters
    ----------
    path : str
        path of a directory containing a 'metadata.pkl' file

    Returns
    -------
    dict
        dictionary containing the stored metadata
    """
    metadata_path = os.path.join(path, 'metadata.pkl')
    return pickle_load(metadata_path)
def pickle_save(path, data):
    """Serialize `data` to `path` using the highest pickle protocol.

    Parameters
    ----------
    path : str
        path of the pickle file to create / overwrite
    data : dict
        dictionary with data to be pickled
    """
    with open(path, 'wb') as fh:
        pickle.dump(data, fh, pickle.HIGHEST_PROTOCOL)
def pickle_load(path):
    """Un-pickle and return the object stored at `path`.

    Parameters
    ----------
    path : str
        path of the pickle file to read

    Returns
    -------
    dict
        data that was stored in the input pickle file
    """
    with open(path, 'rb') as fh:
        loaded = pickle.load(fh)
    return loaded
def chunk_dims(img_shape, chunk_shape):
    """Number of chunks needed along each dimension of an image.

    Parameters
    ----------
    img_shape : tuple
        whole image shape
    chunk_shape : tuple
        individual chunk shape

    Returns
    -------
    tuple
        number of chunks in each dimension
    """
    # ceil-division in pure integer arithmetic: -(-n // c) == ceil(n / c)
    return tuple(-(-dim // c) for dim, c in zip(img_shape, chunk_shape))
def chunk_coordinates(shape, chunks):
    """Global starting coordinate of every chunk, in C (row-major) order.

    Parameters
    ----------
    shape : tuple
        shape of the image to chunk
    chunks : tuple
        shape of each chunk

    Returns
    -------
    ndarray
        the starting indices of each chunk
    """
    counts = chunk_dims(shape, chunks)
    coords = [tuple(idx * c for idx, c in zip(indices, chunks))
              for indices in product(*map(range, counts))]
    return np.asarray(coords)
def box_slice_idx(start, stop):
    """Index tuple of slices selecting the box [start, stop) per dimension.

    Parameters
    ----------
    start : array-like
        index of box start
    stop : array-like
        index of box stop (index not included in result)

    Returns
    -------
    tuple
        index tuple for the bounding box
    """
    return tuple(slice(a, b) for a, b in zip(start, stop))
def extract_box(arr, start, stop):
    """Return the sub-box arr[start:stop], handling SharedMemory transparently.

    Parameters
    ----------
    arr : array-like or SharedMemory
        input array to index
    start : array-like
        starting index of the slice
    stop : array-like
        ending index of the slice (exclusive)

    Returns
    -------
    ndarray
        the extracted box
    """
    idx = box_slice_idx(start, stop)
    if not isinstance(arr, SharedMemory):
        return arr[idx]
    # SharedMemory must be accessed through its txn() context manager
    with arr.txn() as view:
        return view[idx]
def insert_box(arr, start, stop, data):
    """Write `data` into the sub-box arr[start:stop]; returns `arr`.

    Parameters
    ----------
    arr : array-like
        input array to index
    start : array-like
        starting index of the slice
    stop : array-like
        ending index of the slice (exclusive)
    data : array-like
        sub-array to insert into `arr`

    Returns
    -------
    array-like
        `arr` itself, after the in-place write
    """
    idx = box_slice_idx(start, stop)
    if isinstance(arr, SharedMemory):
        # SharedMemory must be written through its txn() context manager
        with arr.txn() as view:
            view[idx] = data
    else:
        arr[idx] = data
    return arr
def pmap_chunks(f, arr, chunks=None, nb_workers=None, use_imap=False):
    """Map `f` over `arr` chunk-by-chunk, optionally in parallel.

    `f` receives (arr, start_coord, chunks); any overlap handling must be
    baked into `f`. Caution: `arr` may get copied if it is not a memmap,
    SharedMemory or Zarr array.

    Parameters
    ----------
    f : callable
        function with signature f(arr, start_coord, chunks)
    arr : array-like
        an N-dimensional input array
    chunks : tuple, optional
        chunk shape; defaults to arr.chunks, falling back to arr.shape
    nb_workers : int, optional
        number of worker processes; defaults to cpu_count
    use_imap : bool, optional
        use imap (with a tqdm progress bar) instead of starmap; `f` must
        then unpack its single argument tuple itself

    Returns
    -------
    list
        one result per chunk
    """
    if chunks is None:
        # same fallback chain as try/except AttributeError on arr.chunks
        chunks = getattr(arr, 'chunks', arr.shape)
    if nb_workers is None:
        nb_workers = multiprocessing.cpu_count()
    args_list = [(arr, coord, chunks) for coord in chunk_coordinates(arr.shape, chunks)]
    if nb_workers > 1:
        with multiprocessing.Pool(processes=nb_workers) as pool:
            if use_imap:
                return list(tqdm.tqdm(pool.imap(f, args_list), total=len(args_list)))
            return list(pool.starmap(f, args_list))
    if use_imap:
        return list(tqdm.tqdm(map(f, args_list), total=len(args_list)))
    return list(starmap(f, args_list))
def extract_ghosted_chunk(arr, start_coord, chunks, overlap):
    """Extract a chunk plus `overlap` voxels of ghost border, clipped to `arr`.

    Returns (ghosted_chunk, ghosted_start, ghosted_stop) where the start/stop
    are the global coordinates actually sliced after clipping.
    """
    stop_coord = np.minimum(arr.shape, start_coord + np.asarray(chunks))
    origin = np.zeros(arr.ndim, 'int')
    start_ghosted = np.maximum(origin, np.array([c - overlap for c in start_coord]))
    stop_ghosted = np.minimum(arr.shape, np.array([c + overlap for c in stop_coord]))
    ghosted_chunk = extract_box(arr, start_ghosted, stop_ghosted)
    return ghosted_chunk, start_ghosted, stop_ghosted
def filter_points_in_box(coords, start, stop, return_idx=False):
    """Keep the rows of `coords` (z, y, x) inside the half-open box [start, stop).

    When `return_idx` is True, also return the row indices that were kept.
    """
    inside = np.ones(coords.shape[0], dtype=bool)
    for d in range(3):
        inside &= (coords[:, d] >= start[d]) & (coords[:, d] < stop[d])
    loc = np.where(inside)
    if return_idx:
        return coords[loc], loc[0]
    return coords[loc]
def filter_ghosted_points(start_ghosted, start_coord, centers_local, chunks, overlap):
    """Drop detections that fall in the ghost border of a ghosted chunk.

    For each axis: if the chunk was ghosted on the low side
    (start_ghosted < start_coord) the valid interval is
    [overlap, chunks + overlap); otherwise it is [0, chunks).
    `centers_local` holds (z, y, x) coordinates local to the ghosted chunk.
    """
    keep = np.ones(centers_local.shape[0], dtype=bool)
    for d in range(3):
        axis = centers_local[:, d]
        if start_ghosted[d] < start_coord[d]:
            keep &= (axis >= overlap) & (axis < chunks[d] + overlap)
        else:
            keep &= axis < chunks[d]
    return centers_local[np.where(keep)]
def read_voxel_size(path, micron=True):
    """Read the voxel size stored in `path`, a CSV of integer nanometers.

    :param path: path to a CSV file containing integer voxel dimensions in nanometers
    :param micron: when True (default) convert to microns; when False return
        the raw integer nanometer values. (The flag was previously documented
        but ignored; the default preserves the old behavior.)
    :return: voxel_size tuple in the same order as in the CSV
    """
    with open(path, mode='r') as f:
        line = f.readline().split('\n')[0]
    dims = line.split(',')
    if micron:
        return tuple(int(d) / 1000 for d in dims)
    return tuple(int(d) for d in dims)
# mapper = None
#
#
# def parallel_map(fn, args):
# """Map a function over an argument list, returning one result per arg
#
# Parameters
# ----------
# fn : callable
# the function to execute
# args : list
# a list of the single argument to send through the function per invocation
#
# Returns
# -------
# list
# a list of results
#
# Notes
# -----
# The mapper is configured by two environment variables:
#
# PHATHOM_MAPPER - this is the name of one of the mapper classes. Typical
# choices are MpiPool or MpiScatter for OpenMPI and
# SlurmPool or SlurmScatter for SLURM. By default, it
# uses the serial mapper which runs on a single thread.
#
# PHATHOM_NODES - this is the number of nodes that should be used in
# parallel.
#
# By default, a serial mapper is returned if there is no mapper.
#
# Examples
# --------
# myresults = parallel_map(my_function, my_inputs)
#
# """
# global mapper
#
# if mapper is None:
# if "PHATHOM_MAPPER" in os.environ:
# mapper_name = os.environ["PHATHOM_MAPPER"]
# mapper_class = getattr(pyina.launchers, mapper_name)
# if "PHATHOM_NODES" in os.environ:
# nodes = os.environ["PHATHOM_NODES"]
# mapper = mapper_class(nodes)
# else:
# mapper = mapper_class()
# else:
# mapper = pyina.launchers.SerialMapper()
#
# return mapper.map(fn, args)
class SharedMemory:
    """A class to share memory between processes.

    Instantiate this class in the parent process and use in all processes.
    On Linux the buffer is an unlinked tempfile in /dev/shm (picklable by
    pathname); elsewhere an anonymous mmap buffer is used. Access the array
    through the txn() context manager:

        shm = SharedMemory((100, 100, 100), np.float32)
        def do_something():
            with shm.txn() as a:
                a[...] = ...
        with multiprocessing.Pool() as pool:
            pool.apply_async(do_something, args)
    """
    if is_linux:
        def __init__(self, shape, dtype):
            """Initializer
            :param shape: the shape of the array
            :param dtype: the data type of the array
            """
            self.tempfile = tempfile.NamedTemporaryFile(
                prefix="proc_%d_" % os.getpid(),
                suffix=".shm",
                dir="/dev/shm",
                delete=True)
            self.pathname = self.tempfile.name
            self.shape = shape
            self.dtype = np.dtype(dtype)

        @contextlib.contextmanager
        def txn(self):
            """A contextual wrapper of the shared memory
            :return: a view of the shared memory which has the shape and
            dtype given at construction
            """
            memory = np.memmap(self.pathname,
                               shape=self.shape,
                               dtype=self.dtype)
            yield memory
            del memory

        def __getstate__(self):
            return self.pathname, self.shape, self.dtype

        def __setstate__(self, args):
            self.pathname, self.shape, self.dtype = args
    else:
        def __init__(self, shape, dtype):
            """Initializer
            :param shape: the shape of the array
            :param dtype: the data type of the array
            """
            # BUG FIX: normalize with np.dtype() first — a plain type object
            # (e.g. np.float32) cannot be relied on for .itemsize, which the
            # old `dtype.itemsize` expression required.
            self.dtype = np.dtype(dtype)
            length = int(np.prod(shape)) * self.dtype.itemsize
            self.mmap = mmap.mmap(-1, length)
            self.shape = shape

        # BUG FIX: the decorator was missing, so txn() returned a bare
        # generator that could not be used as a context manager.
        @contextlib.contextmanager
        def txn(self):
            """A contextual wrapper of the shared memory
            :return: a view of the shared memory which has the shape and
            dtype given at construction
            """
            # BUG FIX: np.frombuffer's second positional argument is dtype,
            # not shape; the shape must be applied with reshape().
            memory = np.frombuffer(self.mmap, dtype=self.dtype).reshape(self.shape)
            yield memory
            del memory
def write_one_zarr(memory, zarr, offset, start, stop):
    """Copy memory[start:stop] into `zarr` at position start + offset.

    One chunk's worth of data; used as the worker function of
    shared_memory_to_zarr.
    """
    src = tuple(slice(a, b) for a, b in zip(start, stop))
    dst = tuple(slice(o + a, o + b) for o, a, b in zip(offset, start, stop))
    with memory.txn() as m:
        zarr[dst] = m[src]
def shared_memory_to_zarr(memory, zarr, pool, offset, start=None, stop=None):
    """
    Copy memory to a ZARR array, one chunk per pool task.

    Note: offset, start and stop must be on chunk boundaries of the zarr array
    :param memory: the memory array to copy to zarr
    :param zarr: the zarr array
    :param offset: the 3-tuple offset of the destination for the memory in
                   the zarr array
    :param start: the 3-tuple start coordinates of the memory
    :param stop: the 3-tuple stop coordinates of the memory
    :param pool: the multiprocessing pool to use
    """
    all_starts, all_stops = get_chunk_coords(zarr.chunks, memory.shape, start, stop)
    jobs = [(memory, zarr, offset, a, b) for a, b in zip(all_starts, all_stops)]
    pool.starmap(write_one_zarr, jobs)
def get_chunk_coords(chunksize, shape, start, stop):
    """
    Chunk start and stop coordinates for the volume [start, stop).

    :param chunksize: the size of a chunk in the zarr or blockfs array
    :param shape: the shape of the whole array (to clip edge chunks)
    :param start: 3-tuple of start coordinates on a chunk boundary
                  (None means the array origin)
    :param stop: 3-tuple of stop coordinates on a chunk boundary
                 (None means the full shape)
    :return: two iterators — per-chunk start coordinates and the matching
             per-chunk stop coordinates, in C order
    """
    if start is None:
        start = (0, 0, 0)
    if stop is None:
        stop = shape
    per_dim_starts = [np.arange(a, b, step)
                      for a, b, step in zip(start, stop, chunksize)]
    # clip each chunk's end to the array shape so edge chunks are partial
    per_dim_stops = [np.minimum(s + c, limit)
                     for s, c, limit in zip(per_dim_starts, chunksize, shape)]
    return product(*per_dim_starts), product(*per_dim_stops)
def memory_to_blockfs(memory, blockfs_dir, offset, start=None, stop=None):
    """
    Write a block of memory to a BlockFS directory.

    :param memory: the memory to be written
    :param blockfs_dir: the BlockFS directory to write to; must already be
                        opened with its writer processes started
    :param offset: 3-tuple offset of the memory within the blockfs
    :param start: 3-tuple start coordinates within the memory
                  (default: the origin)
    :param stop: 3-tuple stop coordinates within the memory
                 (default: the full extent)
    """
    chunksize = (blockfs_dir.z_block_size,
                 blockfs_dir.y_block_size,
                 blockfs_dir.x_block_size)
    starts, stops = get_chunk_coords(chunksize, memory.shape, start, stop)
    for (z0, y0, x0), (z1, y1, x1) in zip(starts, stops):
        blockfs_dir.write_block(memory[z0:z1, y0:y1, x0:x1],
                                x0 + offset[2], y0 + offset[1], z0 + offset[0])
from .result import Result
import numpy as np
import pandas as pd
class DiffAbunRes(Result):
    """Differential relative-abundance results for the three sample-type
    comparisons (pre vs skin, post vs skin, pre vs post) of one OTU table."""
    def __init__(self, otu_table, transform_pipe=None, percent=False, **kwargs):
        # one difference matrix (OTU x subject) per comparison, all from the
        # same table and the same transform settings
        super().__init__()
        self.pre_vs_skin = diff_rel_abun(otu_table, compare='pre_vs_skin', transform_pipe=transform_pipe,
                                         percent=percent, **kwargs)
        self.post_vs_skin = diff_rel_abun(otu_table, compare='post_vs_skin', transform_pipe=transform_pipe,
                                          percent=percent, **kwargs)
        self.pre_vs_post = diff_rel_abun(otu_table, compare='pre_vs_post', transform_pipe=transform_pipe,
                                         percent=percent, **kwargs)
    @classmethod
    def load_default(cls, dataset='filtered', transform_pipe=None, percent=False, data_root=None, **kwargs):
        """Build results from '<root>/otu_tables/<dataset>.biom', where the
        root is `data_root` or, when None, the DATA_PATH environment variable.

        Raises EnvironmentError when neither root is available.
        """
        from os import environ
        if data_root is None:
            if 'DATA_PATH' not in environ:
                raise EnvironmentError('Please indicate the root to data folder')
            else:
                root = environ['DATA_PATH']
        else:
            root = data_root
        from skin_mb.data import OtuTable
        return cls(otu_table=OtuTable.from_biom(root + 'otu_tables/' + dataset + '.biom'),
                   transform_pipe=transform_pipe, percent=percent, **kwargs)
    def heatmap(self, compare, ax=None, otu_list=None, subject_list=None,
                adjust_patient_id=True, label_mapper=None, z_log=False, **kwargs):
        """Plot one comparison's matrix as a heatmap; returns (image, data).

        NOTE(review): `self.get_data(compare)` is inherited from Result and
        presumably selects one of the three matrices built in __init__ —
        confirm in the Result base class.
        """
        dataset = self.get_data(compare)
        # optional row (OTU) and column (subject) selection
        if otu_list is None:
            otu_list = dataset.index
        else:
            dataset = dataset.loc[otu_list]
        if subject_list is None:
            subject_list = dataset.columns
        dataset = dataset[subject_list]
        if ax is None:
            import matplotlib.pyplot as plt
            # scale the figure with the number of rows/columns plotted
            fig = plt.figure(figsize=(len(subject_list)/5, len(otu_list)/5))
            ax = fig.add_subplot(111)
        if adjust_patient_id:
            # shift column ids down by 20 (sample-id -> patient-number mapping)
            id_mapper = {col: col-20 for col in dataset.columns}
            dataset.rename(columns=id_mapper, inplace=True)
        if label_mapper:
            dataset.rename(index=label_mapper, inplace=True)
        # these kwargs are handled locally and must not reach heatmap()
        if 'figsize' in kwargs.keys():
            _ = kwargs.pop('figsize')
        if 'gridspec_kw' in kwargs.keys():
            _ = kwargs.pop('gridspec_kw')
        from ..visualizers.Heatmap import heatmap
        from ..visualizers.PlotTools import CMapsDi
        cmap = CMapsDi.BluWhtRed(reverse=True)
        im = heatmap(values=dataset, ax=ax, origin='lower', z_log=z_log, zorder=5, nan_color='#AEAEAE', cmap=cmap, **kwargs)
        ax.set_xlabel('Patient', fontsize=14)
        return im, dataset
def rel_abun(table):
    """Convert counts to relative abundance: each column is scaled to sum to 1."""
    column_totals = table.sum(axis=0)
    return table.div(column_totals, axis=1)
def clr(table, pseudo_count=0.1):
    """Centered log-ratio transform each column of *table*.

    A *pseudo_count* is added first so zero counts do not blow up the log or
    the geometric mean.
    """
    from scipy.stats import gmean

    def _clr_column(column):
        shifted = column + pseudo_count
        return np.log(shifted / gmean(shifted))

    return table.apply(_clr_column, axis=0)
def diff(table, subject_list, postfix1, postfix2, percent=False):
    """Per-subject difference between two sample columns.

    For each subject, subtracts column ``<subject><postfix2>`` from column
    ``<subject><postfix1>``; when *percent* is True the difference is divided
    by the first column, giving a fractional change.
    """
    diff_mtx = pd.DataFrame(index=table.index, columns=subject_list)
    for subject in subject_list:
        first = table[str(subject) + postfix1]
        second = table[str(subject) + postfix2]
        delta = first - second
        diff_mtx[subject] = delta / first if percent else delta
    return diff_mtx
def diff_rel_abun(otu_table, compare='wound_vs_skin', transform_pipe=None, pseudo_count=0.1,
                  percent=False, otu_list=None, subject_list=None):
    """Run a pipeline of transforms over an OTU count table and return the result.

    :param otu_table: object exposing ``sample_list``, ``otu_list`` and
        ``count_table`` (a DataFrame indexed by OTU).
    :param compare: one of 'pre_vs_skin', 'pre_vs_post', 'post_vs_skin';
        determines which per-subject sample columns are differenced.
        NOTE(review): the declared default 'wound_vs_skin' is not an accepted
        value, so calling with the default always raises ValueError — confirm
        the intended default with the authors.
    :param transform_pipe: ordered transform names ('rel abun', 'clr', 'diff');
        defaults to ['rel abun', 'diff'].
    :param pseudo_count: offset used by the 'clr' transform.
    :param percent: forwarded to the 'diff' transform.
    :raises ValueError: on an unrecognized *compare* value.
    """
    from functools import partial

    if subject_list is None:
        # Subject id is encoded in the first two characters of each sample name
        # — presumably zero-padded ints; verify against the sample naming.
        subject_list = set([int(sample[:2]) for sample in otu_table.sample_list])
    if otu_list is None:
        otu_list = otu_table.otu_list

    postfix_pairs = {
        'pre_vs_skin': ('A', 'C'),
        'pre_vs_post': ('A', 'B'),
        'post_vs_skin': ('B', 'C'),
    }
    key = compare.lower()
    if key not in postfix_pairs:
        raise ValueError("Compare should be 'pre_vs_skin', 'pre_vs_post', or 'post_vs_skin'")
    postfix1, postfix2 = postfix_pairs[key]

    if transform_pipe is None:
        transform_pipe = ['rel abun', 'diff']

    transformations = {
        'rel abun': rel_abun,
        'clr': partial(clr, pseudo_count=pseudo_count),
        'diff': partial(diff, subject_list=subject_list,
                        postfix1=postfix1, postfix2=postfix2, percent=percent)
    }
    table = otu_table.count_table.loc[otu_list]
    for step in transform_pipe:
        table = transformations[step](table)
    return table
| 4,706 | 1,521 |
# Print the multiples of 10 from 0 up to (and including) 110.
for value in range(0, 119, 10):
    print(value)
| 49 | 31 |
import numpy as np
import unittest
import coremltools.models.datatypes as datatypes
from coremltools.models import neural_network as neural_network
from coremltools.models import MLModel
from coremltools.models.neural_network.printer import print_network_spec
from coremltools.converters.nnssa.coreml.graph_pass.mlmodel_passes import \
remove_disconnected_layers, transform_conv_crop, remove_redundant_transposes
import copy
import pytest
# Set True to print network specs before/after each graph pass while debugging.
DEBUG = False
# Fix the RNG so randomized test inputs are reproducible across runs.
np.random.seed(100)
class MLModelPassesTest(unittest.TestCase):
    """Unit tests for the mlmodel graph passes (remove_disconnected_layers,
    transform_conv_crop, remove_redundant_transposes).

    Each test builds a small NeuralNetwork spec with NeuralNetworkBuilder,
    runs one pass, and checks the resulting layer list.
    """

    def test_load_constant_remove(self):
        """Disconnected load-constant layers are removed by the pass."""
        input_features = [('data', datatypes.Array(*(3, 4)))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
        builder.add_activation('relu1', 'RELU', 'data', 'relu1')
        builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
        builder.add_activation('relu2', 'RELU', 'relu1', 'out')
        builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
        builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
        spec = builder.spec
        np.testing.assert_equal(5, len(spec.neuralNetwork.layers))
        remove_disconnected_layers(spec)
        # Only relu1 -> relu2 feed the declared output; the constants go away.
        np.testing.assert_equal(2, len(spec.neuralNetwork.layers))

    def test_dead_layer_remove(self):
        """A whole dead subgraph (constants, split, squeeze, relus) is removed."""
        input_features = [('data', datatypes.Array(*(3, 4)))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
        builder.add_activation('relu1', 'RELU', 'data', 'relu1')
        builder.add_load_constant_nd('const1', 'c1', constant_value=np.ones((5,)), shape=(5,))
        builder.add_load_constant_nd('const2', 'c2', constant_value=np.ones((5,)), shape=(5,))
        # None of the layers fed by const2 reach the network output 'out'.
        builder.add_split_nd('splitnd1', 'const2', ['s1', 's2', 's3'], axis=0, num_splits=3)
        builder.add_squeeze('squeeze', 's1', 'squeeze_out')
        builder.add_activation('relu4', 'RELU', 's2', 'relu4')
        builder.add_activation('relu5', 'RELU', 'relu4', 'relu5')
        builder.add_load_constant_nd('const3', 'c3', constant_value=np.ones((5,)), shape=(5,))
        builder.add_activation('relu2', 'RELU', 'relu1', 'out')
        spec = builder.spec
        np.testing.assert_equal(9, len(spec.neuralNetwork.layers))
        remove_disconnected_layers(spec)
        np.testing.assert_equal(2, len(spec.neuralNetwork.layers))

    @pytest.mark.xfail
    def test_dead_layer_remove_branch(self):
        """A branch layer whose outputs never reach the network output is removed.

        Marked xfail — presumably the pass does not yet handle branch layers;
        confirm against mlmodel_passes.
        """
        convergence_tolerance = 1e-8
        input_features = [('input', datatypes.Array(*(2,)))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
        # add condition to break from the loop, if convergence criterion is met
        builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
        branch_layer = builder.add_branch('branch_layer', 'cond')
        builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
        builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
        builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
        builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
        builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
        builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
        # The output is squeezed straight from 'input', bypassing the branch.
        builder.add_squeeze('out', 'input', 'out', squeeze_all=True)
        mlmodel = MLModel(builder.spec)
        data = np.random.rand(2,)
        data_dict = {'input': data}
        before_pass_out = mlmodel.predict(data_dict)['out']
        if DEBUG:
            print('\n mlmodel description before remove disconnected layers pass: \n')
            print_network_spec(builder.spec, style='coding')
        remove_disconnected_layers(builder.spec)
        if DEBUG:
            print('\n mlmodel description after remove disconnected layers pass: \n')
            print_network_spec(builder.spec, style='coding')
        mlmodel = MLModel(builder.spec)
        after_pass_out = mlmodel.predict(data_dict)['out']
        # Predictions must be unchanged and only the squeeze layer should remain.
        np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
        np.testing.assert_equal(len(builder.spec.neuralNetwork.layers), 1)

    @pytest.mark.xfail
    def test_dead_layer_partial_branch(self):
        """Dead layers inside one arm of a live branch are removed; live ones kept.

        Marked xfail — presumably partial-branch pruning is unimplemented;
        confirm against mlmodel_passes.
        """
        convergence_tolerance = 1e-8
        input_features = [('input', datatypes.Array(*(2,)))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
        # add condition to break from the loop, if convergence criterion is met
        builder.add_less_than('cond', ['input'], 'cond', alpha=convergence_tolerance)
        branch_layer = builder.add_branch('branch_layer', 'cond')
        builder_ifbranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.ifBranch)
        builder_ifbranch.add_activation('relu1', 'RELU', 'input', 'relu1_out')
        builder_ifbranch.add_activation('relu2_out', 'RELU', 'relu1_out', 'relu2_out')
        builder_elsebranch = neural_network.NeuralNetworkBuilder(nn_spec=branch_layer.branch.elseBranch)
        builder_elsebranch.add_activation('linear1', 'LINEAR', 'input', 'linear1_out')
        # linear_red_1 / linear_red_2 form a dead chain inside the else branch.
        builder_elsebranch.add_activation('linear_red_1', 'LINEAR', 'input', 'linear_red1_out')
        builder_elsebranch.add_activation('linear_red_2', 'LINEAR', 'linear_red1_out', 'linear_red2_out')
        builder_elsebranch.add_activation('linear2', 'LINEAR', 'linear1_out', 'relu2_out')
        builder.add_squeeze('out', 'relu2_out', 'out', squeeze_all=True)
        mlmodel = MLModel(builder.spec)
        data = np.random.rand(2,)
        data_dict = {'input': data}
        before_pass_out = mlmodel.predict(data_dict)['out']
        if DEBUG:
            print('\n mlmodel description before remove disconnected layers pass: \n')
            print_network_spec(builder.spec, style='coding')
        old_spec = copy.copy(builder.spec)
        remove_disconnected_layers(builder.spec)
        if DEBUG:
            print('\n mlmodel description after remove disconnected layers pass: \n')
            print_network_spec(builder.spec, style='coding')
        mlmodel = MLModel(builder.spec)
        after_pass_out = mlmodel.predict(data_dict)['out']
        np.testing.assert_almost_equal(before_pass_out, after_pass_out, decimal=2)
        # The if-branch is untouched; the else-branch loses its dead chain.
        np.testing.assert_equal(len(old_spec.neuralNetwork.layers[1].branch.ifBranch.layers),
                                len(builder.spec.neuralNetwork.layers[1].branch.ifBranch.layers))
        np.testing.assert_equal(len(builder.spec.neuralNetwork.layers[1].branch.elseBranch.layers), 2)

    def test_conv_crop_bn_to_conv_bn_crop(self):
        """transform_conv_crop moves the crop after the batchnorm."""
        input_features = [('data', datatypes.Array(1, 10, 10))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
        W = np.ones((2,10,1,10), dtype=np.float32)
        builder.add_convolution(name='conv',
                                kernel_channels=1,
                                output_channels=2,
                                height=2, width=2,
                                stride_height=1, stride_width=1,
                                border_mode='valid', groups=1,
                                W=W,
                                b=None, has_bias=False,
                                input_name='data', output_name='conv_out')
        builder.add_crop(name='crop',
                         left=1, right=1, top=1, bottom=1, offset=0,
                         input_names=['conv_out'],
                         output_name='crop_out')
        builder.add_batchnorm(name='bn',
                              channels=2,
                              gamma=np.ones(2,).astype(np.float32),
                              beta=np.ones(2,).astype(np.float32),
                              mean=np.ones(2,).astype(np.float32),
                              variance=np.ones(2,).astype(np.float32),
                              input_name='crop_out',
                              output_name='out')
        # Conv -> Crop -> BN
        spec = builder.spec.neuralNetwork
        np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
        np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
        # transform the pattern
        transform_conv_crop(builder.spec)
        # Conv -> BN -> Crop
        np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
        np.testing.assert_equal('crop', spec.layers[2].WhichOneof('layer'))

    def test_conv_crop_bn_relu_to_conv_bn_relu_crop(self):
        """transform_conv_crop pushes the crop past both batchnorm and activation."""
        input_features = [('data', datatypes.Array(1, 10, 10))]
        output_features = [('out', None)]
        builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
        W = np.ones((2,10,1,10), dtype=np.float32)
        builder.add_convolution(name='conv',
                                kernel_channels=1,
                                output_channels=2,
                                height=2, width=2,
                                stride_height=1, stride_width=1,
                                border_mode='valid', groups=1,
                                W=W,
                                b=None, has_bias=False,
                                input_name='data', output_name='conv_out')
        builder.add_crop(name='crop',
                         left=1, right=1, top=1, bottom=1, offset=0,
                         input_names=['conv_out'],
                         output_name='crop_out')
        builder.add_batchnorm(name='bn',
                              channels=2,
                              gamma=np.ones(2,).astype(np.float32),
                              beta=np.ones(2,).astype(np.float32),
                              mean=np.ones(2,).astype(np.float32),
                              variance=np.ones(2,).astype(np.float32),
                              input_name='crop_out',
                              output_name='bn_out')
        builder.add_activation(name='relu',
                               non_linearity='RELU',
                               input_name='bn_out',
                               output_name='out')
        # Conv -> Crop -> BN -> ReLU
        spec = builder.spec.neuralNetwork
        np.testing.assert_equal('crop', spec.layers[1].WhichOneof('layer'))
        np.testing.assert_equal('batchnorm', spec.layers[2].WhichOneof('layer'))
        np.testing.assert_equal('activation', spec.layers[3].WhichOneof('layer'))
        # transform the pattern
        transform_conv_crop(builder.spec)
        # Conv -> BN -> ReLU -> Crop
        np.testing.assert_equal('batchnorm', spec.layers[1].WhichOneof('layer'))
        np.testing.assert_equal('activation', spec.layers[2].WhichOneof('layer'))
        np.testing.assert_equal('crop', spec.layers[3].WhichOneof('layer'))

    def test_redundant_transposes(self):
        """remove_redundant_transposes cancels transpose chains that compose to identity."""

        def _build_and_test_network(input_size, transpose_layers, expected_layers):
            """
            Helper function for testing transpose removal.

            Args:
                input_size: Size of the input network tensor.
                transpose_layers: Array of transpose axes definitions.
                expected_layers: Array of indices into transpose_layers indicating
                    which of the transpose layers should be present after the
                    graph pass.
            """
            input_features = [('data', datatypes.Array(*input_size))]
            output_features = [('out', None)]
            builder = neural_network.NeuralNetworkBuilder(input_features, output_features)
            last_layer = 'data'
            for idx, axes in enumerate(transpose_layers):
                name = 't{}'.format(idx)
                if idx == len(transpose_layers) - 1:
                    output_name = 'out'
                else:
                    output_name = name + '_out'
                builder.add_transpose(name=name,
                                      axes=axes,
                                      input_name=last_layer,
                                      output_name=output_name)
                last_layer = output_name
            spec = builder.spec.neuralNetwork
            # Check the network before the graph pass.
            for idx in range(len(transpose_layers)):
                np.testing.assert_equal('transpose', spec.layers[idx].WhichOneof('layer'))
            # Run the removal pass.
            remove_redundant_transposes(builder.spec)
            # Verify only the expected layers remain.
            np.testing.assert_equal(len(spec.layers), len(expected_layers))
            for output_layer_idx, input_layer_idx in enumerate(expected_layers):
                np.testing.assert_equal(
                    'transpose',
                    spec.layers[output_layer_idx].WhichOneof('layer')
                )
                np.testing.assert_array_equal(
                    transpose_layers[input_layer_idx],
                    spec.layers[output_layer_idx].transpose.axes
                )

        _build_and_test_network(
            input_size=[1, 10, 10],
            # These transposes together are the identity.
            transpose_layers=[[2, 0, 1], [1, 2, 0]],
            expected_layers=[],
        )
        _build_and_test_network(
            input_size=[1, 10, 10],
            # These transposes are not inverses.
            transpose_layers=[[2, 0, 1], [2, 0, 1]],
            expected_layers=[0, 1],
        )
        _build_and_test_network(
            input_size=[1, 1, 10, 10, 3],
            # First two are the identity, then an extra.
            transpose_layers=[[2, 4, 1, 0, 3], [3, 2, 0, 4, 1], [1, 0, 2, 3, 4]],
            expected_layers=[2],
        )
        _build_and_test_network(
            input_size=[1, 1, 10, 10, 3],
            # First is okay, next two are the identity.
            transpose_layers=[[1, 0, 2, 3, 4], [2, 4, 1, 0, 3], [3, 2, 0, 4, 1]],
            expected_layers=[0],
        )
        # A slightly more complicated test case where there are two transposes
        # in topological order, but are actually in parallel in the graph.
        builder = neural_network.NeuralNetworkBuilder(
            [('data', datatypes.Array(2, 4, 8))],
            [('out', None)]
        )
        last_layer = 'data'
        builder.add_transpose(name='t1',
                              axes=[0, 2, 1],
                              input_name='data',
                              output_name='t1')
        builder.add_transpose(name='t2',
                              axes=[0, 2, 1],
                              input_name='data',
                              output_name='t2')
        builder.add_stack(name='stack',
                          input_names=['t1', 't2'],
                          output_name='out')
        spec = builder.spec.neuralNetwork
        # Run the removal pass.
        remove_redundant_transposes(builder.spec)
        # Verify nothing was removed.
        np.testing.assert_equal(len(spec.layers), 3)
if __name__ == '__main__':
    # Flip to False to run only the single smoke test below.
    RUN_ALL_TESTS = True
    if not RUN_ALL_TESTS:
        suite = unittest.TestSuite()
        suite.addTest(MLModelPassesTest('test_load_constant_remove'))
        unittest.TextTestRunner().run(suite)
    else:
        unittest.main()
| 15,783 | 4,828 |
# Generated by Django 2.1.7 on 2019-04-03 11:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``Request_to_repair.inventory_number``
    into a nullable FK to ``Equipment`` that is set to NULL when the
    referenced equipment is deleted.
    """

    dependencies = [
        ('accounting_tech', '0017_auto_20190403_1434'),
    ]

    operations = [
        migrations.AlterField(
            model_name='request_to_repair',
            name='inventory_number',
            # SET_NULL requires null=True; db_column keeps the existing column name.
            field=models.ForeignKey(db_column='inventory_number', null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounting_tech.Equipment', verbose_name='ИНВ №'),
        ),
    ]
| 575 | 208 |
from puretabix import get_bgzip_lines_parallel
class TestBlockGZip:
    """Checks the parallel bgzip reader against a plain gzip read of the same file."""

    def test_get_lines(self, vcf_filename, vcf_gz):
        """Every line from the parallel reader must match the directly-read file."""
        expected = tuple(sorted(raw.decode() for raw in vcf_gz.readlines()))
        observed = tuple(sorted(get_bgzip_lines_parallel(vcf_filename)))
        for want, got in zip(expected, observed):
            print(want, got)
            want = want.strip()
            got = str(got)
            assert want == got, (want, got)
| 500 | 170 |
from setuptools import find_packages, setup

# Packaging metadata for the datapool_client distribution.
setup(
    name="datapool_client",
    version="1.0",
    description="Designed to access the datapool software developed by ETH Zurich - SIS and Eawag. "
                "Find out more under https://datapool.readthedocs.io/en/latest/.",
    author="Christian Foerster",
    author_email="christian.foerster@eawag.ch",
    license="MIT Licence",
    classifiers=[
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3.9",
    ],
    install_requires=[
        "pandas",
        "numpy",
        "psycopg2-binary",
        "matplotlib",
        "cufflinks",
        "plotly",
        # Pinned — presumably for compatibility with plotly/cufflinks at the
        # time of release; confirm before upgrading.
        "pyparsing==2.4.7",
        "sqlalchemy",
        "tqdm"
    ],
    keywords="datapool_client, eawag, postgres",
    packages=find_packages(),
    include_package_data=True,  # ship non-Python files listed in MANIFEST.in
)
| 887 | 289 |