id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
179449 | <filename>__main__.py
import os
from pathlib import Path
import socketserver
from server import *
from auth import *
from smarthome import *
class ThreadingSimpleServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """HTTP server that dispatches each incoming request on its own thread.

    NOTE(review): ``http`` is not imported in this module directly — it is
    presumably re-exported by one of the star imports above; confirm.
    """
    pass
def requestDevicesSync():
    """Trigger a forced device re-sync on the smart-home request handler."""
    SmartHomeReqHandler.forceDevicesSync()
def startServer():
    """Create the threaded HTTP server, launch the background sync helper,
    then serve requests until interrupted.

    Catches KeyboardInterrupt/SystemExit so Ctrl-C shuts the server down
    cleanly; the listening socket is always closed on the way out.
    """
    server = None
    try:
        # Create a web server and define the handler to manage the
        # incoming request
        server = ThreadingSimpleServer(('', PORT_NUMBER), AogServer)
        print('Started httpserver on port ', PORT_NUMBER)
        # Launch the device-sync helper as a detached background process.
        current_dir = Path(__file__).parent.absolute()
        command = 'python3 ' + str(current_dir) + '/sync.py'
        os.popen(command)
        # Wait forever for incoming http requests
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit):
        print('^C received, shutting down the web server')
    finally:
        # Guard against the constructor failing: `server` may still be None,
        # which previously raised NameError inside the except branch.
        if server is not None:
            server.socket.close()
if __name__ == "__main__":
    # execute only if run as a script
    # Register every GET and POST route (OAuth + smart-home endpoints) with
    # the server's dispatch tables before starting it.
    for path,value in {**oauthGetMappings, **smarthomeGetMappings}.items():
        addGetMappings(path, value)
    for path,value in {**oauthPostMappings, **smarthomePostMappings}.items():
        addPostMappings(path, value)
    startServer()
| StarcoderdataPython |
1770848 | """
Testing
"""
from setuptools import setup, find_packages
# Packaging metadata; `find_packages()` discovers every package in the repo
# and `include_package_data=True` ships the files listed in MANIFEST.in.
setup(
    name="ostur",
    version="0.1",
    description="Ostur python libraries",
    url="https://github.com/alvarop/ostur",
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
)
| StarcoderdataPython |
3255865 | """initial revision
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2022-01-20 13:04:42.464001
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ab8ca6ee9963'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision's schema changes (empty initial revision)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision's schema changes (nothing to undo)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| StarcoderdataPython |
158530 | <reponame>macdaliot/exist
from rest_framework.routers import DefaultRouter
from apps.reputation.api import blViewSet
from apps.twitter.api import twViewSet
from apps.exploit.api import exViewSet
from apps.threat.api import threatEventViewSet, threatAttrViewSet
# DRF router mapping each app's viewset to a stable URL prefix
# (no trailing slash on generated routes).
router = DefaultRouter(trailing_slash=False)
router.register('reputation', blViewSet)
router.register('twitter', twViewSet)
router.register('exploit', exViewSet)
router.register('threatEvent', threatEventViewSet)
router.register('threatAttribute', threatAttrViewSet)
| StarcoderdataPython |
1787988 | from architect.version import __version__ | StarcoderdataPython |
3358350 | #!/usr/bin/env python3
import argparse
import glob
import operator
import shutil
import os
import sys
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
def main():
    """Evaluate tab-separated prediction/ground-truth files for one set.

    Reads ``<dict_dir><suffix>`` where each line is ``pred\tgold`` (both
    integer class labels) and computes per-class precision/recall/F1 plus
    the weighted F1.

    Returns
    -------
    list or None
        ``[p0, r0, f1_0, p1, r1, f1_1, f1_weighted]`` for the two classes,
        or ``None`` when ``--set_`` is not one of the recognised sets.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("--dict_dir", help='Folder containing dictionaries')
    parser.add_argument("--set_", help='input set for evaluation')
    args = parser.parse_args()

    # One place to map each evaluation set to its results-file suffix
    # (replaces the previous nested if/elif chain).
    suffixes = {
        'test_synthetic': 'best_test_synthetic_preds_gt.txt',
        'test': 'best_test_preds_gt.txt',
        'unrelated': 'best_unrelated_preds_gt.txt',
        'some_meaning_difference': 'best_some_meaning_difference_preds_gt.txt',
    }
    if args.set_ not in suffixes:
        return None
    suffix = suffixes[args.set_]

    preds, gold = [], []
    with open(args.dict_dir + suffix, 'r') as file_:
        for line in file_:
            line = line.strip().split('\t')
            preds.append(int(line[0]))
            gold.append(int(line[1]))

    precisions_per_class = precision_score(gold, preds, average=None)
    recall_per_class = recall_score(gold, preds, average=None)
    f1_per_class = f1_score(gold, preds, average=None)
    f1_weighted = f1_score(gold, preds, average='weighted')
    # (The previously computed weighted precision/recall and micro-F1 were
    # never used and have been removed.)
    return [precisions_per_class[0], recall_per_class[0], f1_per_class[0],
            precisions_per_class[1], recall_per_class[1], f1_per_class[1],
            f1_weighted]
if __name__ == "__main__":
    result = main()
    # NOTE(review): `result` is a list of floats (or None); passing a non-int
    # object to sys.exit() prints it to stderr and exits with status 1 —
    # confirm this "print metrics and exit non-zero" behaviour is intended.
    sys.exit(result)
| StarcoderdataPython |
3300821 | <gh_stars>0
from .canvas_sizes import MinHelper, MaxHelper
| StarcoderdataPython |
3238077 | <filename>HW3/randomforest.py
# -*- coding: utf-8 -*-
import pandas as pd
from sklearn import tree
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import model_selection
from sklearn import metrics
import numpy as np
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC, LinearSVC
# Load the training data (Titanic-style dataset with a "Survived" label).
data = pd.read_csv("train.csv")
# preproccessing
# Impute missing Fare (median) and Age (mean), then integer-encode the
# categorical Sex and Embarked columns; missing Embarked becomes "5".
data["Fare"]=data["Fare"].fillna(data["Fare"].dropna().median())
data["Age"]=data["Age"].fillna(data["Age"].dropna().mean())
data.loc[data["Sex"]=="male","Sex"]=0
data.loc[data["Sex"]=="female","Sex"]=1
data["Embarked"]=data["Embarked"].fillna("5")
data.loc[data["Embarked"]=="S","Embarked"]=0
data.loc[data["Embarked"]=="C","Embarked"]=1
data.loc[data["Embarked"]=="Q","Embarked"]=2
# part 1: random forest, gini criterion, max_depth=7 on a fixed 80/20 split
feature_names1 =["Pclass","Sex","Age","Fare","SibSp" ,"Parch","Embarked"]
x = data[feature_names1].values
y = data["Survived"].values
X_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.2, random_state=5)
random_forest1 = RandomForestClassifier(n_estimators=100,criterion="gini",max_depth=7)
random_forest1.fit(X_train, Y_train)
Y_prediction = random_forest1.predict(X_test)
random_forest1.score(X_train, Y_train)
print("Accuracy for 7 features with gini and max_depth=7 on train:", random_forest1.score(X_train, Y_train))
print("Accuracy for 7 features with gini and max_depth=7 on test:", metrics.accuracy_score(Y_test, Y_prediction))
#Part2: gini criterion, shallower trees (max_depth=2)
random_forest2 = RandomForestClassifier(n_estimators=100,criterion="gini",max_depth=2)
random_forest2.fit(X_train, Y_train)
Y_prediction = random_forest2.predict(X_test)
print("Accuracy for 7 features with gini and max_depth=2 on train:", random_forest2.score(X_train, Y_train))
print("Accuracy for 7 features with gini and max_depth=2 on test:", metrics.accuracy_score(Y_test, Y_prediction))
#part 3: entropy criterion, max_depth=7
random_forest3 = RandomForestClassifier(n_estimators=100,criterion="entropy",max_depth=7)
random_forest3.fit(X_train, Y_train)
Y_prediction = random_forest3.predict(X_test)
print("Accuracy for 7 features with entropy and max_depth=7 on train:", random_forest3.score(X_train, Y_train))
print("Accuracy for 7 features with entropy and max_depth=7 on test:", metrics.accuracy_score(Y_test, Y_prediction))
#part 4: entropy criterion, max_depth=2
# NOTE(review): this rebinds `random_forest3` instead of using a new
# `random_forest4` name, discarding the part-3 model — confirm intended.
random_forest3 = RandomForestClassifier(n_estimators=100,criterion="entropy",max_depth=2)
random_forest3.fit(X_train, Y_train)
Y_prediction = random_forest3.predict(X_test)
print("Accuracy for 7 features with entropy and max_depth=2 on train:", random_forest3.score(X_train, Y_train))
print("Accuracy for 7 features with entropy and max_depth=2 on test:", metrics.accuracy_score(Y_test, Y_prediction))
import time
#Calculating Time
# Wall-clock time for one fit+predict+score cycle of the random forest.
start = time.time()
random_forest1 = RandomForestClassifier(n_estimators=100,criterion="gini",max_depth=7)
random_forest1.fit(X_train, Y_train)
Y_prediction = random_forest1.predict(X_test)
random_forest1.score(X_train, Y_train)
end = time.time()
print(f"Runtime for random forest is {end - start}")
# Wall-clock time for one fit+predict cycle of a single decision tree.
start = time.time()
dt11 = DecisionTreeClassifier(random_state=5,max_depth=7,criterion="entropy")
dt11=dt11.fit(X_train,Y_train)
y_pred = dt11.predict(X_test)
end = time.time()
print(f"Runtime for decision tree is {end - start}") | StarcoderdataPython |
2653 | <reponame>HaujetZhao/Caps_Writer<filename>src/moduels/gui/Tab_Help.py
# -*- coding: UTF-8 -*-
from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout
from PySide2.QtCore import Signal
from moduels.component.NormalValue import 常量
from moduels.component.SponsorDialog import SponsorDialog
import os, webbrowser
class Tab_Help(QWidget):
    """Help tab: buttons linking to the docs, video tutorial, release pages,
    the QQ discussion group and the sponsor (tip) dialog."""

    # (message, timeout) signal forwarded to the main window's status bar.
    状态栏消息 = Signal(str, int)

    def __init__(self):
        super().__init__()
        self.initElement()  # first create the widgets
        self.initSlots()  # then connect the widgets to their signal slots
        self.initLayout()  # then lay them out
        self.initValue()  # finally set the widgets' values

    def initElement(self):
        """Create every button and the master layout."""
        self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档'))
        self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记'))
        self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程'))
        self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本'))
        self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本'))
        self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群'))
        self.tipButton = QPushButton(self.tr('打赏作者'))
        self.masterLayout = QVBoxLayout()

    def initSlots(self):
        """Wire each button to its action (open a URL or a dialog)."""
        self.打开帮助按钮.clicked.connect(self.openHelpDocument)
        self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))
        self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))
        self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))
        self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))
        self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open(
            self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))
        self.tipButton.clicked.connect(lambda: SponsorDialog(self))

    def initLayout(self):
        """Attach the visible buttons to the master layout."""
        self.setLayout(self.masterLayout)
        # self.masterLayout.addWidget(self.打开帮助按钮)
        # self.masterLayout.addWidget(self.ffmpegMannualNoteButton)
        self.masterLayout.addWidget(self.openVideoHelpButtone)
        self.masterLayout.addWidget(self.openGiteePage)
        self.masterLayout.addWidget(self.openGithubPage)
        self.masterLayout.addWidget(self.linkToDiscussPage)
        self.masterLayout.addWidget(self.tipButton)

    def initValue(self):
        """Cap the height of every button."""
        self.打开帮助按钮.setMaximumHeight(100)
        self.ffmpegMannualNoteButton.setMaximumHeight(100)
        self.openVideoHelpButtone.setMaximumHeight(100)
        self.openGiteePage.setMaximumHeight(100)
        self.openGithubPage.setMaximumHeight(100)
        self.linkToDiscussPage.setMaximumHeight(100)
        self.tipButton.setMaximumHeight(100)

    def openHelpDocument(self):
        """Open the bundled HTML help document with the platform opener."""
        try:
            if 常量.系统平台 == 'Darwin':
                import shlex
                os.system("open " + shlex.quote(self.tr("./misc/Docs/README_zh.html")))
            elif 常量.系统平台 == 'Windows':
                os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed to Exception.
            print('未能打开帮助文档')
| StarcoderdataPython |
1790096 | <gh_stars>100-1000
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render
from django.http import HttpResponseRedirect
from vaas.cluster.cluster import ServerExtractor
from vaas.cluster.models import LogicalCluster
from .forms import PurgeForm
from vaas.purger.purger import VarnishPurger
def purger_permission(user):
    """Gate the purger views: only staff users may purge."""
    return user.is_staff
@user_passes_test(purger_permission, login_url='/admin/login')
def purge_view(request):
    """Render the purge form and, on a valid POST, purge the URL from every
    Varnish server of the selected cluster, reporting the outcome via a
    message and redirecting to the index page.
    """
    if request.method == 'POST':
        form = PurgeForm(request.POST)
        if form.is_valid():
            # `cleaned_data['cluster']` already holds the LogicalCluster
            # instance, so the previous re-fetch by primary key was a
            # redundant extra database query.
            cluster = form.cleaned_data['cluster']
            servers = ServerExtractor().extract_servers_by_clusters([cluster])
            result = VarnishPurger().purge_url(form.cleaned_data['url'], servers)
            messages.warning(
                request,
                'Url {} purged from cluster {} - cleaned {} server(s), errors occurred for {} server(s)'.format(
                    form.cleaned_data['url'], cluster.name, len(result['success']), len(result['error'])
                )
            )
            return HttpResponseRedirect('/')
    else:
        form = PurgeForm()
    # GET, or an invalid POST: show the (possibly bound) form again.
    return render(request, 'purge_form.html', {'form': form, 'has_permission': True})
| StarcoderdataPython |
3305355 | <reponame>vyahello/quotes
"""ASGI config for manager project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
from django.core.handlers.asgi import ASGIHandler
# Fall back to the project's settings module unless one is already configured.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "manager.settings")

# The ASGI callable that servers (uvicorn, daphne, ...) import and serve.
application: ASGIHandler = get_asgi_application()
| StarcoderdataPython |
3316170 | <filename>configserver/tools/__init__.py
# Initial server source code from <NAME> (<EMAIL>) (Many thanks for showing me a great scaffold for CherryPy)
# All levels of the XSS challenge are under the MIT license; Good luck learning!
# Follow me on twitter @infosec_au or take a visit to http://shubh.am | StarcoderdataPython |
3358989 | <gh_stars>1-10
"""
Implements a simple version check for a client/server pair.
Used when you want to verify if your server version is a minimum
value.
The decorator allows arguments within the decorator itself.
"""
# Map gRPC server version tuple to the corresponding MAPDL release name.
VERSION_MAP = {(0, 0, 0): '2020R2',
               (0, 3, 0): '2021R1',
               (0, 4, 0): '2021R2',
               (0, 4, 1): '2021R2'}
def meets_version(version, meets):
    """Check if a version string meets a minimum version.

    This is a simplified way to compare version strings. For a more robust
    tool, please check out the ``packaging`` library:
    https://github.com/pypa/packaging

    Parameters
    ----------
    version : str or tuple
        Version string or tuple. For example ``'0.25.1'``.
    meets : str or tuple
        Version string or tuple. For example ``'0.25.2'``.

    Returns
    -------
    newer : bool
        True if version ``version`` is greater or equal to version ``meets``.

    Raises
    ------
    ValueError
        If the two (tuple) versions do not have the same number of parts.

    Examples
    --------
    >>> meets_version('0.25.1', '0.25.2')
    False

    >>> meets_version('0.26.0', '0.25.2')
    True
    """
    va = version if isinstance(version, tuple) else version_tuple(version)
    vb = meets if isinstance(meets, tuple) else version_tuple(meets)
    if len(va) != len(vb):
        raise ValueError("Versions are not comparable.")
    # Lexicographic tuple comparison is exactly the element-wise
    # "first differing component decides" rule the original loop implemented.
    return va >= vb
def version_tuple(v):
    """Convert a version string to a tuple containing ints.

    Non-numeric version strings will be converted to 0. For example:
    ``'0.28.0dev0'`` will be converted to ``'0.28.0'``

    Returns
    -------
    ver_tuple : tuple
        Length 3 tuple representing the major, minor, and patch
        version.
    """
    parts = v.split(".")
    if len(parts) > 3:
        raise ValueError('Version strings containing more than three parts '
                         'cannot be parsed')
    # Right-pad with zeros so the result is always (major, minor, patch).
    parts += ['0'] * (3 - len(parts))
    return tuple(int(part) if part.isnumeric() else 0 for part in parts)
class VersionError(ValueError):
    """Raised when the Server is the wrong version"""

    def __init__(self, msg='Invalid Server version'):
        super().__init__(msg)
def version_requires(min_version):
    """Ensure the method called matches a certain version.

    Example usage:

    class Client():

        def __init__(self):
            '''Connects to a fake server'''
            self._server = FakeServer()

        @version_requires((0, 1, 3))  # require 0.1.3
        def meth_a(self):
            '''calls method a on the 'server''''
            return self._server.meth_a()

        @version_requires((0, 2, 3))  # require 0.2.3
        def meth_b(self):
            '''calls method b on the 'server''''
            return self._server.meth_b()
    """
    from functools import wraps  # local import keeps the decorator self-contained

    def decorator(func):
        # first arg *must* be a length-3 tuple containing the version
        if not isinstance(min_version, tuple) or len(min_version) != 3:
            raise TypeError('version_requires decorator must include a version '
                            'tuple. For example:\n'
                            '``@_version_requires((0, 1, 3))``')

        @wraps(func)  # preserve func.__name__/__doc__ for introspection
        def wrapper(self, *args, **kwargs):
            """Check the server version, then call the original function."""
            # must be called from a "Client" instance with a server attribute
            if not hasattr(self, '_server_version'):
                raise AttributeError('decorated class must have `_server_version` '
                                     'attribute')
            if not meets_version(self._server_version, min_version):
                # try to give the user a helpful warning indicating
                # the minimum version of MAPDL
                if min_version in VERSION_MAP:
                    raise VersionError(f'``{func.__name__}`` requires MAPDL version '
                                       f'>= {VERSION_MAP[min_version]}')
                # otherwise, use the less helpful "gRPC server" version
                raise VersionError(f'``{func.__name__}`` requires gRPC server '
                                   f'version >= {min_version}')
            return func(self, *args, **kwargs)
        return wrapper
    return decorator
| StarcoderdataPython |
172385 | import liquepy as lq
import numpy as np
import eqsig
import pysra
import sfsimodels as sm
class EqlinStockwellAnalysis(object):
    """Time-windowed equivalent-linear site response in the Stockwell domain.

    Builds per-time-interval input-to-surface transfer functions (optionally
    degraded by pore-pressure ratios) and applies them to the Stockwell
    transform of the input motion to obtain the surface motion.
    """

    def __init__(self, soil_profile, in_sig, rus=None, wave_field='outcrop', store='surface', gibbs=0, t_inc=1.0, t_win=3.0, strain_at_incs=True, strain_ratio=0.9):
        """
        Equivalent linear Stockwell Analysis

        This method performs the eight step procedure outlined in Millen et al. (2021)
        to obtain the surface acceleration time series from an input motion at the base of a 1D soil profile.

        Note: a soil layer is a layer as defined in the soil_profile object, a slice is a layer from the pysra_profile.

        Parameters
        ----------
        soil_profile: sm.SoilProfile object
        in_sig: eqsig.AccSignal object
            Input motion at base
        rus: array_like or None
            A 2D array of pore pressure ratios of shape `(soil_profile.n_layers, in_sig.npts)`,
            if none the 'total stress' conditions are assumed
        wave_field: str
            If input motion should be used as an `outcrop` or '`within` motion.
        store: str
            if 'surface' (default), it only stores the surface acceleration time series,
            if 'all' then stores Stockwell transforms.
        gibbs: int or None
            If integer then zero-pad input motion to next power of 2, to reduce Gibb's effect
        t_inc: float (default=1s)
            Time increment of interval for determining transfer functions
        t_win: float (default=3s)
            Time window for determining maximum strain value
        strain_at_incs: bool (default=True)
            If true then compute effective strain at time intervals, else use same value for full time series
        strain_ratio: float (default=0.9)
            Ratio between effective strain and peak (maximum) strain
        """
        assert isinstance(soil_profile, sm.SoilProfile)
        org_npts = in_sig.npts
        # Determine number of zeros required for zero padding
        if gibbs is not None:  # If it is an integer then add to the exponent of 2 to remove the Gibbs effect
            nindex = int(np.ceil(np.log2(org_npts))) + gibbs
            new_len = 2 ** nindex
            diff_len = new_len - org_npts
            front = 0  # int(diff_len / 2)
            back = diff_len - front
        else:  # record length must be a factor of 4
            back = int(4 * np.ceil(org_npts / 4) - org_npts)
            front = 0
        # pad the input signal with zeros to make length a factor of 4
        in_sig = eqsig.AccSignal(np.pad(in_sig.values, (front, back), mode='constant'), in_sig.dt)
        self.t_inds = np.arange(0, in_sig.npts - 1, int(t_inc / in_sig.dt), dtype=int)  # indices of time intervals
        self.t_inds = np.insert(self.t_inds, len(self.t_inds), in_sig.npts)  # make sure last value is in list
        ics = np.array((self.t_inds[1:] + self.t_inds[:-1]) / 2, dtype=int)  # halfway between indices of time intervals
        points = int(in_sig.npts / 2)
        freqs = np.arange(0, points) / (points * in_sig.dt * 2)  # All the frequencies in the Stockwell transform
        # freqs_d2 = freqs[:int(points / 2)]  # Frequencies needed to compute the transfer function
        # Steps 1 & 2) Conduct and equivalent linear analysis and obtain strain time series
        pysra_profile, strains = compute_pysra_strain_time_series(soil_profile, in_sig, target_height=0.5,
                                                                  wave_field=wave_field)
        # 3a) Calculate the effective strain in each time interval for each slice of the soil profile
        iside = int(t_win / in_sig.dt)  # width of window in increments
        eff_strains = []
        for i, depth in enumerate(pysra_profile.depth):
            eff_strains.append([])
            for tt in range(len(ics)):
                if strain_at_incs:
                    # Peak strain inside a +/- t_win window centred on the interval
                    si = max([ics[tt] - iside, 0])
                    ei = min([ics[tt] + iside, len(strains[i])])
                    max_strain = max(abs(strains[i][si: ei]))
                else:  # Note this is different to the pysra eff. strain -which estimates the peak from the strain tf
                    max_strain = max(abs(strains[i]))
                eff_strains[i].append(strain_ratio * max_strain)
        # 4a) Obtain the reduction in secant stiffness and increase in damping from pore pressure at each time interval
        # Parameters defined in Millen et al. (2020)
        dxi_ld_liq = 0.3  # (Delta-xi-low-density-at-liquefaction)
        dxi_hd_liq = 0.1  # (Delta-xi-high-density-at-liquefaction)
        gr_ld_liq = 0.03  # (secant-shear-modulus-ratio-low-density-at-liquefaction)
        gr_hd_liq = 0.15  # (secant-shear-modulus-ratio-high-density-at-liquefaction)
        x_ld = 0.45  # Low density threshold
        x_hd = 0.8  # high density threshold
        ru_gr_low = 0.3  # Low pore pressure ratio threshold for secant shear modulus change
        ru_gr_high = 0.8  # High pore pressure ratio threshold for secant shear modulus change
        ru_dx_low = 0.5  # Low pore pressure ratio threshold for damping increment change
        ru_dx_high = 1.0  # High pore pressure ratio threshold for damping increment change
        min_g_liq_vs_g0 = 0.001  # Limiting ratio between the shear modulus at liquefaction divided by initial shear mod
        max_xi_liq = 0.3  # Maximum value for damping
        # The arrays for the secant stiffness ratio and damping increase at each time interval from pore pressure
        gred_is = np.ones((soil_profile.n_layers, len(ics)))
        dxi_is = np.zeros((soil_profile.n_layers, len(ics)))
        if rus is not None:  # if pore pressure time series is defined then calculate the pore pressure corrections
            assert len(rus[0]) == org_npts, (len(rus[0]), org_npts)
            gr_liqs = np.ones(soil_profile.n_layers)  # The secant stiffness ratio at liquefaction for each soil layer
            dxi_liqs = np.zeros(soil_profile.n_layers)  # The damping increase at liquefaction for each soil layer
            for i in range(soil_profile.n_layers):
                dr = soil_profile.layer(i + 1).relative_density
                if dr is None:
                    # Only an error if this layer actually generates pore pressure
                    if max(rus[i]):
                        raise ValueError('Relative density must be set for layer: ', i + 1)
                    else:
                        continue
                # Calculate the secant stiffness ratio at liquefaction based on relative density
                gr_liq = np.where(dr < x_ld, gr_ld_liq, gr_ld_liq + (gr_hd_liq - gr_ld_liq) / (x_hd - x_ld) * (dr - x_ld))
                np.clip(gr_liq, None, gr_hd_liq, out=gr_liq)
                gr_liqs[i] = gr_liq
                # Calculate the damping increase at liquefaction based on relative density
                dx_max = np.where(dr < x_ld, dxi_ld_liq, dxi_ld_liq + (dxi_hd_liq - dxi_ld_liq) / (x_hd - x_ld) * (dr - x_ld))
                np.clip(dx_max, dxi_hd_liq, None, out=dx_max)
                dxi_liqs[i] = dx_max
            # zero pad pore pressure time series to be consistent with acceleration time series
            rus = np.pad(rus, [(0, 0), (front, back)], mode='constant')
            # Calculate the secant stiffness ratio at each time step based on pore pressure ratio (ru)
            greds = np.where(rus < ru_gr_low, 1, 1 - (1 - gr_liqs[:, np.newaxis]) / (ru_gr_high - ru_gr_low) * (rus - ru_gr_low))
            np.clip(greds, gr_liqs[:, np.newaxis], None, out=greds)
            # Calculate the damping increase at each time step based on pore pressure ratio (ru)
            dxs = np.where(rus < ru_dx_low, 0, dxi_liqs[:, np.newaxis] / (ru_dx_high - ru_dx_low) * (rus - ru_dx_low))
            np.clip(dxs, None, dxi_liqs[:, np.newaxis], out=dxs)
            # Calculate the secant stiffness ratio and damping increase at each time interval
            for tt in range(len(ics)):
                gred_is[:, tt] = np.mean(greds[:, self.t_inds[tt]: self.t_inds[tt + 1]], axis=1)
                dxi_is[:, tt] = np.mean(dxs[:, self.t_inds[tt]: self.t_inds[tt + 1]], axis=1)
        # 5) Develop input-to-surface transfer functions
        self.tfs = []  # A list to store the transfer functions at each increment
        for tt in range(len(self.t_inds[1:])):
            layers = []
            for i, depth in enumerate(pysra_profile.depth):
                org_layer = pysra_profile.location('outcrop', depth=depth).layer
                org_layer.strain = eff_strains[i][tt]  # Apply effective strain (Step 3b)
                shear_vel0 = org_layer.initial_shear_vel
                shear_vel = org_layer.shear_vel
                damping = org_layer.damping
                slice_thickness = org_layer.thickness
                # get pore pressure effects
                ind = soil_profile.get_layer_index_by_depth(depth) - 1
                dx = dxi_is[ind][tt]
                gred = gred_is[ind][tt]
                # 4b) determine the new shear modulus and damping accounting for strain and pore pressure
                xi_liq = min([damping + dx, max_xi_liq])
                vs_liq = max([np.sqrt(min_g_liq_vs_g0) * shear_vel0, shear_vel * np.sqrt(gred)])
                pysra_sl = pysra.site.SoilType("soil", org_layer.unit_wt, None, xi_liq)
                lay = pysra.site.Layer(pysra_sl, slice_thickness, vs_liq)
                layers.append(lay)
            # rebuild the pysra_profile with the new properties
            strain_comp_profile = pysra.site.Profile(layers, wt_depth=soil_profile.gwl)
            # determine the new transfer function for this interval
            freq1, tf_values = lq.sra.calc_pysra_tf(strain_comp_profile, freqs, wave_field=wave_field)
            # refactor transfer function to be applied to Stockwell transform
            tf_values = np.flipud(np.conj(tf_values))
            # tf_values = np.concatenate((tf_values, np.flipud(np.conj(tf_values))))
            tf_values = tf_values.reshape(len(tf_values), 1)
            self.tfs.append(tf_values)
        # 6) Obtain the Stockwell transform of the input motion
        in_sig.swtf = eqsig.stockwell.transform(in_sig.values)
        # 7) Obtain the surface Stockwell transform by multiplying input Stockwell transform by transfer functions
        ps = []
        for ss in range(len(self.tfs)):
            p1 = self.tfs[ss] * in_sig.swtf[:, self.t_inds[ss]:self.t_inds[ss + 1]]
            ps.append(p1)
        surf_st = np.concatenate(ps, axis=1)
        # 8) Perform the inverse Stockwell transform to obtain the surface acceleration time series
        iacc = eqsig.stockwell.itransform(surf_st)
        # save the surface acceleration series as a parameter
        self.surf_sig = eqsig.AccSignal(iacc, in_sig.dt)
        if store == 'all':  # Store the Stockwell transforms of the input motion if needed
            # self.tfs = tfs
            self.freqs = freqs
            self.in_sig = in_sig
            self.surf_sig.stockwell = surf_st
            self.in_sig.smooth_freqs = np.linspace(0.2, 1 / (4 * in_sig.dt), 30)
            self.surf_sig.smooth_freqs = np.linspace(0.2, 1 / (4 * in_sig.dt), 30)
def compute_pysra_strain_time_series(soil_profile, in_sig, d_inc=None, target_height=1.0, wave_field='outcrop', in_loc=-1, atype='eqlin'):
    """
    Perform an equivalent linear analysis and obtain the strain time series at many depths

    Parameters
    ----------
    soil_profile: sm.SoilProfile object
    in_sig: eqsig.AccSignal object
        Input motion at base
    d_inc: float
        Target depth increment for each layer in soil_profile
    target_height: float
        Target depth increment for whole soil profile
    wave_field: str
        If input motion should be used as an `outcrop` or '`within` motion.
    in_loc: int
        If -1 then input motion at base, if 0 then input motion at surface
    atype: str
        Analysis type: 'eqlin', 'fd', 'fdk' (frequency dependent, Kausel
        smooth spectrum) or 'linear'

    Returns
    -------
    profile: pysra.site.Profile
        The discretised pysra profile used in the analysis
    strains: list of array_like
        The strain time series at each depth in ``profile.depth``
    """
    import pysra
    # Division by 9.8 presumably converts m/s2 to g — TODO confirm units of in_sig
    m = pysra.motion.TimeSeriesMotion(filename=in_sig.label, description=None, time_step=in_sig.dt,
                                      accels=in_sig.values / 9.8)
    if d_inc is None:
        d_inc = 1.0 * np.ones(soil_profile.n_layers)
    profile = lq.sra.sm_profile_to_pysra(soil_profile, target_height=target_height, d_inc=d_inc)
    # NOTE(review): strain_ratio is hard-wired to None, so `kw` is always
    # empty and the calculators run with their default strain ratio.
    strain_ratio = None
    kw = {}
    if strain_ratio is not None:
        kw['strain_ratio'] = strain_ratio
    if atype == 'eqlin':
        calc = pysra.propagation.EquivalentLinearCalculator(**kw)
    elif atype == 'fd':
        calc = pysra.propagation.FrequencyDependentEqlCalculator(use_smooth_spectrum=False, **kw)
    elif atype == 'fdk':  # k=Kausel
        calc = pysra.propagation.FrequencyDependentEqlCalculator(use_smooth_spectrum=True, **kw)
    elif atype == 'linear':
        calc = pysra.propagation.LinearElasticCalculator()
    else:
        raise ValueError(f'atype must: "eqlin", "fd", "fdk", "linear". Not {atype}')
    if in_loc == -1:
        in_depth = soil_profile.height
    else:
        in_depth = 0.0
    calc(m, profile, profile.location(wave_field, depth=in_depth))
    # Request the strain time series at every slice depth of the profile
    outs = []
    for i, depth in enumerate(profile.depth):
        outs.append(pysra.output.StrainTSOutput(pysra.output.OutputLocation('within', depth=depth),
                                                in_percent=False))
    outputs = pysra.output.OutputCollection(outs)
    outputs(calc)
    strains = []
    for i, depth in enumerate(profile.depth):
        strains.append(outputs[i].values)
    return profile, strains
| StarcoderdataPython |
3269844 | import datetime
from nasdaq_100_ticker_history import tickers_as_of
def test_basics() -> None:
    """The index must contain AMZN and hold at least 100 tickers mid-2020."""
    mid_2020 = tickers_as_of(2020, 6, 1)
    assert 'AMZN' in mid_2020
    assert len(mid_2020) >= 100
def _test_one_swap(as_of_date: datetime.date,
                   removed_ticker: str,
                   added_ticker: str,
                   expected_number_of_tickers: int) -> None:
    """Assert that `removed_ticker` left and `added_ticker` joined the index
    exactly on `as_of_date`, with the ticker count unchanged across the swap.
    """
    on_change = tickers_as_of(as_of_date.year, as_of_date.month, as_of_date.day)
    assert len(on_change) == expected_number_of_tickers

    day_before = as_of_date - datetime.timedelta(days=1)
    before_change = tickers_as_of(day_before.year, day_before.month, day_before.day)
    assert len(before_change) == expected_number_of_tickers

    # Membership flips exactly at the change date.
    assert removed_ticker in before_change
    assert added_ticker not in before_change
    assert removed_ticker not in on_change
    assert added_ticker in on_change
def _test_at_year_boundary(year: int) -> None:
    """prove the tickers at the beginning of the year match the set at the end of the
    previous year.
    """
    jan_first = datetime.date(year, 1, 1)
    dec_last = jan_first - datetime.timedelta(days=1)

    start_of_year = tickers_as_of(jan_first.year, jan_first.month, jan_first.day)
    end_of_prior_year = tickers_as_of(dec_last.year, dec_last.month, dec_last.day)
    assert end_of_prior_year == start_of_year
def test_tickers_2022() -> None:
    """Verify the 2022 index substitutions (swaps and the Exelon spin-off)."""
    num_tickers_2022 = 101
    # On Jan 24, Old Dominion replaces Peloton
    _test_one_swap(datetime.date.fromisoformat('2022-01-24'), 'PTON', 'ODFL', num_tickers_2022)
    # On Feb 2, Excelon EXC split off Constellation Energy CEG, which remained in the index
    tickers_added_2022_02_02 = frozenset(('CEG',))
    assert tickers_added_2022_02_02.isdisjoint(tickers_as_of(2022, 2, 1))
    assert tickers_added_2022_02_02.issubset(tickers_as_of(2022, 2, 2))
    # The spin-off adds a ticker without removing one.
    assert len(tickers_as_of(2022, 2, 2)) == num_tickers_2022 + 1
    num_tickers_2022 += 1
    # AMD completed its acquisition of Xilinx XLNX on or about 14 Feb.
    # So AstraZeneca AZN replaces XLNX as of 22 Feb 2022.
    _test_one_swap(datetime.date.fromisoformat('2022-02-22'), 'XLNX', 'AZN', num_tickers_2022)
def test_year_boundary_2021_2022() -> None:
    """Ticker set is continuous across the 2021/2022 New Year boundary."""
    _test_at_year_boundary(2022)
def test_2021_annual_changes() -> None:
    """Verify the December 2021 annual reconstitution of the index."""
    num_tickers_2021_end_of_year = 101
    # Annual 2021 changes
    # https://www.nasdaq.com/press-release/annual-changes-to-the-nasdaq-100-indexr-2021-12-10-0
    #
    # On December 10, 2021 Nasdaq announced that six new companies would join the index
    # prior to the market open on December 20, 2021.
    # They are Airbnb (ABNB), Datadog (DDOG), Fortinet (FTNT), Lucid Group (LCID),
    # Palo Alto Networks (PANW), and Zscaler (ZS).
    # They will replace CDW (CDW), Cerner (CERN), Check Point (CHKP), Fox Corporation (FOXA/FOX),
    # Incyte (INCY), and Trip.com (TCOM).
    # https://greenstocknews.com/news/nasdaq/lcid/annual-changes-to-the-nasdaq-100-index
    # This removes 7 tickers while adding 6, so total number of tickers goes to 101
    assert len(tickers_as_of(2021, 12, 17)) == num_tickers_2021_end_of_year + 1
    tickers_removed_2021_12_20 = frozenset(('CDW', 'CERN', 'CHKP', 'FOX', 'FOXA', 'INCY', 'TCOM'))
    assert tickers_removed_2021_12_20.issubset(tickers_as_of(2021, 12, 17))
    tickers_added_2021_12_20 = frozenset(('ABNB', 'DDOG', 'FTNT', 'LCID', 'PANW', 'ZS'))
    assert tickers_added_2021_12_20.isdisjoint(tickers_as_of(2021, 12, 17))
    assert len(tickers_as_of(2021, 12, 20)) == num_tickers_2021_end_of_year
    assert tickers_removed_2021_12_20.isdisjoint(tickers_as_of(2021, 12, 20))
    assert tickers_added_2021_12_20.issubset(tickers_as_of(2021, 12, 20))
def test_tickers_2021() -> None:
    """Verify the mid-year 2021 index substitutions."""
    num_tickers_2021 = 102
    # On July 21, Honeywell replaces Alexion
    _test_one_swap(datetime.date.fromisoformat('2021-07-21'), 'ALXN', 'HON', num_tickers_2021)
    # On Aug 26, Crowdstrike replaced Maxim Integrated Products, who is being acquired by Analog Devices.
    _test_one_swap(datetime.date.fromisoformat('2021-08-26'), 'MXIM', 'CRWD', num_tickers_2021)
def test_year_boundary_2020_2021() -> None:
    """Ticker set is continuous across the 2020/2021 New Year boundary."""
    _test_at_year_boundary(2021)
def test_tickers_2020() -> None:
    """Check the mid-year 2020 swaps; membership held at 103 throughout the year.

    Fix: the Aug 24 and Oct 19 checks previously passed the literal 103
    instead of the declared ``num_tickers_2020`` constant; they now use the
    constant consistently with the other checks in this test.
    """
    num_tickers_2020: int = 103
    _test_at_year_boundary(2020)
    # On April 20, Dexcom replaced American Airlines Group in the index
    _test_one_swap(datetime.date.fromisoformat('2020-04-20'), 'AAL', 'DXCM', num_tickers_2020)
    # On April 30, Zoom Video Communications replaced <NAME>
    _test_one_swap(datetime.date.fromisoformat('2020-04-30'), 'WLTW', 'ZM', num_tickers_2020)
    # On June 22, DocuSign, Inc. (DOCU) will replace United Airlines Holdings, Inc. (Nasdaq: UAL)
    _test_one_swap(datetime.date.fromisoformat('2020-06-22'), 'UAL', 'DOCU', num_tickers_2020)
    # On Jul 20, Moderna MRNA replaces CoStar Group CGSP
    # https://www.globenewswire.com/news-release/2020/07/13/2061339/0/en/Moderna-Inc-to-Join-the-NASDAQ-100-Index-Beginning-July-20-2020.html
    _test_one_swap(datetime.date.fromisoformat('2020-07-20'), 'CSGP', 'MRNA', num_tickers_2020)
    # On 24 Aug 2020, Pinduoduo, Inc. PDD replaced NetApp, Inc. NTAP in the NASDAQ-100 Index.
    # https://www.globenewswire.com/news-release/2020/08/15/2078875/0/en/Pinduoduo-Inc-to-Join-the-NASDAQ-100-Index-Beginning-August-24-2020.html
    _test_one_swap(datetime.date.fromisoformat('2020-08-24'), 'NTAP', 'PDD', num_tickers_2020)
    # Western Digital Corp (WDC) is replaced by Keurig Dr Pepper Inc. (KDP) as of Oct 19, 2020.
    # https://www.globenewswire.com/news-release/2020/10/10/2106521/0/en/Keurig-Dr-Pepper-Inc-to-Join-the-NASDAQ-100-Index-Beginning-October-19-2020.html
    _test_one_swap(datetime.date.fromisoformat('2020-10-19'), 'WDC', 'KDP', num_tickers_2020)
def test_2020_annual_changes() -> None:
    """Annual 2020 reconstitution effective Dec 21: 6 names in, 7 symbols out (103 -> 102)."""
    # Annual 2020 changes
    # https://www.nasdaq.com/press-release/annual-changes-to-the-nasdaq-100-index-2020-12-11
    #
    # 6 companies added; 6 removed. However, Liberty Global PLC has 2 symbols: (Nasdaq: LBTYA/LBTYK)
    # So total tickers change from 103 to 102.
    # Effective date: 2020-12-21
    assert len(tickers_as_of(2020, 12, 18)) == 103
    tickers_removed_12_21 = frozenset(('BMRN', 'CTXS', 'EXPE', 'LBTYA', 'LBTYK', 'TTWO', 'ULTA'))
    assert tickers_removed_12_21.issubset(tickers_as_of(2020, 12, 18))
    tickers_added_12_21 = frozenset(('AEP', 'MRVL', 'MTCH', 'OKTA', 'PTON', 'TEAM'))
    assert tickers_added_12_21.isdisjoint(tickers_as_of(2020, 12, 18))
    # After the effective date: count down by one, removals gone, additions present.
    assert len(tickers_as_of(2020, 12, 21)) == 102
    assert tickers_removed_12_21.isdisjoint(tickers_as_of(2020, 12, 21))
    assert tickers_added_12_21.issubset(tickers_as_of(2020, 12, 21))
def test_tickers_2019() -> None:
    """Annual 2019 reconstitution (Dec 23) plus the Nov 19 Celgene/Exelon swap."""
    num_tickers_2019: int = 103
    _test_at_year_boundary(2019)
    # 6 tickers added and removed on 12/23/2019
    # https://finance.yahoo.com/news/annual-changes-nasdaq-100-index-010510822.html
    tickers_2019_dec_23 = tickers_as_of(2019, 12, 23)
    assert len(tickers_2019_dec_23) == num_tickers_2019
    dec_23_removals = frozenset(('HAS', 'HSIC', 'JBHT', 'MYL', 'NLOK', 'WYNN'))
    assert tickers_2019_dec_23.isdisjoint(dec_23_removals)
    dec_23_additions = frozenset(('ANSS', 'CDW', 'CPRT', 'CSGP', 'SGEN', 'SPLK'))
    assert dec_23_additions.issubset(tickers_2019_dec_23)
    # The trading day before the change: removals still present, additions absent.
    tickers_2019_dec_20 = tickers_as_of(2019, 12, 20)
    assert len(tickers_2019_dec_20) == num_tickers_2019
    assert dec_23_removals.issubset(tickers_2019_dec_20)
    assert tickers_2019_dec_20.isdisjoint(dec_23_additions)
    # 1 swap Nov 19
    # https://www.nasdaq.com/press-release/exelon-corporation-to-join-the-nasdaq-100-index-beginning-november-21-2019-2019-11-18
    _test_one_swap(datetime.date.fromisoformat('2019-11-19'), 'CELG', 'EXC', num_tickers_2019)
    # there was a record of 21st Century Fox changing to Fox Corp. But as near as I can tell, the ticker
    # symbols were the same.
def test_tickers_2018() -> None:
    """Annual 2018 reconstitution (Dec 24) plus three mid-year swaps."""
    num_tickers_2018: int = 103
    _test_at_year_boundary(2018)
    # 6 tickers added and removed on 12/24/2018
    # https://www.nasdaq.com/about/press-center/annual-changes-nasdaq-100-index-0
    tickers_2018_dec_23 = tickers_as_of(2018, 12, 23)
    assert len(tickers_2018_dec_23) == num_tickers_2018
    tickers_2018_dec_24 = tickers_as_of(2018, 12, 24)
    assert len(tickers_2018_dec_24) == num_tickers_2018
    dec_24_removals = frozenset(('ESRX', 'HOLX', 'QRTEA', 'SHPG', 'STX', 'VOD'))
    assert dec_24_removals.issubset(tickers_2018_dec_23)
    assert tickers_2018_dec_24.isdisjoint(dec_24_removals)
    dec_24_additions = frozenset(('AMD', 'LULU', 'NTAP', 'UAL', 'VRSN', 'WLTW'))
    assert dec_24_additions.issubset(tickers_2018_dec_24)
    # 11/19/2018 XEL replaces XRAY
    # https://www.nasdaq.com/about/press-center/xcel-energy-inc-join-nasdaq-100-index-beginning-november-19-2018
    _test_one_swap(datetime.date.fromisoformat('2018-11-19'), 'XRAY', 'XEL', num_tickers_2018)
    # 11/5/2018 NXPI replaces CA
    # (link broken):
    # https://business.nasdaq.com/mediacenter/pressreleases/1831989/nxp-semiconductors-nv-to-join-the-nasdaq-100-index-beginning-november-5-2018
    _test_one_swap(datetime.date.fromisoformat('2018-11-05'), 'CA', 'NXPI', num_tickers_2018)
    # 7/23/2018 PEP replaces DISH
    _test_one_swap(datetime.date.fromisoformat('2018-07-23'), 'DISH', 'PEP', num_tickers_2018)
def test_tickers_2017() -> None:
    """Five mid-year 2017 swaps plus the Dec 18 annual reconstitution (6 out, 5 in)."""
    num_tickers_2017: int = 104
    # 2/7/2017 JBHT replaced NXPI
    _test_one_swap(datetime.date.fromisoformat('2017-02-07'), 'NXPI', 'JBHT', num_tickers_2017)
    # 3/20/2017 IDXX replaced SBAC
    _test_one_swap(datetime.date.fromisoformat('2017-03-20'), 'SBAC', 'IDXX', num_tickers_2017)
    # 4/24/2017 WYNN replaced TRIP
    _test_one_swap(datetime.date.fromisoformat('2017-04-24'), 'TRIP', 'WYNN', num_tickers_2017)
    # 6/19/2017 MELI replaced YHOO
    _test_one_swap(datetime.date.fromisoformat('2017-06-19'), 'YHOO', 'MELI', num_tickers_2017)
    # 10/23/2017 ALGN replaced MAT
    _test_one_swap(datetime.date.fromisoformat('2017-10-23'), 'MAT', 'ALGN', num_tickers_2017)
    # annual changes for 2017; effective Dec 18, 2017
    # https://www.nasdaq.com/about/press-center/annual-changes-nasdaq-100-index-2
    dec_18_removals = frozenset(('AKAM', 'DISCA', 'DISCK', 'NCLH', 'TSCO', 'VIAB'))
    dec_18_additions = frozenset(('ASML', 'CDNS', 'SNPS', 'TTWO', 'WDAY'))
    tickers_dec_17 = tickers_as_of(2017, 12, 17)
    assert len(tickers_dec_17) == num_tickers_2017
    assert dec_18_removals.issubset(tickers_dec_17)
    assert tickers_dec_17.isdisjoint(dec_18_additions)
    tickers_dec_18 = tickers_as_of(2017, 12, 18)
    # this was a remove 6 and add 5 change due to two classes of Discovery Communications: DISCA and DISCK
    assert len(tickers_dec_18) == num_tickers_2017 - 1
    assert dec_18_additions.issubset(tickers_dec_18)
    assert tickers_dec_18.isdisjoint(dec_18_removals)
def test_year_boundary_2016_2017() -> None:
    """Membership must be consistent across the 2016 -> 2017 year boundary."""
    _test_at_year_boundary(2017)
def test_2016_annual_changes() -> None:
    """Annual 2016 reconstitution: 4 out, 4 in, effective Dec 19, 2016.

    Announced Dec 9; see
    https://en.wikipedia.org/wiki/Nasdaq-100#Changes_in_2016
    """
    before = tickers_as_of(2016, 12, 18)
    after = tickers_as_of(2016, 12, 19)
    # A 4-for-4 swap leaves the membership count unchanged.
    assert len(before) == len(after)
    removed = frozenset(('BBBY', 'NTAP', 'SRCL', 'WFM'))
    added = frozenset(('CTAS', 'HAS', 'HOLX', 'KLAC'))
    # Removed names were members before the effective date and gone afterwards.
    assert removed.issubset(before)
    assert after.isdisjoint(removed)
    # Added names were absent before and present afterwards.
    assert added.isdisjoint(before)
    assert added.issubset(after)
def test_tickers_2016() -> None:
    """2016 membership: starts the year at 105 tickers, ends at 104, via several changes."""
    num_tickers_2016_boy = 105  # num tickers at the start of 2016
    num_tickers_2016_eoy = 104  # number of tickers at the end of 2016
    assert len(tickers_as_of(2016, 1, 1)) == num_tickers_2016_boy
    assert len(tickers_as_of(2016, 12, 31)) == num_tickers_2016_eoy
    # https://ir.nasdaq.com/news-releases/news-release-details/csx-corporation-join-nasdaq-100-index-beginning-february-22-2016
    _test_one_swap(datetime.date.fromisoformat('2016-02-22'), 'KLAC', 'CSX', num_tickers_2016_boy)
    # https://www.nasdaq.com/about/press-center/netease-inc-join-nasdaq-100-index-beginning-march-16-2016
    _test_one_swap(datetime.date.fromisoformat('2016-03-16'), 'SNDK', 'NTES', num_tickers_2016_boy)
    # adds BATRA, BATRK as of Apr 18; no replacements
    # https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-37
    apr_17_tickers = tickers_as_of(2016, 4, 17)
    assert len(apr_17_tickers) == 105
    apr_18_tickers = tickers_as_of(2016, 4, 18)
    assert len(apr_18_tickers) == 107
    apr_18_additions = frozenset(('BATRA', 'BATRK'))
    assert apr_18_additions.isdisjoint(apr_17_tickers)
    assert apr_18_additions.issubset(apr_18_tickers)
    # https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-38
    # this is a 4 for one change as of June 10
    jun_09_tickers = tickers_as_of(2016, 6, 9)
    assert len(jun_09_tickers) == 107
    jun_10_tickers = tickers_as_of(2016, 6, 10)
    assert len(jun_10_tickers) == 104
    jun_10_removals = frozenset(('LMCA', 'LMCK', 'BATRA', 'BATRK'))
    assert jun_10_removals.issubset(jun_09_tickers)
    assert jun_10_tickers.isdisjoint(jun_10_removals)
    jun_10_additions = frozenset(('XRAY',))
    assert jun_10_additions.isdisjoint(jun_09_tickers)
    assert jun_10_additions.issubset(jun_10_tickers)
    # https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-39
    _test_one_swap(datetime.date.fromisoformat('2016-07-18'), 'ENDP', 'MCHP', num_tickers_2016_eoy)
    # https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-40
    _test_one_swap(datetime.date.fromisoformat('2016-10-19'), 'LLTC', 'SHPG', num_tickers_2016_eoy)
| StarcoderdataPython |
3215720 | from aiocloudflare.commons.auth import Auth
class Policies(Auth):
    """Cloudflare Access application policies endpoint.

    Declares the URL path pieces for
    ``accounts/{account_id}/access/apps/{app_id}/policies``; presumably the
    Auth base class assembles them into the request URL — confirm against
    aiocloudflare's Auth implementation.
    """
    # URL path components consumed by the Auth base class.
    _endpoint1 = "accounts"
    _endpoint2 = "access/apps"
    _endpoint3 = "policies"
| StarcoderdataPython |
99592 | <filename>marlin-firmware/buildroot/share/PlatformIO/scripts/mks_encrypt.py
import os,sys
# 'env' is injected into this script's namespace by PlatformIO's SCons runner.
Import("env")
from SCons.Script import DefaultEnvironment
# Board configuration object (exposes board_build.* options from platformio.ini).
board = DefaultEnvironment().BoardConfig()
# Encrypt ${PROGNAME}.bin and save it as build.firmware ('Robin.bin')
def encrypt(source, target, env):
    """SCons post-action: XOR-encrypt ${PROGNAME}.bin into build.firmware.

    Bytes at offsets [320, 31040) are XOR'ed with a rolling 32-byte key
    (the MKS Robin bootloader scheme); all other bytes are copied verbatim.
    ``source``/``target`` are SCons node lists; ``env`` is unused but required
    by the post-action signature.

    Fix: the original opened both files by hand and only closed them in a
    ``finally`` around the copy loop, leaking the handles if ``getsize`` (or
    the second ``open``) raised; ``with`` now guarantees both are closed.
    """
    key = [0xA3, 0xBD, 0xAD, 0x0D, 0x41, 0x11, 0xBB, 0x8D, 0xDC, 0x80, 0x2D, 0xD0, 0xD2, 0xC4, 0x9B, 0x1E, 0x26, 0xEB, 0xE3, 0x33, 0x4A, 0x15, 0xE4, 0x0A, 0xB3, 0xB1, 0x3C, 0x93, 0xBB, 0xAF, 0xF7, 0x3E]
    length = os.path.getsize(target[0].path)
    with open(target[0].path, "rb") as firmware, \
         open(target[0].dir.path + '/' + board.get("build.firmware"), "wb") as robin:
        position = 0
        while position < length:
            byte = firmware.read(1)
            if position >= 320 and position < 31040:
                byte = chr(ord(byte) ^ key[position & 31])
                # chr() yields str on Python 3; re-encode so write() gets bytes.
                if sys.version_info[0] > 2:
                    byte = bytes(byte, 'latin1')
            robin.write(byte)
            position += 1
# Register the encryption step only when the board defines an output firmware
# name (board_build.firmware); otherwise abort the build with a hint.
# NOTE(review): `.keys()` is redundant if board.get("build") is a plain dict.
if 'firmware' in board.get("build").keys():
    env.AddPostAction("$BUILD_DIR/${PROGNAME}.bin", encrypt);
else:
    print("You need to define output file via board_build.firmware = 'filename' parameter", file=sys.stderr)
    exit(1);
| StarcoderdataPython |
4804402 | # cableController.py
# shotmanager
#
# The cable movement controller.
# Runs as a DroneKit-Python script.
#
# Created by <NAME> and <NAME> on 1/21/2015.
# Copyright (c) 2016 3D Robotics.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from catmullRom import CatmullRom
from vector3 import *
from numpy import linspace
import math
import threading
import itertools
# epsilon to detect if we've reached a target in meters
TARGET_EPSILON_M = 0.1
# Length of each segment that is assigned a maximum speed based on its maximum curvature
# (i.e. the resolution of the curvature map along the cable's arc length).
CURVATURE_MAP_RES = 1. # meters
def goldenSection(func, a, b, tol = 1e-5):
    """Golden-section search: return the x in [a, b] that minimizes func(x).

    Assumes func is unimodal on the interval; iterates until the two interior
    probe points are within tol of each other, then returns the midpoint of
    the final bracket.
    """
    invphi = 0.61803398875  # golden-ratio conjugate, 1/phi
    lo, hi = a, b
    c = hi - invphi * (hi - lo)
    d = lo + invphi * (hi - lo)
    yc = func(c)
    yd = func(d)
    while abs(c - d) > tol:
        if yc < yd:
            # Minimum is left of d: shrink the bracket from the right,
            # reusing c (and its value) as the new right-hand probe.
            hi = d
            d = c
            yd = yc
            c = hi - invphi * (hi - lo)
            yc = func(c)
        else:
            # Minimum is right of c: shrink the bracket from the left,
            # reusing d (and its value) as the new left-hand probe.
            lo = c
            c = d
            yc = yd
            d = lo + invphi * (hi - lo)
            yd = func(d)
    return (hi + lo) / 2.
def constrain(val, minval, maxval):
    """Clamp val to the closed interval [minval, maxval].

    The lower bound is checked first, mirroring the original ordering (so
    minval wins for a degenerate minval > maxval interval).
    """
    if val < minval:
        return minval
    if val > maxval:
        return maxval
    return val
class CableController():
    def __init__(self, points, maxSpeed, minSpeed, tanAccelLim, normAccelLim, smoothStopP, maxAlt):
        """Build a controller that moves the vehicle along a spline "cable".

        points      -- list of Vector3 waypoints defining the cable (NED, meters)
        maxSpeed    -- maximum speed along the cable, m/s
        minSpeed    -- floor speed used on tight curves, m/s
        tanAccelLim -- max tangential acceleration along the cable, m/s^2
        normAccelLim-- max acceleration normal to the cable, m/s^2
        smoothStopP -- P gain shaping smooth stops at targets/endpoints
        maxAlt      -- max altitude in meters (NEU) or None for no limit

        Spawns a daemon worker thread that incrementally computes the
        curvature map used for look-ahead speed limiting.
        """
        # Maximum tangential acceleration along the cable, m/s^2
        self.tanAccelLim = tanAccelLim
        # Maximum acceleration normal to the cable, m/s^2
        self.normAccelLim = normAccelLim
        # Smoothness of stops at the endpoints and at targets along the cable
        self.smoothStopP = smoothStopP
        # Maximum speed along the cable, m/s
        self.maxSpeed = maxSpeed
        # Minimum speed along the cable, m/s
        self.minSpeed = minSpeed
        # Minimum allowable position.z, meters (AKA max altitude), Convert Altitude (NEU) to NED
        if maxAlt is not None:
            self.posZLimit = -maxAlt
        else:
            self.posZLimit = None
        # Input speed
        self.desiredSpeed = 0.
        # Current speed along the cable, m/s
        self.speed = 0.
        # Catmull-Rom spline with added virtual tangency control points at either end
        self.spline = CatmullRom([points[0]*2 - points[1]]+points+[points[-1]*2 - points[-2]])
        # Number of spline segments (should really come from CatmullRom)
        self.numSegments = len(points)-1
        # Current position in P domain, parameter normalized to cable total arc length;
        # starts at 1.0, i.e. at the far end of the cable.
        self.currentP = 1.0
        # Target position in P domain
        self.targetP = self.currentP
        # Previously reached target, once set
        self.prevReachedTarget = None
        # Current segment, ranges from 0 to # of segments-1
        self.currentSeg, self.currentU = self.spline.arclengthToNonDimensional(self.currentP)
        # Current position as a Vector3, meters
        self.position = self.spline.position(self.currentSeg, self.currentU)
        # Current velocity as a Vector3, m/s
        self.velocity = Vector3()
        # Flag to indicate that the maximum altitude has been exceeded
        self.maxAltExceeded = False
        # Number of segments in curvature map
        self.curvatureMapNumSegments = int(math.ceil(self.spline.totalArcLength/CURVATURE_MAP_RES))
        # Number of joints in curvature map
        self.curvatureMapNumJoints = self.curvatureMapNumSegments+1
        # Curvature map joint positions in p domain
        self.curvatureMapJointsP, self.curvatureMapSegLengthP = linspace(0., 1., self.curvatureMapNumJoints, retstep = True)
        # Curvature map segment length in meters
        self.curvatureMapSegLengthM = self.curvatureMapSegLengthP * self.spline.totalArcLength
        # Non-dimensional curvature map joint position (cache)
        self.curvatureMapJointsNonDimensional = [None for _ in range(self.curvatureMapNumJoints)]
        # Speed limits for each curvature map segment (cache)
        self.curvatureMapSpeedLimits = [None for _ in range(self.curvatureMapNumSegments)]
        # Thread lock on curvature map segments
        self.curvatureMapLocks = [threading.Lock() for _ in range(self.curvatureMapNumSegments)]
        self.curvatureMapSegmentsComputedLock = threading.Lock()
        # number of map segments that have been computed by the curvatureMapThread
        self.curvatureMapSegmentsComputed = 0
        # flag that indicates to the thread to die
        self.poisonPill = False
        # setup a worker thread to compute map segment maximum speeds
        self.curvatureMapThread = threading.Thread(target=self._computeCurvatureMap)
        self.curvatureMapThread.setDaemon(True)
        # start the worker thread
        self.curvatureMapThread.start()
    def __del__(self):
        """Signal the curvature-map worker to exit and wait briefly for it."""
        self.poisonPill = True
        self.curvatureMapThread.join(timeout = 2)
# Public interface:
    def reachedTarget(self):
        '''Return True if we've reached the target, else False'''
        # Compare the P-domain distance converted to meters against the epsilon.
        return abs(self.currentP - self.targetP) * self.spline.totalArcLength < TARGET_EPSILON_M
    def setTargetP(self, targetP):
        '''Interface to set a target P (arc-length-normalized position in [0, 1])'''
        self.targetP = targetP
    def trackSpeed(self, speed):
        '''Updates controller desired speed (m/s); sign is reconciled in update()'''
        self.desiredSpeed = speed
    def update(self, dt):
        '''Advances controller along cable by dt (seconds)'''
        # Speed always in direction of target
        self.desiredSpeed = math.copysign(self.desiredSpeed, self.targetP - self.currentP)
        # Apply speed limits, then rate-limit the change by the tangential accel limit.
        self.speed = constrain(self._constrainSpeed(self.desiredSpeed), self.speed - self.tanAccelLim*dt, self.speed + self.tanAccelLim*dt)
        self._traverse(dt)
    def setCurrentP(self,p):
        '''Sets the controller's current P position on the cable and re-derives (segment, u)'''
        self.currentP = p
        self.currentSeg, self.currentU = self.spline.arclengthToNonDimensional(self.currentP)
    def killCurvatureMapThread(self):
        '''Sets poisonPill to True so the curvatureMapThread knows to die'''
        self.poisonPill = True
# Internal functions:
    def _computeCurvatureMap(self):
        '''Computes curvature map, prioritizes map construction based on vehicle position and direction of motion'''
        # NOTE: Python 2 idioms here (range(...) + list concatenation and
        # itertools.izip_longest) -- this module targets Python 2.
        while True:
            # Start searching from the map segment the vehicle currently occupies.
            searchStart = self._getCurvatureMapSegment(self.currentP)
            if self.speed > 0:
                # Search ahead, then behind
                for i in range(searchStart, self.curvatureMapNumSegments)+list(reversed(range(0, searchStart))):
                    # Stop after computing one new segment so priorities can be re-evaluated.
                    if self._computeCurvatureMapSpeedLimit(i):
                        break
            elif self.speed < 0:
                # Search behind, then ahead
                for i in list(reversed(range(0, searchStart+1)))+range(searchStart+1, self.curvatureMapNumSegments):
                    if self._computeCurvatureMapSpeedLimit(i):
                        break
            else: # speed == 0
                # Search alternately ahead and behind
                searchList = [x for t in list(itertools.izip_longest(range(searchStart, self.curvatureMapNumSegments), reversed(range(0, searchStart)))) for x in t if x is not None]
                for i in searchList:
                    if self._computeCurvatureMapSpeedLimit(i):
                        break
            # if all map segments have been computed then quit the thread
            with self.curvatureMapSegmentsComputedLock:
                if self.curvatureMapSegmentsComputed == self.curvatureMapNumSegments:
                    self.poisonPill = True
            if self.poisonPill:
                break
    def _computeCurvatureMapSpeedLimit(self, mapSeg):
        '''Computes speed limit for the requested map segment.

        Returns True if a new limit was computed, False if it was already cached.
        '''
        with self.curvatureMapLocks[mapSeg]:
            # if the speed limit has already been computed for this map segment, then don't do any work
            if self.curvatureMapSpeedLimits[mapSeg] is not None:
                return False
            # if non-dimensional parameter has not yet been created for the associated left joint, then create it
            if self.curvatureMapJointsNonDimensional[mapSeg] is None:
                self.curvatureMapJointsNonDimensional[mapSeg] = self.spline.arclengthToNonDimensional(self.curvatureMapJointsP[mapSeg])
            # if non-dimensional parameter has not yet been created for the associated right joint, then create it
            if self.curvatureMapJointsNonDimensional[mapSeg+1] is None:
                self.curvatureMapJointsNonDimensional[mapSeg+1] = self.spline.arclengthToNonDimensional(self.curvatureMapJointsP[mapSeg+1])
            # split returned non-dimensional parameter tuple (seg,u) into separate values
            seg1, u1 = self.curvatureMapJointsNonDimensional[mapSeg]
            seg2, u2 = self.curvatureMapJointsNonDimensional[mapSeg+1]
            # returns arc length for current spline segment, or the larger of the two segments if our map segment spans across multiple spline segments
            maxSegLen = max(self.spline.arcLengths[seg1:seg2+1]) # m
            # run a golden section search to find the segment,u pair for the point of maximum curvature in the requested map segment
            # (segment,u) are stored as segment+u, e.g. segment 1, u = 0.25 -> 1.25
            maxCurvatureSegU = goldenSection(lambda x: -self.spline.curvature(int(x), x-int(x)), seg1+u1, seg2+u2, tol = 1e-1/maxSegLen)
            # run a golden section search to find the segment,u pair for the point of minimum Z (aka max altitude)
            minPosZSegU = goldenSection(lambda x: self.spline.position(int(x), x-int(x)).z, seg1+u1, seg2+u2, tol = 1e-1/maxSegLen)
            # split segment+u into segment,u and evaluate curvature at this point
            maxCurvature = self.spline.curvature(int(maxCurvatureSegU),maxCurvatureSegU-int(maxCurvatureSegU))
            #split segment+u into segment,u and evalute position.z at this point
            minPosZ = self.spline.position(int(minPosZSegU),minPosZSegU-int(minPosZSegU)).z #m
            # this prevents the copter from traversing segments of the cable
            # that are above its altitude limit
            if self.posZLimit is not None and minPosZ < self.posZLimit:
                self.maxAltExceeded = True
                #this cable will breach the altitude limit, make the speed limit for this segment 0 to stop the vehicle
                self.curvatureMapSpeedLimits[mapSeg] = 0.
            else:
                if maxCurvature != 0.:
                    # limit maxspeed by the max allowable normal acceleration at that point, bounded on the lower end by minSpeed
                    self.curvatureMapSpeedLimits[mapSeg] = max(math.sqrt(self.normAccelLim / maxCurvature), self.minSpeed)
                else:
                    # if curvature is zero, means a straight segment
                    self.curvatureMapSpeedLimits[mapSeg] = self.maxSpeed
            with self.curvatureMapSegmentsComputedLock:
                self.curvatureMapSegmentsComputed += 1
            return True
    def _getCurvatureMapSpeedLimit(self, mapSeg):
        '''Look up the speed limit for the requested map segment.

        Out-of-range indices return 0, which makes the look-ahead logic treat
        positions past either cable end as mandatory stops.
        '''
        # sanitize mapSeg
        if mapSeg < 0 or mapSeg >= self.curvatureMapNumSegments:
            return 0.
        # Ensure the limit is computed (no-op if the worker already cached it).
        self._computeCurvatureMapSpeedLimit(mapSeg)
        return self.curvatureMapSpeedLimits[mapSeg]
    def _traverse(self, dt):
        ''' Advances the controller along the spline by dt seconds at self.speed '''
        spline_vel_unit = self.spline.velocity(self.currentSeg, self.currentU)
        # NOTE(review): normalize() appears to scale the vector in place and
        # return its original magnitude (spline_vel_unit becomes the unit
        # tangent, spline_vel_norm its length) -- confirm against vector3.Vector3.
        spline_vel_norm = spline_vel_unit.normalize()
        # advances u by the amount specified by our speed and dt
        self.currentU += self.speed * dt / spline_vel_norm
        # handle traversing spline segments
        if self.currentU > 1.:
            if self.currentSeg < self.numSegments-1:
                self.currentSeg += 1
                self.currentU = 0. # NOTE: this truncates steps which cross spline joints
            else:
                self.currentU = 1.
        elif self.currentU < 0.:
            if self.currentSeg > 0:
                self.currentSeg -= 1
                self.currentU = 1. # NOTE: this truncates steps which cross spline joints
            else:
                self.currentU = 0.
        # calculate our currentP
        self.currentP = self.spline.nonDimensionalToArclength(self.currentSeg, self.currentU)[0]
        # calculate our position and velocity commands
        self.position = self.spline.position(self.currentSeg, self.currentU)
        self.velocity = spline_vel_unit * self.speed
    def _constrainSpeed(self, speed):
        '''Looks ahead and behind current controller position and constrains to a speed limit'''
        if speed > 0:
            # Forward motion: cap by maxSpeed and the look-ahead limit.
            return min(self.maxSpeed, speed, self._getPosSpeedLimit(self.currentP))
        elif speed < 0:
            # Reverse motion: the look-behind limit is already negative.
            return max(-self.maxSpeed, speed, self._getNegSpeedLimit(self.currentP))
        return speed
    def _speedCurve(self, dist, speed):
        '''Returns speed based on the sqrt function or a linear ramp (depending on dist)

        Gives the maximum speed allowable at a point `dist` meters away from a
        location where the speed must be `speed`, decelerating at tanAccelLim
        with a linear (P-controlled) ramp below linear_velocity for smooth stops.
        '''
        linear_velocity = self.tanAccelLim / self.smoothStopP
        linear_dist = linear_velocity / self.smoothStopP
        if speed > linear_velocity:
            # Entirely in the constant-deceleration (sqrt) regime.
            return math.sqrt(2. * self.tanAccelLim * (speed**2/(2.*self.tanAccelLim) + dist))
        else:
            # Start inside the linear ramp; convert speed to an equivalent distance.
            p1 = speed / self.smoothStopP
            p2 = p1 + dist
            if p2 > linear_dist:
                return math.sqrt(2. * self.tanAccelLim * (p2 - 0.5*linear_dist))
            else:
                return p2 * self.smoothStopP
    def _maxLookAheadDist(self):
        '''Calculate how far it would take to come to a complete stop (meters)

        Mirrors _speedCurve: constant deceleration above linear_velocity,
        linear P-controlled ramp below it.
        '''
        linear_velocity = self.tanAccelLim / self.smoothStopP
        linear_dist = linear_velocity / self.smoothStopP
        if abs(self.speed) > linear_velocity:
            return 0.5 * abs(self.speed)**2 / self.tanAccelLim + 0.5*linear_dist
        else:
            return abs(self.speed)/self.smoothStopP
def _getCurvatureMapSegment(self, p):
'''Get the curvature map segment index at the location p'''
return int(min(math.floor(p / self.curvatureMapSegLengthP),self.curvatureMapNumSegments-1))
def _getDistToCurvatureMapSegmentBegin(self, p1, idx):
'''Get distance from p1 to the beginning of the idx curvature map segment in meters'''
p2 = self.curvatureMapJointsP[idx]
return abs(p1-p2) * self.spline.totalArcLength
def _getDistToCurvatureMapSegmentEnd(self, p1, idx):
'''Get distance from p1 to the end of the idx curvature map segment in meters'''
p2 = self.curvatureMapJointsP[idx+1]
return abs(p1-p2) * self.spline.totalArcLength
    def _getPosSpeedLimit(self, p):
        '''Returns speed limit for a requested arc length normalized parameter, p, moving in the positive direction'''
        # Identify our current curvature map segment
        mapSeg = self._getCurvatureMapSegment(p)
        # get speed limit for the upcoming curvature map segment
        nextMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg+1)
        # get distance (in meters) from current position to start of next curvature map segment
        nextMapSegDist = self._getDistToCurvatureMapSegmentEnd(p, mapSeg)
        # set speed limit to the minimum of the current curvature map segment and the transition to the next curvature map segment speed
        speedLimit = min(self._getCurvatureMapSpeedLimit(mapSeg), self._speedCurve(nextMapSegDist, nextMapSegSpeed)) # m/s
        # loop through all remaining segments in that direction
        for mapSeg in range(mapSeg+1,self.curvatureMapNumSegments):
            # increment distance by another curvature map segment length
            nextMapSegDist += self.curvatureMapSegLengthM
            # if that distance is greater than the distance it would take to stop, then break to save time (no need to look ahead any further)
            if nextMapSegDist > self._maxLookAheadDist():
                break
            # get curvature map seg speed at this next segment
            nextMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg+1) # NOTE: self.getCurvatureMapSpeedLimit(self.curvatureMapNumSegments) is 0
            # limit us if the new map segment speed is slower than our current speed limit
            speedLimit = min(speedLimit, self._speedCurve(nextMapSegDist, nextMapSegSpeed))
        # if targetP is ahead of currentP then check for a speed limit to slow down at the target
        if self.targetP >= self.currentP:
            speedLimit = min(speedLimit, self._speedCurve(abs(self.targetP - self.currentP)*self.spline.totalArcLength, 0))
        return speedLimit
    def _getNegSpeedLimit(self, p):
        '''Returns speed limit for a requested arc length normalized parameter, p, moving in the negative direction.

        Mirror image of _getPosSpeedLimit; note the returned limit is negated.
        '''
        # Identify our current curvature map segment
        mapSeg = self._getCurvatureMapSegment(p)
        # get speed limit for the previous curvature map segment
        prevMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg-1)
        # get distance (in meters) from current position to start of previous curvature map segment
        prevMapSegDist = self._getDistToCurvatureMapSegmentBegin(p, mapSeg)
        # set speed limit to the minimum of the current curvature map segment and the transition to the previous curvature map segment speed
        speedLimit = min(self._getCurvatureMapSpeedLimit(mapSeg), self._speedCurve(prevMapSegDist, prevMapSegSpeed)) # m/s
        # loop through all remaining segments in that direction
        for mapSeg in reversed(range(0,mapSeg)):
            # increment distance by another curvature map segment length
            prevMapSegDist += self.curvatureMapSegLengthM
            # if that distance is greater than the distance it would take to stop, then break to save time (no need to look ahead any further)
            if prevMapSegDist > self._maxLookAheadDist():
                break
            # get curvature map seg speed at this previous segment
            prevMapSegSpeed = self._getCurvatureMapSpeedLimit(mapSeg-1) # NOTE: self.getCurvatureMapSpeedLimit(-1) is 0
            # limit us if the new map segment speed is slower than our current speed limit
            speedLimit = min(speedLimit, self._speedCurve(prevMapSegDist, prevMapSegSpeed))
        # if targetP is behind currentP then check for a speed limit to slow down at the target
        if self.targetP <= self.currentP:
            speedLimit = min(speedLimit, self._speedCurve(abs(self.targetP - self.currentP)*self.spline.totalArcLength, 0))
        return -speedLimit
3235792 | <gh_stars>0
import aiohttp_cors
from aiohttp_graphql import GraphQLView
from graphql.execution.executors.asyncio import AsyncioExecutor
from ..schema import schema
from ..middlewares import AuthenticationMiddleware, dataloader_middleware
async def startup(app):
    """aiohttp on-startup hook: wire auth middleware, CORS, and the /graphql route."""
    # GraphQL operations exempt from authentication.
    authentication = AuthenticationMiddleware(whitelist=[
        ['registerUser'],
        ['authenticate']
    ])
    # Configure default CORS settings.
    cors = aiohttp_cors.setup(app, defaults={
        "*": aiohttp_cors.ResourceOptions(
            allow_credentials=True,
            expose_headers="*",
            allow_headers="*",
        )
    })
    # NOTE(review): add_route is given two handler-like positionals
    # (dataloader_middleware, then the GraphQLView instance); presumably
    # dataloader_middleware wraps the view -- confirm against aiohttp's
    # add_route signature.
    # NOTE(review): the route name 'grqphql' looks like a typo for 'graphql';
    # left unchanged since named URL lookups may depend on it.
    route = app.router.add_route(
        'POST',
        '/graphql',
        dataloader_middleware,
        GraphQLView(
            schema=schema,
            context={
                'config': app['config'],
                'db': app['mongo_db']
            },
            middleware=[authentication],
            graphiql=True,
            executor=AsyncioExecutor(),
            enable_async=True)
        ,
        name='grqphql')
    # Register the route with the CORS handler so preflight requests succeed.
    cors.add(route)
async def shutdown(app):
    """aiohttp on-shutdown hook; nothing to clean up yet."""
    pass
| StarcoderdataPython |
1743695 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the optional coded 'trophic_reed' trait field to functional_traits."""

    dependencies = [
        ('census_paleo', '0007_auto_20170321_1632'),
    ]

    operations = [
        # Nullable single-letter(ish) trophic category code with fixed choices
        # (e.g. b'G' = grass, b'C' = meat).
        migrations.AddField(
            model_name='functional_traits',
            name='trophic_reed',
            field=models.CharField(blank=True, max_length=100, null=True, choices=[(b'B', b'leaves'), (b'C', b'meat'), (b'C/B', b'meat/bone'), (b'C/I', b'meat/invertebrates'), (b'FG', b'fresh grass'), (b'FL', b'fruit plus'), (b'G', b'grass'), (b'I', b'insects'), (b'MF', b'leaves and grass'), (b'OM', b'omnivorous'), (b'R', b'roots/bulbs')]),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
74982 | import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as Data
import numpy as np
import time
import sys
import utils
print('生成测试数据')
n_train, n_test, num_inputs = 20, 100, 200
true_w, true_b = torch.ones(num_inputs, 1) * 0.01, 0.05
features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]
print('初始化模型参数')
def init_params():
    """Return freshly initialized model parameters [w, b], both tracking gradients."""
    weight = torch.randn((num_inputs, 1), requires_grad=True)
    bias = torch.zeros(1, requires_grad=True)
    return [weight, bias]
# Define the L2 penalty used for weight decay.
print('定义 L2 惩罚项')
def l2_penalty(w):
    """Return the L2 penalty ||w||^2 / 2 as a 0-dim tensor."""
    return 0.5 * (w * w).sum()
print('定义训练和测试')
# One sample per batch, 100 epochs, small learning rate.
batch_size, num_epochs, lr = 1, 100, 0.003
net, loss = utils.linreg, utils.squared_loss
dataset = torch.utils.data.TensorDataset(train_features, train_labels)
train_iter = torch.utils.data.DataLoader(dataset, batch_size, shuffle=True)
def fit_and_plot(lambd):
    """Train linear regression with L2 weight decay strength `lambd`, then plot losses.

    Uses the module-level data, hyperparameters, and utils helpers; prints the
    final L2 norm of w so the shrinking effect of weight decay is visible.
    """
    w, b = init_params()
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            # Data loss plus the explicit weight-decay penalty on w only.
            l = loss(net(X, w, b), y) + lambd * l2_penalty(w)
            l = l.sum()
            # Zero stale gradients before backward (grads are None on the first step).
            if w.grad is not None:
                w.grad.data.zero_()
                b.grad.data.zero_()
            l.backward()
            utils.sgd([w, b], lr, batch_size)
        # Record epoch-level mean losses on the full train/test splits.
        train_ls.append(loss(net(train_features, w, b), train_labels).mean().item())
        test_ls.append(loss(net(test_features, w, b), test_labels).mean().item())
    utils.semilogy(range(1, num_epochs+1), train_ls, 'epochs', 'loss',
                   range(1, num_epochs+1), test_ls, ['train', 'test'])
    print('L2 norm of w:', w.norm().item())
print('观察过拟合')
# lambd=0: no weight decay -- expect severe overfitting (train loss << test loss).
fit_and_plot(lambd=0)
print('使用权重衰减')
# lambd=4: L2 weight decay shrinks w and narrows the train/test gap.
fit_and_plot(lambd=4)
| StarcoderdataPython |
1713101 | <reponame>wszeborowskimateusz/model-free-episodic-control<gh_stars>0
#!/usr/bin/env python3
import os
import random
import time
import gym
from mfec.agent import MFECAgent
from utils import Utils
from dqn.agent import DQNAgent, preprocess
ENVIRONMENT = "MsPacman-v0" # More games at: https://gym.openai.com/envs/#atari
# Path to a saved agent to resume from, or None to start fresh.
AGENT_PATH = None#"agents/MFEC/MsPacman-v0_1609170206/agent.pkl"
# MFEC or DQN
ALGORITHM = 'DQN'
RENDER = True
RENDER_SPEED = 0.04  # seconds slept between rendered frames
EPOCHS = 11
FRAMES_PER_EPOCH = 100000
SEED = 42
# MFECAgent hyperparameters (constructor arguments, in order).
# K is presumably the k in k-nearest-neighbour value estimation -- confirm
# against mfec.agent.MFECAgent.
ACTION_BUFFER_SIZE = 1000000
K = 11
DISCOUNT = 1
EPSILON = 0.005
FRAMESKIP = 4 # Default gym-setting is (2, 5)
REPEAT_ACTION_PROB = 0.0 # Default gym-setting is .25
SCALE_HEIGHT = 84
SCALE_WIDTH = 84
STATE_DIMENSION = 64
# Maximum number of "do nothing" actions to be performed by the agent at the start of an episode
NO_OP_STEPS = {"DQN": 30, "MFEC": 0}
def main():
    """Create the environment and the configured agent, then run training.

    The try/finally guarantees utils and the gym environment are closed even
    if agent construction or training raises.
    """
    random.seed(SEED)
    # Create agent-directory, named by environment and launch timestamp.
    execution_time = str(round(time.time()))
    agent_dir = os.path.join("agents", ALGORITHM, ENVIRONMENT + "_" + execution_time)
    os.makedirs(agent_dir)
    # Initialize utils, environment and agent
    utils = Utils(agent_dir, FRAMES_PER_EPOCH, EPOCHS * FRAMES_PER_EPOCH)
    env = gym.make(ENVIRONMENT)
    try:
        # Override gym's default frameskip / sticky-action settings.
        env.env.frameskip = FRAMESKIP
        env.env.ale.setFloat("repeat_action_probability", REPEAT_ACTION_PROB)
        if ALGORITHM == 'MFEC':
            if AGENT_PATH:
                agent = MFECAgent.load(AGENT_PATH)
            else:
                agent = MFECAgent(
                    ACTION_BUFFER_SIZE,
                    K,
                    DISCOUNT,
                    EPSILON,
                    SCALE_HEIGHT,
                    SCALE_WIDTH,
                    STATE_DIMENSION,
                    range(env.action_space.n),
                    SEED,
                )
        else:
            agent = DQNAgent(env.action_space.n)
            if AGENT_PATH:
                agent.load(AGENT_PATH)
        run_algorithm(agent, agent_dir, env, utils)
    finally:
        utils.close()
        env.close()
def run_algorithm(agent, agent_dir, env, utils):
    """Run EPOCHS training epochs, saving the agent after each epoch.

    Frame accounting matches the original behavior: an episode that overruns
    the epoch's frame budget drives the budget negative, and the deficit is
    carried into the next epoch.
    """
    frame_budget = 0
    for _ in range(EPOCHS):
        frame_budget += FRAMES_PER_EPOCH
        # Keep playing episodes until this epoch's frame budget is spent.
        while frame_budget > 0:
            frames, reward = run_episode(agent, env)
            frame_budget -= frames
            utils.end_episode(frames, reward)
        utils.end_epoch()
        agent.save(agent_dir)
def run_episode(agent, env):
    """Play one episode to completion.

    Returns:
        (episode_frames, episode_reward): emulator frames consumed (steps *
        FRAMESKIP) and the undiscounted reward sum for the episode.
    """
    episode_frames = 0
    episode_reward = 0
    # Re-seed the emulator per episode so starts differ between episodes.
    env.seed(random.randint(0, 1000000))
    observation = env.reset()
    no_op_steps = NO_OP_STEPS[ALGORITHM]
    if no_op_steps > 0:
        # Randomize the start state by performing a random number of no-ops.
        for _ in range(random.randint(1, no_op_steps)):
            last_observation = observation
            observation, _, _, _ = env.step(0)  # Do nothing
        if ALGORITHM == 'DQN':
            # DQN's state is a stack built from the last two raw frames.
            state = agent.get_initial_state(observation, last_observation)
    # NOTE(review): if NO_OP_STEPS['DQN'] were ever set to 0, `state` below
    # would be unbound for DQN; currently safe because the dict maps DQN to 30.
    done = False
    while not done:
        if RENDER:
            env.render()
            time.sleep(RENDER_SPEED)
        last_observation = observation
        if ALGORITHM == 'DQN':
            action = agent.choose_action(state)
        else:
            action = agent.choose_action(observation)
        observation, reward, done, _ = env.step(action)
        if ALGORITHM == 'MFEC':
            # MFEC accumulates rewards and trains once at episode end.
            agent.receive_reward(reward)
        if ALGORITHM == 'DQN':
            processed_observation = preprocess(observation, last_observation)
            state = agent.run(state, action, reward, done, processed_observation)
        episode_reward += reward
        # Each env.step advances the emulator by FRAMESKIP frames.
        episode_frames += FRAMESKIP
    if ALGORITHM == 'MFEC':
        agent.train()
    return episode_frames, episode_reward
if __name__ == "__main__":
main()
| StarcoderdataPython |
3345757 | <reponame>vhirtham/weldx
"""Contains the serialization class for the weldx.core.TimeSeries."""
import numpy as np
import pint
from weldx.asdf.types import WeldxType
from weldx.constants import WELDX_QUANTITY as Q_
from weldx.core import TimeSeries
class TimeSeriesTypeASDF(WeldxType):
    """Serialization class for weldx.core.TimeSeries

    Three on-disk layouts are produced, distinguished by their keys:
    a constant value ("value"), a discrete series ("values" + "time"),
    or a symbolic math expression ("expression").
    """

    name = "core/time_series"
    version = "1.0.0"
    types = [TimeSeries]
    requires = ["weldx"]
    handle_dynamic_subclasses = True

    @classmethod
    def to_tree(cls, node: TimeSeries, ctx):
        """
        Convert an 'weldx.core.TimeSeries' instance into YAML representations.

        Parameters
        ----------
        node :
            Instance of the 'weldx.core.TimeSeries' type to be serialized.

        ctx :
            An instance of the 'AsdfFile' object that is being written out.

        Returns
        -------
            A basic YAML type ('dict', 'list', 'str', 'int', 'float', or
            'complex') representing the properties of the 'weldx.core.TimeSeries'
            type to be serialized.

        """
        if isinstance(node.data, pint.Quantity):
            if node.shape == tuple([1]):  # constant
                # Single-element quantity: store the bare scalar + unit.
                return {
                    "unit": str(node.units),
                    "value": node.data.magnitude[0],
                }
            else:
                # Discrete series: store values with their time axis and
                # interpolation method so it can be reconstructed exactly.
                return {
                    "time": node.time,
                    "unit": str(node.units),
                    "shape": node.shape,
                    "interpolation": node.interpolation,
                    "values": node.data.magnitude,
                }
        # Non-Quantity data: a math expression describing the series.
        return {"expression": node.data, "unit": str(node.units), "shape": node.shape}

    @classmethod
    def from_tree(cls, tree, ctx):
        """
        Converts basic types representing YAML trees into an 'weldx.core.TimeSeries'.

        Parameters
        ----------
        tree :
            An instance of a basic Python type (possibly nested) that
            corresponds to a YAML subtree.
        ctx :
            An instance of the 'AsdfFile' object that is being constructed.

        Returns
        -------
        weldx.core.TimeSeries :
            An instance of the 'weldx.core.TimeSeries' type.

        """
        # Dispatch on which keys are present (mirrors the branches in to_tree).
        if "value" in tree:  # constant
            values = Q_(np.asarray(tree["value"]), tree["unit"])
            return TimeSeries(values)
        elif "values" in tree:
            time = tree["time"]
            interpolation = tree["interpolation"]
            values = Q_(tree["values"], tree["unit"])
            return TimeSeries(values, time, interpolation)
        return TimeSeries(tree["expression"])  # mathexpression
| StarcoderdataPython |
3217051 | <reponame>mrmanishprasadroy/Dash_Celery_Redis<gh_stars>0
import glob
import os
import datetime
import json
import numpy as np
import pandas as pd
from pandas import DataFrame
import time
from telegram_definition_L1 import *
from golabal_def import Dir_Path
# telegram directory (default)
tel_directory = Dir_Path
# initialisation
selTelegram_N02 = np.array([], dtype=teltype_N02)
appended_allTelegram_N02 = []
timeIndex = []
alltimeIndex = []
messageId = {
'N02': 'EF21',
}
def _per_gate_frame(telegrams, field):
    """Build a DataFrame with one column per roll gap (G1..G5) for *field*.

    The N02 telegram stores 5 values per gap; the value at index 2 of each
    group of five (columns 2, 7, 12, 17, 22) is the one used for reporting,
    matching the original hand-written extraction.
    """
    return DataFrame(
        {"%s_G%d" % (field, gap + 1): telegrams[field][:, 2 + 5 * gap]
         for gap in range(5)}
    )


def setup_data():
    """Read all N02 (*EF21*) telegram files and return their content as JSON.

    Returns a ``json.dumps``'d dict with two DataFrames (orient='split'):
    'df_00' -- the outgoing coil ids, 'df_01' -- the full per-coil table.
    Files are processed oldest-first so rows are in chronological order.
    """
    start_time = time.time()
    selTelegram_N02 = np.array([], dtype=teltype_N02)
    timeIndex = []
    # Collect every matching telegram file for message id EF21, oldest first.
    tel_directory_N02 = tel_directory + '\\*' + messageId["N02"] + '*.tel'
    filelist = glob.glob(tel_directory_N02)
    filelist.sort(key=lambda x: os.path.getmtime(x))
    if len(filelist) > 0:
        for file in filelist:
            # Context manager guarantees the handle is closed (the original
            # leaked it on a np.fromfile error).
            with open(file, 'rb') as f:
                one_telegram = np.fromfile(f, dtype=teltype_N02)
            selTelegram_N02 = np.concatenate((selTelegram_N02, one_telegram))
            # The file's mtime serves as the telegram timestamp.
            timeIndex.append(datetime.datetime.fromtimestamp(os.path.getmtime(file)))
        elaps_time = "- %s seconds ---" % (time.time() - start_time)
        print("N02: data found time" + elaps_time)
    else:
        print("N02: no data found")
    # Alloy composition: first 7 chemical elements.
    df_chem = DataFrame(selTelegram_N02['AlloyComposition'][:, :7])
    df_chem.columns = ['chem_1', 'chem_2', 'chem_3', 'chem_4', 'chem_5',
                       'chem_6', 'chem_7']
    # One DataFrame per measured quantity, one column per roll gap, in the
    # same order the original code concatenated them.
    per_gate_fields = [
        'ExitThick', 'ExitTemp', 'RollSpeed', 'TensionEntry', 'TensionExit',
        'RollForceOS', 'RollForceDS', 'BendWROS', 'BendWRDS', 'BendIROS',
        'BendIRDS', 'ShiftCVC', 'SlipForward', 'HydPosOS', 'HydPosDS',
        'DriveTorque',
    ]
    gate_frames = [_per_gate_frame(selTelegram_N02, field)
                   for field in per_gate_fields]
    # Scalar / per-coil columns.
    # NOTE(review): several keys below ('StandSwitchOff_G1 ', 'ThermalCrown_G1 ',
    # 'FfcCtrlUsage_G1 ', ...) carry a trailing space, and 'Length1_G5' reads
    # column 3 (same as G4, probably should be 4). Both look like latent bugs
    # but are preserved because downstream consumers may rely on them -- confirm
    # before changing.
    df1 = DataFrame({
        'Time': timeIndex,
        'CoilId': selTelegram_N02['CoilId'][:],
        'CoilIdOut': selTelegram_N02['CoilIdOut'][:],
        'SeqCoilOut': selTelegram_N02['SeqCoilOut'][:],
        'SetupNo': selTelegram_N02['SetupNo'][:],
        'ReturnCode': selTelegram_N02['ReturnCode'][:],
        'SetupValidCode': selTelegram_N02['SetupValidCode'][:],
        'NoPasses': selTelegram_N02['NoPasses'][:],
        'AlloyCode': selTelegram_N02['AlloyCode'][:],
        'AnalysisFlag': selTelegram_N02['AnalysisFlag'][:],
        'Width': selTelegram_N02['Width'][:],
        'LengthStart': selTelegram_N02['LengthStart'][:],
        'Length0': selTelegram_N02['Length0'][:],
        'Length1_G1': selTelegram_N02['Length1'][:, 0],
        'Length1_G2': selTelegram_N02['Length1'][:, 1],
        'Length1_G3': selTelegram_N02['Length1'][:, 2],
        'Length1_G4': selTelegram_N02['Length1'][:, 3],
        'Length1_G5': selTelegram_N02['Length1'][:, 3],
        'EntryThick': selTelegram_N02['EntryThick'][:, 0],
        'EntryTemp': selTelegram_N02['EntryTemp'][:, 1],
        'const_force_mode': selTelegram_N02['ConstForceMode'][:],
        'flag_setup_trans_mode': selTelegram_N02['FlagSetupTransMode'][:],
        'return_code': selTelegram_N02['ReturnCode'][:],
        'setup_valid_code': selTelegram_N02['SetupValidCode'][:],
        'thread_speed_mode': selTelegram_N02['ThreadSpeedMode'][:],
        'threading_mode': selTelegram_N02['ThreadingMode'][:],
        'tail_out_mode': selTelegram_N02['TailOutMode'][:],
        'ThreadAssist': selTelegram_N02['ThreadAssist'][:],
        'SpoolInd': selTelegram_N02['SpoolInd'][:],
        'SpoolOuterDiam': selTelegram_N02['SpoolOuterDiam'][:],
        'SpoolWidth': selTelegram_N02['SpoolWidth'][:],
        'TargetTransLength': selTelegram_N02['TargetTransLength'][:],
        'TargetPosWeldSeam': selTelegram_N02['TargetPosWeldSeam'][:],
        'TargetThickHeadLength': selTelegram_N02['TargetThickHeadLength'][:],
        'ArtifSleeveUsage': selTelegram_N02['ArtifSleeveUsage'][:],
        'TensionCurveID': selTelegram_N02['TensionCurveID'][:],
        'TensionCurveNoPos': selTelegram_N02['TensionCurveNoPos'][:],
        'yield_strength_calc': selTelegram_N02['YieldStrengthCalc'][:],
        'StandSwitchOff_G1 ': selTelegram_N02['StandSwitchOff'][:, 0],
        'StandSwitchOff_G2 ': selTelegram_N02['StandSwitchOff'][:, 1],
        'StandSwitchOff_G3 ': selTelegram_N02['StandSwitchOff'][:, 2],
        'StandSwitchOff_G4 ': selTelegram_N02['StandSwitchOff'][:, 3],
        'StandSwitchOff_G5 ': selTelegram_N02['StandSwitchOff'][:, 4],
        'TargetCoilTempLimit': selTelegram_N02['TargetCoilTempLimit'][:],
        'ThermalCrown_G1 ': selTelegram_N02['ThermalCrown'][:, 0],
        'ThermalCrown_G2 ': selTelegram_N02['ThermalCrown'][:, 1],
        'ThermalCrown_G3 ': selTelegram_N02['ThermalCrown'][:, 2],
        'ThermalCrown_G4 ': selTelegram_N02['ThermalCrown'][:, 3],
        'ThermalCrown_G5 ': selTelegram_N02['ThermalCrown'][:, 4],
        'FfcCtrlUsage_G1 ': selTelegram_N02['FfcCtrlUsage'][:, 0],
        'FfcCtrlUsage_G2 ': selTelegram_N02['FfcCtrlUsage'][:, 1],
        'FfcCtrlUsage_G3 ': selTelegram_N02['FfcCtrlUsage'][:, 2],
        'FfcCtrlUsage_G4 ': selTelegram_N02['FfcCtrlUsage'][:, 3],
        'FfcCtrlUsage_G5 ': selTelegram_N02['FfcCtrlUsage'][:, 4],
        'FbcCtrlUsage_G1 ': selTelegram_N02['FbcCtrlUsage'][:, 0],
        'FbcCtrlUsage_G2 ': selTelegram_N02['FbcCtrlUsage'][:, 1],
        'FbcCtrlUsage_G3 ': selTelegram_N02['FbcCtrlUsage'][:, 2],
        'FbcCtrlUsage_G4 ': selTelegram_N02['FbcCtrlUsage'][:, 3],
        'FbcCtrlUsage_G5 ': selTelegram_N02['FbcCtrlUsage'][:, 4],
        'VfcCtrlUsage_G1 ': selTelegram_N02['VfcCtrlUsage'][:, 0],
        'VfcCtrlUsage_G2 ': selTelegram_N02['VfcCtrlUsage'][:, 1],
        'VfcCtrlUsage_G3 ': selTelegram_N02['VfcCtrlUsage'][:, 2],
        'VfcCtrlUsage_G4 ': selTelegram_N02['VfcCtrlUsage'][:, 3],
        'VfcCtrlUsage_G5 ': selTelegram_N02['VfcCtrlUsage'][:, 4],
    })
    export_database = pd.concat([df1] + gate_frames + [df_chem],
                                axis=1, sort=False)
    arr_coilids = pd.DataFrame(selTelegram_N02['CoilIdOut'][:], columns=['CoilIdOut'])
    datasets = {
        'df_00': arr_coilids.to_json(orient='split', date_format='iso'),
        'df_01': export_database.to_json(orient='split', date_format='iso'),
    }
    elaps1_time = "- %s seconds ---" % (time.time() - start_time)
    print(elaps1_time + 'setup_data compile')
    return json.dumps(datasets)
| StarcoderdataPython |
1627136 | <filename>maths/factorial_recursive.py
'''THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE
DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY,
WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
# Bitcoin Cash (BCH) qpz32c4lg7x7lnk9jg6qg7s4uavdce89myax5v5nuk
# Ether (ETH) - 0x843d3DEC2A4705BD4f45F674F641cE2D0022c9FB
# Litecoin (LTC) - Lfk5y4F7KZa9oRxpazETwjQnHszEPvqPvu
# Bitcoin (BTC) - 34L8qWiQyKr8k4TnHDacfjbaSqQASbBtTd
# contact :- <EMAIL>
def fact(n):
    """Recursively compute the factorial of n; any n <= 1 yields 1."""
    if n <= 1:
        return 1
    return n * fact(n - 1)
"""
Show factorial for i,
where i ranges from 1 to 20.
"""
for i in range(1, 21):
print(i, ": ", fact(i), sep="")
| StarcoderdataPython |
51715 | #coding:UTF-8
import os
import discord
from discord.ext import tasks
from datetime import datetime
token = os.environ['DISCORD_BOT_TOKEN']  # bot token
# Discord channel IDs are integers; os.environ always yields a str, and
# discord.Client.get_channel() only matches int IDs, so without this
# conversion the posting loop always sees None and never sends anything.
channel_id = int(os.environ['CHANNEL_ID'])  # channel ID
# 接続に必要なオブジェクトを生成
client = discord.Client()
@tasks.loop(seconds=60)
async def loop():
    """Background task: every 60 seconds, post a test message to the channel."""
    print(datetime.now().strftime("%Y/%m/%d %H:%M:%S"), "start")
    print(client.is_ready())
    # NOTE(review): get_channel() matches by int ID; if channel_id is still the
    # raw str from os.environ, this is always None and nothing is ever sent.
    channel = client.get_channel(channel_id)
    if channel != None :
        print(channel)
        await channel.send('てすと')
@client.event
async def on_ready():
    """Start the periodic posting task once the bot has connected."""
    # Start the 60-second loop task (original comment: ループ処理実行,
    # "run loop processing").
    loop.start()
# Botの起動とDiscordサーバーへの接続
client.run(token)
| StarcoderdataPython |
4829363 | <reponame>mayashap/tasking-manager
import geojson
import json
import os
from typing import Tuple
import xml.etree.ElementTree as ET
from backend.models.dtos.project_dto import DraftProjectDTO
from backend.models.postgis.project import Project
from backend.models.postgis.statuses import TaskStatus
from backend.models.postgis.task import Task
from backend.models.postgis.user import User
TEST_USER_ID = 1234
def get_canned_osm_user_details():
    """Parse and return the canned OSM user-details XML fixture.

    The path is resolved relative to this module so it works regardless of
    where the tests are run from.

    Raises:
        FileNotFoundError: if the fixture file is missing (chained to the
        original OS error for debuggability).
    """
    location = os.path.join(
        os.path.dirname(__file__), "test_files", "osm_user_details.xml"
    )
    try:
        # ET.parse opens the file itself; the original's extra open() probe
        # was redundant.
        return ET.parse(location)
    except FileNotFoundError as err:
        raise FileNotFoundError("osm_user_details.xml not found") from err
def get_canned_osm_user_details_changed_name():
    """Parse the changed-name OSM user-details fixture, located relative to
    this module so the tests work from any working directory."""
    base_dir = os.path.dirname(__file__)
    location = os.path.join(
        base_dir, "test_files", "osm_user_details_changed_name.xml"
    )
    try:
        with open(location, "r"):
            return ET.parse(location)
    except FileNotFoundError:
        raise FileNotFoundError("osm_user_details_changed_name.xml not found")
def get_canned_json(name_of_file):
    """Load and return the canned JSON fixture *name_of_file* from test_files."""
    location = os.path.join(os.path.dirname(__file__), "test_files", name_of_file)
    try:
        with open(location, "r") as grid_file:
            return json.load(grid_file)
    except FileNotFoundError:
        raise FileNotFoundError("json file not found")
def get_canned_simplified_osm_user_details():
    """Return the simplified OSM user-details fixture as a single-line string."""
    location = os.path.join(
        os.path.dirname(__file__), "test_files", "osm_user_details_simple.xml"
    )
    with open(location, "r") as osm_file:
        raw = osm_file.read()
    # Strip newlines so the XML compares as one flat string.
    return raw.replace("\n", "")
def create_canned_user() -> User:
    """Persist and return the canned 'Thinkwhere TEST' user."""
    user = User()
    user.username = "Thinkwhere TEST"
    user.mapping_level = 1
    user.create()
    return user
def get_canned_user(username: str) -> User:
    """Fetch an existing user by username (None when no such user exists)."""
    return User().get_by_username(username)
def create_canned_project() -> Tuple[Project, User]:
    """ Generates a canned project in the DB to help with integration tests

    Creates (or reuses) the canned test user, then persists a two-task
    project: task 1 square and already MAPPED, task 2 non-square and READY.
    Returns the created project and the owning user.
    """
    # Round-trip through json.dumps so geojson parses plain-dict fixtures.
    test_aoi_geojson = geojson.loads(json.dumps(get_canned_json("test_aoi.json")))
    task_feature = geojson.loads(json.dumps(get_canned_json("splittable_task.json")))
    task_non_square_feature = geojson.loads(
        json.dumps(get_canned_json("non_square_task.json"))
    )
    # Reuse the canned user if a previous test already created it.
    test_user = get_canned_user("Thinkwhere TEST")
    if test_user is None:
        test_user = create_canned_user()
    test_project_dto = DraftProjectDTO()
    test_project_dto.project_name = "Test"
    test_project_dto.user_id = test_user.id
    test_project_dto.area_of_interest = test_aoi_geojson
    test_project = Project()
    test_project.create_draft_project(test_project_dto)
    test_project.set_project_aoi(test_project_dto)
    test_project.total_tasks = 2
    # Setup test task
    test_task = Task.from_geojson_feature(1, task_feature)
    test_task.task_status = TaskStatus.MAPPED.value
    test_task.mapped_by = test_user.id
    test_task.is_square = True
    test_task2 = Task.from_geojson_feature(2, task_non_square_feature)
    test_task2.task_status = TaskStatus.READY.value
    test_task2.is_square = False
    test_project.tasks.append(test_task)
    test_project.tasks.append(test_task2)
    test_project.create()
    return test_project, test_user
| StarcoderdataPython |
1772721 | <reponame>INGEOTEC/Python-Course
a = 2 // 3
print(type(a))
assert type(a) != int
assert isinstance(a, int)
def add(a, b):
    """Return the result of a + b (sum, or concatenation for sequences)."""
    total = a + b
    return total
add(12, "23")
def add(a, b):
    """Add two values, asserting both are int or float first."""
    for operand in (a, b):
        # AssertionError for anything that is not a plain number.
        assert isinstance(operand, (float, int))
    return a + b
add("12", 3)
assert add(21, 1.2) == 22.2
def even_numbers(n):
    """Print every even number between 0 and n (inclusive)."""
    candidate = 0
    while candidate <= n:
        if is_even(candidate):
            print("Even", candidate)
        candidate += 1
def is_even(a):
    """Return True when a is evenly divisible by 2."""
    _, remainder = divmod(a, 2)
    return remainder == 0
even_numbers(11)
def add(a, b):
    """Add two numbers, asserting each operand is an int or a float."""
    assert isinstance(a, (float, int))
    assert isinstance(b, (float, int))
    return a + b
def gcd(a, b):
    """Greatest Common Divisor of two integers (Euclid's algorithm).

    The original body only validated the argument types and returned None;
    the computation is now implemented. Non-int arguments still raise
    AssertionError, preserving the previous behavior for bad input.
    """
    assert isinstance(a, int)
    assert isinstance(b, int)
    while b:
        a, b = b, a % b
    return abs(a)
gcd(12.2, 23) | StarcoderdataPython |
1739667 | <gh_stars>1-10
from spotipy.oauth2 import SpotifyClientCredentials
from spotipy import Spotify
import json
from sys import argv
def spotify_authentication(client_id, client_secret, playlist_id):
    """
    Authenticate against the Spotify API with the client-credentials flow
    and fetch the playlist.

    Args:
        client_id: application client id from the Spotify dashboard
        client_secret: application client secret
        playlist_id: id taken from the playlist's share link
    Returns:
        The full playlist payload as returned by the API.
    """
    credentials = SpotifyClientCredentials(
        client_id=client_id, client_secret=client_secret
    )
    api_client = Spotify(auth_manager=credentials)
    return api_client.playlist(playlist_id)
def spotify_playlist_info(client_id, client_secret, playlist_id):
    """
    Return a list of "song - artist" strings, one per track of the playlist
    identified by playlist_id, using the given Spotify app credentials.
    """
    playlist = spotify_authentication(client_id, client_secret, playlist_id)
    return [
        "%s - %s" % (item['track']['name'], item['track']['artists'][0]['name'])
        for item in playlist['tracks']['items']
    ]
def output_json(client_id, client_secret, playlist_id):
    """
    Fetch the playlist and write a simplified snapshot to 'track.json'.

    Fix: the output file is now opened with a context manager so it is
    always flushed and closed (the original leaked the open handle).
    """
    results = spotify_authentication(client_id, client_secret, playlist_id)
    # Keep only album name, primary artist and track name per item.
    items = [
        {
            'track': {
                'album': {'name': item['track']['album']['name']},
                'artists': [{'name': item['track']['artists'][0]['name']}],
                'name': item['track']['name'],
            }
        }
        for item in results['tracks']['items']
    ]
    # NOTE(review): 'limit'/'total' are hard-coded snapshot values from the
    # original; 'total' likely should be len(items) -- confirm with consumers
    # before changing the output format.
    result_dict = {
        'tracks': {
            'items': items,
            'limit': 100,
            'next': None,
            'offset': 0,
            'previous': None,
            'total': 16
        },
        'type': 'playlist',
        'uri': playlist_id
    }
    with open('track.json', 'w') as out_file:
        json.dump(result_dict, out_file, indent=4)
# Sample Script
"""
c_id = str(argv[1])
c_secret = str(argv[2])
p_id = str(argv[3])
p_id = p_id[34:]
spotify_playlist_info(c_id, c_secret, p_id)
""" | StarcoderdataPython |
16807 | import unittest
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_SimpleAnalysis(unittest.TestCase):
    """Smoke tests for pycqed's Basic1D/2D analysis classes on canned data."""
    @classmethod
    def tearDownClass(self):
        # Close all matplotlib figures so repeated runs don't accumulate them.
        plt.close('all')
    @classmethod
    def setUpClass(self):
        # Point the analysis toolbox at the repository's canned test data.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir
    def test_1D_analysis_multi_file(self):
        # A timestamp range should pick up several 'flipping' datasets.
        a = ma.Basic1DAnalysis(t_start='20170726_164507',
                               t_stop='20170726_164845',
                               options_dict={'scan_label': 'flipping'})
        self.assertTrue(len(a.timestamps) > 5)
    def test_1D_analysis_single_file(self):
        # giving only a single file
        a = ma.Basic1DAnalysis(t_start='20170726_164845',
                               options_dict={'scan_label': 'flipping'})
        self.assertEqual(a.timestamps, ['20170726_164845'])
    def test_2D_analysis_multi_file(self):
        # N.B. by setting x2, x2_label and x2_unit in the options dict
        # the values can be plotted versus the varied parameter between
        # the linecuts
        a = ma.Basic2DAnalysis(t_start='20170726_164521',
                               t_stop='20170726_164845',
                               options_dict={'scan_label': 'flipping'})
        self.assertTrue(len(a.timestamps) > 5)
    def test_2D_interpolated(self):
        # The interpolated analysis should produce exactly these three figures.
        a=ma.Basic2DInterpolatedAnalysis(t_start='20180522_030206')
        fig_keys = list(a.figs.keys())
        exp_list_keys = ['Cost function value', 'Conditional phase',
                         'offset difference']
        self.assertEqual(fig_keys, exp_list_keys)
    @unittest.skip('FIXME: disabled, see PR #643')
    def test_1D_binned_analysis(self):
        a=ma.Basic1DBinnedAnalysis(label='120543_Single_qubit_GST_QL')
| StarcoderdataPython |
3275547 | <reponame>malaterre/vtk-dicom
"""
Generate tables for converting GB18030 multi-byte to Unicode
The input arguments should be the gb-18030-2005.ucm file from here:
http://source.icu-project.org/repos/icu/data/trunk/charset/data/ucm/
Two tables must be generated. One for the 24066 two-byte codes, and
another for the four-byte codes.
"""
import sys
import os.path
fname = sys.argv[1]
f = open(fname,'r')
lines = f.readlines();
f.close()
# Maps from GB18030 linear code index to Unicode code point.
m2 = {}  # two-byte codes
m4 = {}  # four-byte codes
for l in lines:
  l = l.strip()
  # A mapping line starts "<Uxxxx>" (or "<Uxxxxx>" for 5-hex code points);
  # for the 5-digit form, drop one char so the byte columns line up below.
  if l[0:2] == "<U" and l[6] == ">":
    u = int(l[2:6],16)
  elif l[0:2] == "<U" and l[7] == ">":
    u = int(l[2:7],16)
    l = l[1:]
  else:
    continue
  if l[12:14] != "\\x":
    # one-byte code is identical to ASCII
    continue
  elif l[16:18] != "\\x":
    # two-byte code
    a = int(l[10:12],16)
    b = int(l[14:16],16)
    if b > 0x7f:
      b -= 1
    g = (a - 0x81)*190 + (b - 0x40)
    m = m2
  else:
    # four-byte code
    a = int(l[10:12],16)
    b = int(l[14:16],16)
    c = int(l[18:20],16)
    d = int(l[22:24],16)
    aa = (a - 0x81)*10 + (b - 0x30)
    bb = (c - 0x81)*10 + (d - 0x30)
    g = aa*1260 + bb
    m = m4
  # Fix: dict.has_key() was removed in Python 3; the `in` operator is
  # equivalent and works on Python 2 as well.
  if g in m:
    if l[-1] == '1':
      sys.stderr.write("duplicate: 0x%04X 0x%04X, 0x%04X\n" % (g,u,m[g]))
      # prefer the original
      m[g] = u
    else:
      sys.stderr.write("duplicate: 0x%04X 0x%04X, 0x%04X\n" % (g,m[g],u))
  else:
    m[g] = u
# Emit the two-byte table: 23940 slots (126 lead bytes x 190 trail bytes),
# 9 entries per output line; unmapped slots become U+FFFD REPLACEMENT CHARACTER.
for k in range(0,23940):
  u = 0xFFFD
  try:
    u = m2[k]
  except KeyError:
    pass
  if u > 0xFFFF:
    # Two-byte codes must map inside the BMP; warn and fall back to U+FFFD.
    sys.stderr.write("too large! 0x%04X\n" % (u,))
    u = 0xFFFD
  elif u >= 0xE000 and u <= 0xF8FF:
    # these are for character not yet in the unicode standard
    #sys.stderr.write("private! 0x%04X\n" % (u,))
    pass
  s = ' '
  if ((k+1) % 9 == 0):
    s = '\n'
  sys.stdout.write("0x%04X,%s" % (u,s))
# Separator between the two-byte and the four-byte tables in the output.
sys.stdout.write("=======================================\n")
# Emit the four-byte table. Whenever the mapping breaks its run of
# consecutive code points (tracked via lastu; -10 is a sentinel that can
# never match lastu+1), the linear index k is emitted before the value.
# The last entry is always treated as a run break so the table terminates.
lastu = -10
i = 0
n = max(m4.keys())+1
for k in range(n):
  u = 0xFFFD
  try:
    u = m4[k]
  except KeyError:
    pass
  run = False
  if u == lastu+1 or (u == 0xFFFD and lastu == 0xFFFD):
    run = True
  if k == n-1:
    run = False
  lastu = u
  if not run:
    # Run break: write the linear index k first.
    i = i + 1
    s = ' '
    if (i % 8 == 0):
      s = '\n'
    sys.stdout.write("0x%04X,%s" % (k,s))
  # The code point itself is written for every entry, 8 values per line.
  i = i + 1
  s = ' '
  if (i % 8 == 0):
    s = '\n'
  sys.stdout.write("0x%04X,%s" % (u,s))
| StarcoderdataPython |
3381814 | <gh_stars>0
import numpy as np
class Agent(object):
    """An agent that ignores its inputs and acts uniformly at random."""

    def __init__(self, dim_action):
        # Number of components in the continuous action vector.
        self.dim_action = dim_action

    def act(self, ob, reward, done):
        """Return a random action in (-1, 1)^dim_action; all inputs ignored."""
        noise = np.random.randn(self.dim_action)
        return np.tanh(noise)
3376842 | import itertools
import os
import unittest
import parameterized
from generator import rand
from exact_string_matching import forward, backward, other
from string_indexing import lcp, suffix_tree, suffix_array
def lcp_lr_contains(t, w, n, m):
    """Pattern search for w in t via a suffix array plus an LCP-LR table."""
    suffix_arr = suffix_array.skew(t, n)
    lcp_lr = lcp.build_lcp_lr(lcp.kasai(suffix_arr, t, n), n)
    return lcp.contains(suffix_arr, lcp_lr, t, w, n, m)
EXACT_STRING_MATCHING_ALGORITHMS = [
[ 'Morris-Pratt', forward.morris_pratt ],
[ 'Knuth-Morris-Pratt', forward.knuth_morris_pratt ],
[ 'Boyer-Moore', backward.boyer_moore ],
[ 'Boyer-Moore with bad shifts', backward.boyer_moore_bad_shift ],
[ 'Boyer-Moore-Galil', backward.boyer_moore_galil ],
[ 'Turbo-Boyer-Moore', backward.turbo_boyer_moore ],
[ 'bad shift heuristic', backward.bad_shift_heuristic ],
[ 'quick search heuristic', backward.quick_search ],
[
'Boyer-Moore-Apostolico-Giancarlo',
backward.boyer_moore_apostolico_giancarlo
],
[ 'Horspool', backward.horspool ],
[ 'Karp-Rabin', other.karp_rabin ],
[ 'fast-on-average', other.fast_on_average ],
[ 'two-way constant space', other.two_way ],
[
'suffix tree',
lambda t, w, n, m: suffix_tree.contains(
suffix_tree.mccreight(t, n)[0], t, w, n, m),
],
[
'suffix array',
lambda t, w, n, m: suffix_array.contains(
suffix_array.prefix_doubling(t, n), t, w, n, m),
],
[ 'lcp-lr array', lcp_lr_contains ],
]
class TestExactStringMatching(unittest.TestCase):
  """Checks every registered matcher on examples, random and exhaustive data."""

  # Decorator that disables the expensive tests unless LARGE is set.
  run_large = unittest.skipUnless(
      os.environ.get('LARGE', False), 'Skip test in small runs')

  def check_first_exact_match(self, t, w, n, m, reference, algorithm):
    matches = algorithm(t, w, n, m)
    self.assertEqual(next(matches), reference)

  def check_all_exact_matches(self, t, w, n, m, reference, algorithm):
    found = list(algorithm(t, w, n, m))
    self.assertEqual(found, reference)

  def check_no_match(self, t, w, n, m, algorithm):
    found = list(algorithm(t, w, n, m))
    self.assertFalse(found)

  @parameterized.parameterized.expand(EXACT_STRING_MATCHING_ALGORITHMS)
  def test_example_first_exact_match(self, _, algorithm):
    self.check_first_exact_match('#abaaba', '#aab', 6, 3, 3, algorithm)
    self.check_first_exact_match('#abrakadabra', '#brak', 11, 4, 2, algorithm)
    self.check_first_exact_match('#abrakadabra', '#ra', 11, 2, 3, algorithm)

  @parameterized.parameterized.expand(EXACT_STRING_MATCHING_ALGORITHMS)
  def test_example_all_exact_matches(self, _, algorithm):
    self.check_all_exact_matches(
        '#abaaabbaababb', '#abb', 13, 3, [5, 11], algorithm)
    self.check_all_exact_matches(
        '#abrakadabra', '#a', 11, 1, [1, 4, 6, 8, 11], algorithm)
    self.check_all_exact_matches(
        '#abrakadabra', '#bra', 11, 3, [2, 9], algorithm)
    self.check_all_exact_matches('#abrakadabra', '#rak', 11, 3, [3], algorithm)

  @parameterized.parameterized.expand(EXACT_STRING_MATCHING_ALGORITHMS)
  def test_example_no_match(self, _, algorithm):
    self.check_no_match('#abaaba', '#baaab', 6, 5, algorithm)
    self.check_no_match('#abrakadabra', '#l', 11, 1, algorithm)
    self.check_no_match('#abrakadabra', '#xyz', 11, 3, algorithm)

  @parameterized.parameterized.expand(EXACT_STRING_MATCHING_ALGORITHMS)
  @run_large
  def test_random_exact_string_matching(self, _, algorithm):
    # Compare against brute force on random binary words.
    T, n, m, A = 100, 500, 10, ['a', 'b']
    for _ in range(T):
      t, w = rand.random_word(n, A), rand.random_word(m, A)
      reference = list(forward.brute_force(t, w, n, m))
      self.check_all_exact_matches(t, w, n, m, reference, algorithm)

  @parameterized.parameterized.expand(EXACT_STRING_MATCHING_ALGORITHMS)
  @run_large
  def test_all_exact_string_matching(self, _, algorithm):
    # Exhaustively compare against brute force on all short binary words.
    N, M, A = 7, 3, ['a', 'b']
    for n in range(2, N + 1):
      for m in range(1, M + 1):
        for text_tuple in itertools.product(A, repeat = n):
          t = '#' + ''.join(text_tuple)
          for word_tuple in itertools.product(A, repeat = m):
            w = '#' + ''.join(word_tuple)
            reference = list(forward.brute_force(t, w, n, m))
            self.check_all_exact_matches(t, w, n, m, reference, algorithm)
| StarcoderdataPython |
3378022 | #!/usr/bin/env python
import os
import sqlite3
# One-shot setup script (Python 2 syntax): creates the data directory and
# initialises the SQLite database from schema.sql, skipping whatever
# already exists.
print 'Creating directories ...',
path = os.path.join('data')
try:
    # EAFP existence check: os.stat raises OSError when the path is missing.
    os.stat(path)
except OSError:
    os.makedirs(path)
    print 'done'
else:
    print 'already done'
print 'Initialising database ...',
path = os.path.join('data', 'db.sqlite')
try:
    os.stat(path)
except OSError:
    # Database file missing: create it and run the DDL from schema.sql.
    c = sqlite3.connect(path)
    c.executescript(open('schema.sql').read())
    c.commit()
    c.close()
    print 'done'
else:
    print 'already done'
| StarcoderdataPython |
3312769 | #
# Copyright (C) 2022 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List
import pandas as pd
import numpy as np
import pmdarima
def cross_validation(arima_model: pmdarima.arima.ARIMA, df: pd.DataFrame, cutoffs: List[pd.Timestamp]) -> pd.DataFrame:
    """
    Cross-validation for time series forecasting with an ARIMA model.

    Simulates forecasts from the given historical cutoff points, updating the
    fitted model with the observations of each elapsed window before
    forecasting the next one (a modification of
    prophet.diagnostics.cross_validation adapted to ARIMA).

    :param arima_model: fitted pmdarima.arima.ARIMA model.
    :param df: pd.DataFrame of the historical data (columns "ds" and "y").
    :param cutoffs: list of pd.Timestamp cutoffs used during cross validation.
    :return: pd.DataFrame with forecast, confidence interval, actual value and cutoff.
    """
    start = df["ds"].min()
    end = df["ds"].max()
    bin_edges = [start] + cutoffs + [end]
    bin_labels = [start] + cutoffs
    # Keep only rows after the first cutoff and tag each with its window.
    test_df = df[df['ds'] > cutoffs[0]].copy()
    test_df["cutoff"] = pd.to_datetime(
        pd.cut(test_df["ds"], bins=bin_edges, labels=bin_labels))
    forecasts = []
    for prev_cutoff, cutoff in zip(bin_labels, cutoffs):
        forecasts.append(
            single_cutoff_forecast(arima_model, test_df, prev_cutoff, cutoff))
    # Update model with data in last cutoff
    last_df = test_df[test_df["cutoff"] == cutoffs[-1]]
    arima_model.update(last_df["y"].values)
    return pd.concat(forecasts, axis=0).reset_index(drop=True)
def single_cutoff_forecast(arima_model: pmdarima.arima.ARIMA, test_df: pd.DataFrame, prev_cutoff: pd.Timestamp,
                           cutoff: pd.Timestamp) -> pd.DataFrame:
    """
    Forecast for a single cutoff; used by the cross_validation function.

    :param arima_model: fitted pmdarima.arima.ARIMA model.
    :param test_df: pd.DataFrame used for updating the model and forecasting;
        must carry a "cutoff" column assigning each row to a window.
    :param prev_cutoff: pd.Timestamp of the previous forecast's cutoff; the
        observations between prev_cutoff and cutoff update the model first.
    :param cutoff: pd.Timestamp this simulated forecast starts from.
    :return: pd.DataFrame with forecast, confidence interval, actual value and cutoff.
    """
    # Fold the observations of the elapsed window into the model.
    history = test_df[test_df["cutoff"] == prev_cutoff]
    if not history.empty:
        arima_model.update(history[["ds", "y"]].set_index("ds"))
    # Forecast over the horizon belonging to this cutoff.
    horizon_df = test_df[test_df["cutoff"] == cutoff].copy()
    horizon = len(horizon_df["y"].values)
    forecast, conf_int = arima_model.predict(n_periods=horizon, return_conf_int=True)
    horizon_df["yhat"] = forecast.tolist()
    horizon_df[["yhat_lower", "yhat_upper"]] = np.asarray(conf_int).tolist()
    return horizon_df
| StarcoderdataPython |
3249769 | import numpy as np
import pandas as pd
import time, copy
import pickle as pickle
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from scipy.special import expit
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
import statsmodels.api as sm
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, Dropout, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.python.eager.context import num_gpus
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from sub_utils import exp_decay_scheduler, keras_count_nontrainable_params, resample_and_shuffle, create_tf_dataset, reshape_model_input
class Naive_Classifier:
    '''
    Naive baseline classifier that assigns a constant surrender rate,
    regardless of the feature configuration.

    Parameters
    ----------
    rate: Constant probability assigned to the positive class.
    '''
    def __init__(self, rate):
        self.rate = rate

    def predict_proba(self, X):
        # Column 0: P(class 0) = 1 - rate, column 1: P(class 1) = rate,
        # constant for every row of X.
        ones = np.ones(len(X))
        return np.stack((ones * (1 - self.rate), ones * self.rate), axis=1)

    def predict(self, X):
        return self.predict_proba(X)

    def predict_class(self, X, threshold=0.5):
        return self.predict_proba(X) > threshold
def create_ann(widths: list, actv: list, dropout: float, n_input: int, lrate: float):
    '''
    Build and compile one feed-forward network, used as an individual
    member of the ANN_bagging ensemble.

    widths/actv are parallel lists: one entry per layer (last entry is the
    output layer).  Dropout is inserted after every layer except the last.
    '''
    model = Sequential()
    last = len(widths) - 1
    for j in range(len(widths)):
        units, activation = widths[j], actv[j]
        if j == 0:
            # First layer declares the input dimension.
            model.add(Dense(units=units, activation=activation, input_dim=n_input))
        else:
            model.add(Dense(units=units, activation=activation))
        if j < last:
            # No dropout after the output layer.
            model.add(Dropout(rate=dropout))
    model.compile(loss='binary_crossentropy', metrics=['acc'], optimizer=Adam(lr=lrate))
    return model
def hpsearch_ann(**params):
    '''
    Translate a hyperparameter-search result dict into a compiled ANN.
    Helper bridging the hpsearch naming scheme and create_ann.
    '''
    depth = params['depth']
    hidden = [params['width_{}'.format(i + 1)] for i in range(depth)]
    activations = [params['actv']] * depth + ['sigmoid']
    return create_ann(widths=hidden + [1],
                      actv=activations,
                      dropout=params['dropout'],
                      n_input=params['n_input'],
                      lrate=params['lrate'])
def hpsearch_boost_ann(resampler='None', tf_dist_strat=None, **params):
    '''
    Translate hyperparameter-search parameters into an ANN_boost instance.
    '''
    return ANN_boost(N_models=params['n_boosting'],
                     N_input=params['n_input'],
                     width=params['width'],
                     act_fct=params['actv'],
                     lr=params['lrate'],
                     resampler=resampler,
                     tf_dist_strat=tf_dist_strat)
class Logit_model:
    '''
    A bagged version of the sklearn LogisticRegression model.

    Trains N_bag independent LogisticRegression models — each on an
    optionally resampled and shuffled copy of the training data, after
    polynomial feature engineering — and averages their predicted
    probabilities.

    Parameters
    ----------
    params: dict of keyword arguments forwarded to LogisticRegression.
        A 'random_state' entry, if present, is dropped so the bagged
        models do not all share one seed.  The caller's dict is not
        modified.
    poly_degrees: degrees passed to reshape_model_input for polynomial
        feature engineering.
    N_bag: number of models in the ensemble.
    resampler: 'None', 'undersampling' (RandomUnderSampler) or 'SMOTE'.
    '''
    def __init__(self, params, poly_degrees, N_bag = 5, resampler = 'None'):
        self.poly_degrees = poly_degrees
        self.resampler = resampler
        self.N_bag = N_bag
        # Work on a copy so the caller's dict is not mutated, and drop any
        # fixed seed explicitly instead of via a bare try/except del.
        params = dict(params)
        params.pop('random_state', None)
        self.models = [LogisticRegression(**params) for _ in range(self.N_bag)]

    def fit(self, X_train, y_train):
        '''
        Fit all individual models independently for data X_train, y_train.
        Each model sees its own (optionally resampled) shuffled copy.
        '''
        for i in range(self.N_bag):
            # optional resampling
            if self.resampler == 'undersampling':
                X, y = RandomUnderSampler(sampling_strategy='majority').fit_resample(X=X_train, y=y_train)
                # shuffle data, otherwise all resampled data are appended
                X, y = sklearn.utils.shuffle(X, y)
            elif self.resampler == 'SMOTE':
                X, y = SMOTE().fit_resample(X=X_train, y=y_train)
                # shuffle data, otherwise all oversampled data are appended
                X, y = sklearn.utils.shuffle(X, y)
            else:
                X, y = X_train, y_train
                X, y = sklearn.utils.shuffle(X, y)
            # polynomial feature engineering
            X_logit, y_logit = reshape_model_input(X, degrees_lst=self.poly_degrees), y
            # fit model i on its own view of the data
            self.models[i].fit(X_logit, y_logit)
        # allow for one-line notation of creating and fitting the model
        return self

    def predict_proba(self, X):
        '''
        Predict probabilities using the full ensemble of self.N_bag models
        (arithmetic mean of the individual predictions), shape (len(X), 2).
        '''
        X_logit = reshape_model_input(X, degrees_lst=self.poly_degrees)
        return np.sum(np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)]), axis=0) / self.N_bag

    def predict_proba_running_avg(self, X):
        '''
        Rolling-average predictions over the ensemble, i.e. the benefit of
        adding more individual models; shape (N_bag, len(X), 2).
        '''
        X_logit = reshape_model_input(X, degrees_lst=self.poly_degrees)
        return np.cumsum(np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)]), axis=0) / np.arange(1, self.N_bag + 1).reshape((-1, 1, 1))

    def predict_proba_individual(self, X):
        '''
        Predictions of each individual model as an array of shape
        (N_bag, len(X), 2).
        '''
        X_logit = reshape_model_input(X, degrees_lst=self.poly_degrees)
        return np.array([self.models[i].predict_proba(X_logit) for i in range(self.N_bag)])
class ANN_bagging:
    """
    Purpose: Build multiple ANN models, use the bagged predictor in combination with an optional resampling procedure to reduce the variance of a predictor.
    New version - compatible with hpsklearn optimized parameter values as input

    Initialize the architecture of all individual models in the bagging procedure.

    Inputs:
    -------
        N_models: Number of models to be included in bagging procedure
        hparams: dict of hyperparameters forwarded to hpsearch_ann
                 (keys used here: 'lrate', 'n_input', 'batch_size', plus
                 the keys hpsearch_ann consumes)
        tf_dist_strat: optional tf.distribute strategy; every tf call is
                 attempted inside its scope first and silently falls back
                 to the default strategy on failure
        resampler: 'None': No resampling
                   'SMOTE': SMOTE resampling
                   'undersampling': RandomUndersampling

    Outputs:
    --------
        None. Creates self.model object with type(object) = dict and the
        averaged self.ensemble keras model.
    """
    def __init__(self, N_models: int, hparams: dict, tf_dist_strat, resampler = 'None'):
        self.resampler = resampler
        self.model = {}
        self.hparams = hparams
        self.lr = hparams['lrate']
        self.tf_dist_strat = tf_dist_strat
        for i in range(N_models):
            # create model i (inside the distribution strategy if possible)
            try:
                with self.tf_dist_strat.scope():
                    self.model[i] = hpsearch_ann(**hparams)
            except:
                self.model[i] = hpsearch_ann(**hparams)
        # set ensemble model: average of all individual model outputs
        try:
            with self.tf_dist_strat.scope():
                INPUT = Input(shape = (self.hparams['n_input'],))
                self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
                # reduce learning rate for final fine-tuning of collective bagged model
                self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
        except:
            INPUT = Input(shape = (self.hparams['n_input'],))
            self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
            # reduce learning rate for final fine-tuning of collective bagged model
            self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])

    def re_init_ensemble(self):
        '''
        Note: If we load old parametrizations by setting self.model[i] = value, the self.ensemble does not update automatically.
        Hence, we need this value for consistently loading old values.
        '''
        # re-set ensemble model
        try:
            with self.tf_dist_strat.scope():
                INPUT = Input(shape = (self.hparams['n_input'],))
                self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
                # reduce learning rate for final fine-tuning of collective bagged model
                self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])
        except:
            INPUT = Input(shape = (self.hparams['n_input'],))
            self.ensemble = Model(inputs=INPUT, outputs = tf.keras.layers.Average()([self.model[i](INPUT) for i in range(len(self.model))]))
            # reduce learning rate for final fine-tuning of collective bagged model
            self.ensemble.compile(optimizer = Adam(learning_rate=self.lr/2), loss = 'binary_crossentropy', metrics = ['acc'])

    def fit(self, X_train, y_train, callbacks = [], val_share = 0.3, N_epochs = 200):
        """
        Purpose: Train all model instances in the bagging procedure.

        output:
            None. Updates parameters of all models in self.model
        input
            X_train, y_train:   Training data
            callbacks:          callbacks for training
            val_share, N_epochs: Additional arguments for training

        NOTE(review): 'callbacks=[]' is a mutable default argument; it is
        never mutated here (only 'callbacks+[...]' copies), but consider
        'callbacks=None' for safety.
        """
        # handle pandas-datatype
        if type(X_train)==type(pd.DataFrame([1])):
            X_train=X_train.values
        if type(y_train) == type(pd.DataFrame([1])):
            y_train=y_train.values

        # check if GPUs are available
        try:
            N_GPUs = self.tf_dist_strat.num_replicas_in_sync()
        except:
            N_GPUs = 1
        for i in range(len(self.model)):
            # utilze concept of resampling
            X,y = resample_and_shuffle(X_train, y_train, self.resampler)

            # transform into tf.data.Dataset
            try:
                train_data, val_data = create_tf_dataset(X, y, val_share, self.hparams['batch_size']*num_gpus())
            except:
                # go on with regular, numpy-data-type
                print('tf.data.Dataset could not be constructed. Continuing with numpy-data.')
                pass

            if len(self.model)==1:
                try:
                    self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
                                validation_data = val_data, verbose = 2, callbacks=callbacks)
                except:
                    print('using non-tf.data-format')
                    self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
                                validation_split= val_share, verbose = 2, callbacks=callbacks)
            else:
                if i==0:
                    # More compact view on models' training progress
                    print('Data of shape {} '.format(X.shape) + 'and balance factor {}'.format(sum(y)/len(y)))
                # Start training of model
                print('Training Model {}'.format(i))
                t_start = time.time()
                try:
                    self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
                                validation_data= val_data, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])
                except:
                    print('using non-tf.data-format')
                    self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
                                validation_split= val_share, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])
                n_epochs_trained = len(self.model[i].history.history['loss'])
                print('\t ... {} epochs'.format(n_epochs_trained))

                for _ in range(3):
                    print('\t ... Fine tuning')
                    # reduce learning rate
                    self.model[i].optimizer.learning_rate = self.model[i].optimizer.learning_rate/2
                    try:
                        self.model[i].fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
                                    validation_data= val_data, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])#, initial_epoch= n_epochs_trained)
                    except:
                        print('using non-tf.data-format')
                        self.model[i].fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs,
                                    validation_split= val_share, verbose = 2, callbacks=callbacks+[LearningRateScheduler(exp_decay_scheduler)])#, initial_epoch= n_epochs_trained)

                print('\t ... Overall time: {} sec.'.format(time.time()-t_start))
                print('\t ... Done!')

        # NOTE(review): the final fine-tuning reuses train_data/val_data (or
        # X, y) from the *last* loop iteration above.
        print('Final fine tuning of whole bagged estimator:')
        t_start = time.time()
        try:
            self.ensemble.fit(x=train_data, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs, validation_data= val_data, verbose = 0, callbacks=callbacks)
        except:
            print('using non-tf.data-format')
            self.ensemble.fit(x=X, y = y, batch_size= N_GPUs*self.hparams['batch_size'], epochs = N_epochs, validation_split= val_share, verbose = 0, callbacks=callbacks)
        print('\t ... {} epochs'.format(len(self.ensemble.history.history['val_loss'])))
        print('\t ... {} sec.'.format(time.time()-t_start))
        print('\t ... Done!')

        # Return object to allow for shorter/ single-line notation, i.e. ANN_bagging().fit()
        return self

    def predict(self, X):
        """
        Purpose: Predict event probability for data

        Inputs:
        -------
            X:  Input data

        Outputs:
        --------
            Predictions for all input data
        """
        # handle pandas-datatype
        if type(X)==type(pd.DataFrame([1])):
            X=X.values
        return self.ensemble.predict(X)

    def predict_proba(self, X):
        """
        Purpose: Predict event probability for data
        Replicate predict_proba method of Sequential() or Model() class to unify notation.
        See documentation of self.predict() method.
        """
        # handle pandas-datatype
        if type(X)==type(pd.DataFrame([1])):
            X=X.values
        return self.predict(X)

    def predict_classes(self, X, threshold = 0.5):
        """
        Purpose: Predict class memberships/ labels for data
        Replicate predict_classes method of Sequential() or Model() class to unify notation.
        """
        # handle pandas-datatype
        if type(X)==type(pd.DataFrame([1])):
            X=X.values
        return (self.predict(X)>= threshold)
class ANN_boost:
'''
Create a boosting instance with neural networks as weak learner instances.
As we add a new weak learner it will train primarily on errors of previous models. Boost rate equal 1, i.e. weak learners added by summation.
For the purpose of binary classification we impose a binary_crossentropy loss.
'''
    def __init__(self, N_models, N_input, width: int, act_fct: str, lr = 0.001, tf_dist_strat = None, resampler = 'None'):
        """
        Initialize the architecture of all individual models in the boosting procedure.
        Model style of weak learner: input -> hidden_layer -> actv_fct -> single output (incl linear actv) -> sigmoid actv (to be carved off when combining multiple weak learners)

        Inputs:
        -------
            N_models: Number of models to be included in the boosting procedure
            N_input: Number of input nodes
            width: width of the single hidden layer of each weak learner
            act_fct: activation function of the hidden layer; the boosting
                model adds a final sigmoid on top of the summed learners to
                ensure a proper probability distribution
            lr: initial learning rate for the Adam optimizer
            tf_dist_strat: optional tf.distribute strategy (used with fallback)
            resampler: 'None', 'SMOTE' or 'undersampling'

        Outputs:
        --------
            None. Creates self.model_base objects (list of weak learners).
        """
        self.N_models = N_models
        self.loss = 'binary_crossentropy'
        self.N_input = N_input
        self.width = width
        self.act_fct = act_fct
        self.tf_dist = tf_dist_strat
        # self.dropout_rate = dropout_rate # canceled; not useful with only one hidden layer of which we tune its width
        self.lr_init = lr
        self.optimizer = Adam(learning_rate=self.lr_init)
        self.resampler = resampler
        # training/validation loss history, accumulated across boosting steps
        self.history_val = []
        self.history_train = []
        self.training_steps = 0
        # boosted models will be assigned during fitting procedure
        #self.model_boost = [None]*self.N_models # depreciated version
        self.model_boost = None # Save memory by reusing file-space, i.e. not saving each intermediate boosting step separately as they are recorded by self.model_base
        # Create list of weak learner instances (compilation happens in creating functions)
        # NOTE(review): create_model_prior/create_model_learner are defined
        # elsewhere in this class — presumably prior = constant baseline,
        # learner = one-hidden-layer net; confirm against full source.
        self.model_base = [self.create_model_prior()]+[self.create_model_learner() for _ in range(self.N_models-1)]
    def fit(self, x, y, callbacks = [], val_share = 0.3, N_epochs = 200, N_batch = 64, correction_freq = 5):
        '''
        Fitting procedure for the ANN_boost object.

        Grows the boosted model one weak learner at a time: at step n the sum
        of learners 0..n-1 (plus a final sigmoid) is trained with only the
        newly added learner trainable; optionally a corrective step retrains
        all parameters jointly.

        Inputs:
        -------
            x: Input Data
            y: Targets
            callbacks: list of tf.keras.callbacks objects, e.g. earlyStopping
            val_share: share of (x,y) used for validation of the model during training and for potential callback options
            N_epochs: number of epochs for training
            N_batch: batch size for training
            correction_freq: frequency in which a corrective step is performed, e.g. 0: never, 1: every epoch, 5: every 5 epochs, ...
        '''
        # handle pandas-datatype
        if type(x)==type(pd.DataFrame([1])):
            x=x.values
            #print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
        if type(y) == type(pd.DataFrame([1])):
            y=y.values
            #print('ANN_boost.fit: y values changed from pandas.DataFrame to numpy.array')

        # optional resampling
        x,y = resample_and_shuffle(x, y, self.resampler)

        # transform into tf.data.Dataset (important: transformation after optional resampling)
        try:
            train_data, val_data = create_tf_dataset(x,y,val_share, N_batch*num_gpus())
        except:
            # go on with regular, numpy-data-type
            print('tf.data.Dataset could not be constructed. Continuing with numpy-data.')
            pass

        if self.N_input != x.shape[1]:
            raise ValueError('Error: Invalid input shape. Expected ({},) but given ({},)'.format(self.N_input, x.shape[1]))

        # iterate over number of weak learners included in boosting
        INPUT = Input(shape= (self.N_input,)) # re-use this input layer to avoid more cache-intensiv multi-inputs models
        for n in range(1,self.N_models+1):
            # build the boosted model from learners 0..n-1 (inside the
            # distribution strategy if available, plain fallback otherwise)
            try:
                with self.tf_dist.scope():
                    if n == 1:
                        # Note: Average Layer expects >= 2 inputs
                        # Add final sigmoid Activation for classification
                        self.model_boost = Model(inputs = INPUT, outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0](INPUT)))
                    else:
                        self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
                                        # Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
                                        outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
                                            tf.keras.layers.Add()(
                                                [self.model_base[i](INPUT) for i in range(n)]# .output for i in range(n)]
                                                )
                                            )
                                        )
                    # set trainable = True for newly added weak learner (relevant if we retrain model)
                    self.model_base[n-1].trainable = True
                    # compile model
                    self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
            except Exception as e:
                print('Booster not created within distribution strategy due to:')
                print(e)
                if n == 1:
                    # Note: Average Layer expects >= 2 inputs
                    # Add final sigmoid Activation for classification
                    self.model_boost = Model(inputs = INPUT, outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0](INPUT)))#.output))
                else:
                    self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
                                    # Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
                                    outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
                                        tf.keras.layers.Add()(
                                            [self.model_base[i](INPUT) for i in range(n)]# .output for i in range(n)]
                                            )
                                        )
                                    )
                # set trainable = True for newly added weak learner (relevant if we retrain model)
                self.model_base[n-1].trainable = True
                # compile model
                self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])

            # train boosting model
            print('Training Model {}'.format(n))
            print('\t trainable params: '+ str(keras_count_nontrainable_params(self.model_boost, trainable=True)))
            print('\t nontrainable params: '+ str(keras_count_nontrainable_params(self.model_boost, trainable=False)))
            t_start = time.time()
            if (n==1):
                # set weights = 0 and bias = sigmoid^-1(baseline_hazard)
                # so the first learner reproduces the empirical event rate
                try:
                    with self.tf_dist.scope():
                        self.model_boost.layers[1].set_weights([np.array([0]*self.N_input).reshape((-1,1)), np.array([np.log(y.mean()/(1-y.mean()))])])
                except Exception as e:
                    print('Setting weights of baseline-learner not performed within tf-distribution-strategy due to:')
                    print(e)
                    self.model_boost.layers[1].set_weights([np.array([0]*self.N_input).reshape((-1,1)), np.array([np.log(y.mean()/(1-y.mean()))])])
            else:
                try:
                    # if data in tf.data.Dataset format available
                    print('\t .. training on tf.data.Dataset')
                    self.model_boost.fit(x=train_data, validation_data = val_data, epochs = N_epochs, verbose = 2, callbacks=callbacks)
                except Exception as e:
                    print('Leaners not created within tf-distribution-strategy due to:')
                    print(e)
                    self.model_boost.fit(x=x, y = y, batch_size= N_batch, epochs = N_epochs, validation_split= val_share, verbose = 0, callbacks=callbacks)
                self.history_val += self.model_boost.history.history['val_loss']
                self.history_train += self.model_boost.history.history['loss']
                # evolutionary fitting of boosting model
                #self.fit_evolutionary(x=x, y=y, batch_size=N_batch, epochs=N_epochs, epochs_per_it=25, validation_split=val_share, callbacks=callbacks)

            print('\t ... {} epochs'.format(len(self.history_val)-self.training_steps))
            self.training_steps = len(self.history_val)
            print('\t ... {} sec.'.format(time.time()-t_start))
            #print('\t ... eval.: ', self.model_boost.evaluate(x,y, verbose=0)) # optional: display to observe progress of training; however, slows down training.
            print('\t ... Done!')

            # decaying influence of weak learners
            #self.optimizer.lr = self.lr_init*0.9**n

            # corrective step: set all parameters as trainable and update them using SGD
            if n>1:
                if (correction_freq > 0) & (n%correction_freq ==0):
                    self.corrective_step(model = self.model_boost, x=x, y=y, callbacks=callbacks,
                                    val_share=val_share, N_epochs = N_epochs, N_batch= N_batch)

            # set trainable = False for weak learner that has been included in the boosting model
            self.model_base[n-1].trainable = False
    def fit_evolutionary(self, x, y, batch_size, epochs, epochs_per_it, validation_split, callbacks):
        '''
        Customized training scheme, using early stopping/ callbacks and a iterative reduction of the initial learning rate.
        ## DEPRECIATED as not very affective in the given scenario

        Trains self.model_boost in chunks of epochs_per_it epochs, cycling
        the learning rate and stopping once the validation loss fails to
        improve by >1% for more than 3 consecutive chunks.
        '''
        self.model_boost.fit(x=x, y = y, batch_size= batch_size, epochs = epochs_per_it, validation_split=validation_split, verbose = 0, callbacks=callbacks)
        self.history_train += self.model_boost.history.history['loss']
        self.history_val += self.model_boost.history.history['val_loss']
        #print(self.history_train)
        #print(type(self.history_train))
        val_loss = min(self.history_val)
        #print('minimum val_loss: ', val_loss)
        evol_patience = 0
        for ep in range(epochs//epochs_per_it):
            # cycle the learning rate: lr_init * 1.2^(1..4), repeating
            self.optimizer.lr= self.lr_init*1.2**(1+ep%4)
            # compile to effectively update lr
            self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
            print(' \t Fine tuning step ', ep, '...', ' (val_loss: ', np.round_(val_loss,4), ')')
            self.model_boost.fit(x=x, y = y, batch_size=batch_size, epochs = epochs_per_it, validation_split=validation_split, verbose = 0, callbacks=callbacks)
            # record training/ validation history
            self.history_train += self.model_boost.history.history['loss']
            self.history_val += self.model_boost.history.history['val_loss']
            if min(self.history_val) < val_loss*0.99:
                val_loss = min(self.history_val)
            else:
                evol_patience += 1
                if evol_patience > 3:
                    break
    def corrective_step(self, model, x, y, callbacks = [], val_share = 0.3, N_epochs = 200, N_batch = 64):
        '''
        Perform a corrective step by updating all parameters of boosting model, i.e. all included weak learners.

        Temporarily marks the whole boosted model trainable, retrains it at
        half the initial learning rate, then locks it again.

        Returns the wall-clock training time in seconds.
        '''
        # handle pandas-datatype
        if type(x)==type(pd.DataFrame([1])):
            x=x.values
            #print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
        if type(y) == type(pd.DataFrame([1])):
            y=y.values
            #print('ANN_boost.fit: y values changed from pandas.DataFrame to numpy.array')

        # transform into tf.data.Dataset
        try:
            train_data, val_data = create_tf_dataset(x,y,val_share, N_batch*num_gpus())
        except:
            # go on with regular, numpy-data-type
            print('tf.data.Dataset could not be constructed. Continuing with numpy-data.')
            pass

        # allow updating of all parameters
        try:
            with self.tf_dist.scope():
                model.trainable = True
                model.compile(optimizer = Adam(lr=self.lr_init/2), loss = self.loss, metrics = ['acc'])
        except Exception as e:
            print('Leaners not created within tf-distribution-strategy due to:')
            print(e)
            model.trainable = True
            model.compile(optimizer = Adam(lr=self.lr_init/2), loss = self.loss, metrics = ['acc'])

        print('Corrective Step ... ')
        print('\t trainable params: '+ str(keras_count_nontrainable_params(model, trainable=True)))
        print('\t nontrainable params: '+ str(keras_count_nontrainable_params(model, trainable=False)))
        t_start = time.time()
        #self.fit_evolutionary(x=x, y=y, batch_size=N_batch, epochs=N_epochs, epochs_per_it=25, validation_split=val_share, callbacks=callbacks)
        try:
            # train with tf.data.dataset; explicitly indicate val_data; batch_size indicated in tf.data.dataset
            model.fit(x=train_data, epochs = N_epochs, validation_data= val_data, verbose = 2, callbacks=callbacks)
        except Exception as e:
            print('Model not created within tf-distribution-strategy due to:')
            print(e)
            model.fit(x=x, y = y, batch_size= N_batch, epochs = N_epochs, validation_split= val_share, verbose = 2, callbacks=callbacks)
        print('\t ... {} epochs'.format(len(model.history.history['val_loss'])))
        run_time = time.time()-t_start
        print('\t ... {} sec.'.format(run_time))
        print('\t ... Correction performed!')

        # Lock updates
        model.trainable = False
        return run_time
    def save_object(self, path):
        '''
        Function to save the ANN_boost object.
        Required, as e.g. Sequential()-Object in self.model_base[i] cannot be pickled or dilled.
        Hence, we save only the respective weights and provide a function load_object to restore the fully functional ANN_boost object.
        Note: load_ANN_boost_object is no ANN_boost object function. However, the loaded ANN_boost object uses object.restore_learners() to restore learners and boosted models.

        A fresh (untrained) ANN_boost is built as a backup container; its
        model_base is replaced by the trained weight lists before pickling.
        '''
        # save weights of learners
        #self.model_base = [self.model_base[i].get_weights() for i in range(self.N_models)]
        # delete boosted models temporarily for pickling; can be restored with weights of (trained) learners
        #cache = clone_model(self.model_boost)
        #cache.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
        model_backup = ANN_boost(N_models= self.N_models, N_input= self.N_input, width = self.width, act_fct = self.act_fct)
        model_backup.model_base = [sub_model.get_weights() for sub_model in self.model_base] # save only weights -> to be restored in self.restore_learners()
        # Note: Adam-object cannot be pickled in tf 2.4.
        # workaround: switch to string-information and restore full optimizer (incl. learning_rate) in restore_learners
        model_backup.optimizer = 'adam'
        #self.model_boost = None#*self.N_models
        with open( path, "wb" ) as file:
            pickle.dump(model_backup, file)
        print('ANN object dumped to ', path)
        #self.model_boost = cache
    def restore_learners(self):
        '''
        Restore the full Sequential() architecture of self.model_base[i] and the boosted
        model self.model_boost, which were replaced by plain weight lists in save_object()
        so the object could be pickled.

        Rebuilds each learner via create_model_prior()/create_model_learner(), loads the
        stored weights, and re-assembles the boosted model (sum of learner outputs + sigmoid).
        '''
        # self.model_base currently holds weight lists (as written by save_object)
        weights = copy.copy(self.model_base)
        # rebuild untrained architectures: prior model + (N_models-1) weak learners
        self.model_base = [self.create_model_prior()]+[self.create_model_learner() for _ in range(1,self.N_models)]
        # load the stored weights back into the rebuilt learners
        [self.model_base[i].set_weights(weights[i]) for i in range(self.N_models)]
        #print(self.model_base)
        # iterate over number of weak learners included in boosting
        for n in range(1,self.N_models+1):
            # single shared input layer (avoids a cache-heavy multi-input model)
            INPUT = Input(shape= (self.N_input,))
            if n == 1:
                # Note: Average Layer expects >= 2 inputs
                # Add final sigmoid Activation for classification
                #self.model_boost[n-1] = Model(inputs = self.model_base[0].input,
                #                    outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0].output))
                self.model_boost = Model(inputs = INPUT,#self.model_base[0].input,
                                    outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(self.model_base[0](INPUT)))#.output))
            else:
                #self.model_boost[n-1]
                self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
                                # Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
                                outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
                                    tf.keras.layers.Add()(
                                        [self.model_base[i](INPUT) for i in range(n)]# .output for i in range(n)]
                                        )
                                    )
                                )
            # set trainable = True for newly added weak learner (relevant if we retrain model)
            self.model_base[n-1].trainable = True
            # compile model (only the final iteration's compiled model survives)
            self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
def create_model_prior(self):
'''
Base model 0 in boosting structure; expresses a prior estimate (here constant rate) that will be improved by subsequent model created by create_model_learner.
'''
model = Sequential()
model.add(Dense(1, activation= 'linear', input_dim = self.N_input))
model.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
return model
def create_model_learner(self):
'''
Create architecture for weak learners in boosting strategy.
'''
model = Sequential()
# Hidden layer
try:
model.add(Dense(units = self.width, activation = self.act_fct, input_dim = self.N_input))
except:
# old implementation
model.add(Dense(units = self.width_lst[0], activation = self.act_fct_lst[0], input_dim = self.N_input))
print('sub_surrender_models, create_model_learner(): atributes width_lst and act_fct_lst depreciated!')
# Output layer
model.add(Dense(units = 1, activation = 'linear'))
model.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
return model
def prune_booster(self, n_learners:int):
'''
Take user input how many weak learners should be utilized. The rest will be discarded.
'''
assert n_learners<= self.N_models
assert n_learners > 1
INPUT = Input(shape= (self.N_input,)) # re-use this input layer to avoid more cache-intensiv multi-inputs models
self.model_boost = Model(inputs = INPUT,#[self.model_base[i].input for i in range(n)],
# Note: Average() needs list as input; use .output, not .outputs (-> list of lists)
outputs = tf.keras.layers.Activation(tf.keras.activations.sigmoid)(
tf.keras.layers.Add()(
[self.model_base[i](INPUT) for i in range(n_learners)]# .output for i in range(n)]
)
)
)
# compile model
self.model_boost.compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
def evaluate(self, x, y=None):
try:
# x is tf.data.Dataset
return self.model_boost.evaluate(x, verbose=0)
except:
return self.model_boost.evaluate(x,y, verbose=0)
def predict_proba(self, x):
"""
Purpose: Predict event probability for data
output:
\t Predictions for all input data
input:
\t X: \t Input data
"""
# handle pandas-datatype
if type(x)==type(pd.DataFrame([1])):
x=x.values
#print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
# Use last iteration of boosting procedure
# Note: tf.keras.models.Model() does not posses .predict_proba(), but only .predict()
return self.model_boost.predict(x)
def predict(self, x):
"""
Purpose: Predict event probability for data
output:
\t Predictions for all input data
input:
\t X: \t Input data
"""
# handle pandas-datatype
if type(x)==type(pd.DataFrame([1])):
x=x.values
#print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
# Use last iteration of boosting procedure
# Note: tf.keras.models.Model() does not posses .predict_proba(), but only .predict()
return self.model_boost.predict(x)
def predict_classes(self, x, threshold = 0.5):
"""
Purpose: Predict class memberships/ labels for data
Replicate predict_classes method of Sequential() or Model() class to unify notation.
"""
# handle pandas-datatype
if type(x)==type(pd.DataFrame([1])):
x=x.values
#print('ANN_boost.fit: x values changed from pandas.DataFrame to numpy.array')
return (self.predict(x)> threshold)
def analyze_ensemble(model, x, y, profile: int, poly_degrees_max = None):
    '''
    Check for different model types, i.e. Logit-ensemble, ANN-ensemble and ANN-booster, by how much an additional learner improves the performance.
    Goal: Determine a reasonable number for the depth of boosting/ no. of weak learners to work together, to limit computational effort

    Inputs:
    -------
        model: model(s) to evaluate; either a list of models or a single model
        x: input data, typically validation data
        y: target data, typically validation data
        profile: profile id; used only in the plot title
        poly_degrees_max: maximal polynomial degree; required for Logit_model entries

    Outputs:
    --------
        None; a plot with performance over number of learners is produced.

    Raises
    ------
    ValueError: for unsupported model types or a missing poly_degrees_max.
    '''
    if not isinstance(model, list):
        model = [model]
    # bugfix: previously "try: x_val = x.values / except: pass", which left x_val
    # undefined for plain numpy input and raised a NameError further below
    x_val = x.values if isinstance(x, pd.DataFrame) else x
    for m in model:
        if type(m) == ANN_bagging:
            # m.model is a dictionary of learners
            pred = [l.predict(x_val) for l in m.model.values()]
            # respect averaging effect of bagging-ensemble
            pred = np.cumsum(np.array(pred), axis = 0)/np.arange(1, len(pred)+1).reshape((-1,1,1))
            entropy = [log_loss(y_true = y, y_pred=p) for p in pred]
            plt.plot(range(1, len(pred)+1), entropy, label = 'NN (bag)')
        elif type(m) == ANN_boost:
            # m.model_base is a list of raw (pre-sigmoid) learners
            pred = [l.predict(x_val) for l in m.model_base]
            # Note: do not forget final sigmoid function to form boosted-ensemble-prediction
            pred = expit(np.cumsum(np.array(pred), axis = 0))
            entropy = [log_loss(y_true = y, y_pred=pred[i]) for i in range(len(pred))]
            plt.plot(range(1, len(pred)+1), entropy, label = 'NN (boost)')
        elif type(m) == Logit_model:
            # explicit raise instead of assert: validation must survive "python -O"
            if poly_degrees_max is None:
                raise ValueError('poly_degrees_max is required for Logit_model ensembles.')
            # m.models is a list of models
            pred = [l.predict_proba(reshape_model_input(x, degrees_lst=[poly_degrees_max]*x.shape[1]))[:,-1] for l in m.models]
            # respect averaging effect of bagging-ensemble
            pred = np.cumsum(np.array(pred), axis = 0)/np.arange(1, len(pred)+1).reshape((-1,1))
            entropy = [log_loss(y_true = y, y_pred=p) for p in pred]
            plt.plot(range(1, len(pred)+1), entropy, label = 'Logist. Regr.')
        else:
            raise ValueError('Model type not compatible with method!')
    plt.ylabel('entropy loss')
    plt.xlabel('# of learners')
    plt.yscale('log')
    plt.legend()
    plt.title(f'ensemble models of profile {profile}')
    plt.show()
########################################
########### LEGACY CODE ################
########################################
# Note: These classes are either not used, e.g. since Logit_boosting showed poor performance,
# or have been updated to a later version, e.g. Logit_model_old (with integrated feature preprocessing) -> Logit_model (a priori feature-preprocessing and K-Fold)
class Logit_model_old:
    '''
    Legacy logistic-regression wrapper (superseded by Logit_model, which moved
    feature preprocessing and K-fold fitting outside the class).

    Creates a logistic model from either sklearn or statsmodels (the latter offers
    significance analysis via summary()). Higher polynomial degrees of the input
    variables can be added without having to change the input data.
    Requirements:
    \t import statsmodels.api as sm
    \t from sklearn.linear_model import LogisticRegression
    '''
    # NOTE(review): polynomial_degrees is a mutable default argument; it is not
    # mutated in place here, but a per-call binding would be safer.
    def __init__(self, package='sklearn', polynomial_degrees = [1,1,1], resampler = 'None',
                 X = None, y = None):
        self.package = package
        # maximal polynomial degree allowed per feature
        self.poly_degrees = polynomial_degrees
        # Polynomial degrees of features selected after feed-forward fitting process
        self.poly_selected = [1]*len(polynomial_degrees)
        self.resampler = resampler
        if package == 'sklearn':
            self.model = LogisticRegression(solver = 'liblinear', penalty = 'l2')
        elif package == 'statsmodels':
            # statsmodels needs the (optionally resampled) data already at construction time
            if self.resampler == 'SMOTE':
                X,y = SMOTE().fit_resample(X=X, y=y)
            elif self.resampler == 'undersampling':
                X,y = RandomUnderSampler(sampling_strategy= 'majority').fit_resample(X=X, y=y)
            elif self.resampler == 'None':
                # Do nothing
                pass
            else:
                print('Error: Resampler not recognized!')
            self.model = sm.Logit(endog = y, exog = reshape_model_input(sm.add_constant(X),
                                                    degrees_lst =self.poly_degrees))
        else:
            print('Error: Package not valid!')
    def fit(self, X=None, y=None, val_share = 0.2):
        '''
        Fit a Logistic Regression object.
        Higher-order polynomials of the input features are added in a feed-forward manner up to the max input-polynomial degree.
        E.g. poly_degrees = [3,3,3] would result in checking [1,1,1], [2,1,1], .., [2,2,2], [3,2,2], ... [3,3,3]
        A degree increase is only kept if the validation error decreases.

        Parameters
        ----------
        X: Features
        y: Targets
        val_share: Share of (X,y) used for validation during the forward-selection of features
        '''
        if self.package == 'sklearn':
            if self.resampler == 'SMOTE':
                X,y = SMOTE().fit_resample(X=X, y=y)
                # shuffle data, otherwise all oversampled data are appended
                X,y = sklearn.utils.shuffle(X,y)
            elif self.resampler == 'undersampling':
                X,y = RandomUnderSampler(sampling_strategy= 'majority').fit_resample(X=X, y=y)
                # shuffle data, otherwise all oversampled data are appended
                X,y = sklearn.utils.shuffle(X,y)
            elif self.resampler == 'None':
                # Do nothing
                pass
            else:
                print('Error: Resampler not recognized!')
            # forward selection
            n = len(self.poly_degrees)
            # per feature: may its polynomial degree still be increased?
            bool_degree_increase = [True]*n
            degrees_start = [1]*n
            # baseline: all features enter with degree 1; train on the first
            # (1-val_share) share, validate via log-loss on the remainder
            best_model = self.model.fit(X = reshape_model_input(X[0:int((1-val_share)*len(X))],
                                        degrees_lst =degrees_start), y = y[0:int((1-val_share)*len(y))])
            best_model_eval = sklearn.metrics.log_loss(y_true = y[int((1-val_share)*len(y)):],
                                        y_pred=best_model.predict_proba(X = reshape_model_input(X[int((1-val_share)*len(X)):],
                                        degrees_lst =degrees_start))[:,-1])
            #
            for _ in range(1,max(self.poly_degrees)):
                # increase all elements/ orders in list stepwise each by magnitude of 1
                for i in range(n):
                    # check if degree permitted by self.poly_degrees
                    if (degrees_start[i]+1 <= self.poly_degrees[i])& bool_degree_increase[i]:
                        degrees_start[i]+=1
                        #print(degrees_start)
                        model_new = self.model.fit(X = reshape_model_input(X[0:int((1-val_share)*len(X))],
                                        degrees_lst =degrees_start), y = y[0:int((1-val_share)*len(y))])
                        # (2-column proba is accepted by log_loss for binary targets;
                        # equivalent to passing only the positive-class column)
                        model_new_eval = sklearn.metrics.log_loss(y_true = y[int((1-val_share)*len(y)):],
                                        y_pred = model_new.predict_proba(X = reshape_model_input(X[int((1-val_share)*len(X)):],
                                        degrees_lst =degrees_start)))
                        # compare validation error
                        if model_new_eval< best_model_eval:
                            # save new, best model reference
                            best_model_eval = copy.copy(model_new_eval)
                            best_model = copy.copy(model_new)
                        else:
                            # reverse increase of polynomial order and stop fwd-selection of feature i
                            degrees_start[i]-=1
                            #print('Validation error increased for feature {}.'.format(i))
                            bool_degree_increase[i] = False
                        #print(str(model_new.coef_.shape)+ '(new) vs. (best) ' + str(best_model.coef_.shape))
                #print('\n')
            self.poly_selected = degrees_start
            self.model = best_model
            # fit cross-validated model on selected poly.-degrees for all data
            #self.model = self.model.fit(X = reshape_model_input(X, degrees_lst =self.poly_selected), y = y)
            print('Logistic model built successfully; pruned to polynomial features with degrees {}'.format(self.poly_selected))
        if self.package == 'statsmodels':
            self.model = self.model.fit(method='bfgs', maxiter=100)
            print('Note: Fitting data for statsmodel provided at initialization.')
            print('Note: Forward selection of model features not implemented for "statsmodels-package".')
        return self
    def predict(self, X):
        """
        Purpose: Predict class for data.

        Note: for package == 'statsmodels' the returned values are probabilities,
        as sm.Logit.predict has no label output.

        output:
        \t Predictions for all input data
        input:
        \t X: \t Input data
        """
        if self.package == 'sklearn':
            return self.model.predict(X = reshape_model_input(df_input = X, degrees_lst =self.poly_selected))
        elif self.package == 'statsmodels':
            return self.model.predict(exog=reshape_model_input(df_input = sm.add_constant(X),
                                             degrees_lst =self.poly_selected))
        else:
            print('Error: Package unknown!')
    def predict_proba(self, X):
        """
        Purpose: Predict event probability for data
        Replicate predict_proba method of other model-classes to unify notation.
        See documentation of self.predict() method.
        """
        if self.package == 'sklearn':
            return self.model.predict_proba(X = reshape_model_input(df_input = X, degrees_lst =self.poly_selected))
        elif self.package == 'statsmodels':
            return self.model.predict(exog=reshape_model_input(df_input = sm.add_constant(X),
                                             degrees_lst =self.poly_selected))
        else:
            print('Error: Package unknown!')
    def predict_classes(self, X, threshold = 0.5):
        """
        Purpose: Predict class memberships/ labels for data
        Replicate predict_classes method of other model-classes to unify notation.
        """
        return self.predict_proba(X)>threshold
    def summary(self, X=None):
        '''
        Provide brief summary of coefficients, values and significance (significance for statsmodels only).

        Parameters
        ----------
        X: feature frame; required for package == 'sklearn' to recover the expanded column names.
        '''
        if self.package == 'sklearn':
            # rebuild the expanded feature names from a two-row slice of X
            df = pd.DataFrame(data = None,
                              columns = ['const.']+list(reshape_model_input(df_input = X.loc[0:1,:],
                                                           degrees_lst =self.poly_selected).columns))
            df.loc['',:] = [self.model.intercept_[0]]+self.model.coef_.flatten().tolist()
            print(df)
        elif self.package == 'statsmodels':
            # Use summary() of statsmodels.api.Logit object
            print(self.model.summary())
# preliminary - class not functional yet
class ANN_boost_grad:
    '''
    Create a gradient boosting instance with neural networks as weak learner instances.
    As we add a new weak learner it will train primarily on errors of previous models. Boost rate initialized with 1, but eventually adapted in corrective step.
    For the purpose of binary classification we impose a binary_crossentropy loss.

    NOTE(review): marked "not functional yet" above; retained as legacy code only.
    '''
    def __init__(self, N_models, N_input, width_lst = [], act_fct_lst = [], dropout_rate = 0, optimizer = 'adam'):
        """
        Initialize the architecture of all individual models in the boosting procedure.

        Inputs:
        -------
            N_models: Number of models to be included in the boosting procedure
            N_input: Number of input nodes
            width_lst: List containing the width for all layers, and hence implicitely also the depth of the network
            act_fct_lst: List containing the activation function for all layers.
                        Last entry should be sigmoid, as gradient boosting models add probability outputs of weak learners.
            dropout_rate: Dropout rate applied to all layers (except output layer);
                        dropout_rate = 0 will effectively disable dropout
            optimizer: optimizer which the models will be compiled with. Standard option: 'adam'
                        (the loss is fixed to 'binary_crossentropy')

        Outputs:
        --------
        None. Creates self.model_base objects with type(object) = dict
        """
        self.N_boost = N_models
        self.optimizer = optimizer
        self.loss = 'binary_crossentropy'
        self.N_input = N_input
        if act_fct_lst[-1] != 'sigmoid':
            raise Exception('Gradient boosting models adds probability outputs of weak learners. Final activation should be sigmoid!')
        # boosted models will be assigned during fitting procedure
        self.model_boost = {}
        # Create weak learner instances
        self.model_base = {}
        for i in range(N_models):
            # Create first model to to capture baseline hazard
            if i == 0:
                self.model_base[i] = Sequential()
                self.model_base[i].add(Dense(1, activation= 'sigmoid', input_dim = N_input))
            else:
                self.model_base[i] = Sequential()
                for j in range(len(width_lst)):
                    if j==0: # Specify input size for first layer
                        self.model_base[i].add(Dense(units = width_lst[j], activation = act_fct_lst[j], input_dim = N_input))
                    else:
                        self.model_base[i].add(Dense(units = width_lst[j], activation = act_fct_lst[j]))
                    if j<(len(width_lst)-1): # No dropout after output layer
                        self.model_base[i].add(Dropout(rate = dropout_rate))
            # compile base models
            self.model_base[i].compile(optimizer = self.optimizer, loss = self.loss, metrics = ['acc'])
    def fit(self, x, y, callbacks = [], val_share = 0.2, N_epochs = 200, N_batch = 64, correction_freq = 1):
        '''
        Fitting procedure for the ANN_boost_grad object.

        Inputs:
        -------
            x: Input Data
            y: Targets
            callbacks: list of tf.keras.callbacks objects, e.g. earlyStopping
            val_share: share of (x,y) used for validation of the model during training and for potential callback options
            N_epochs: number of epochs for training
            N_batch: batch size for training
            correction_freq: frequency in which a corrective step is performed, e.g. 0: never, 1: every epoch, 5: every 5 epochs, ...
        '''
        # handle pandas-datatype
        if type(x)==type(pd.DataFrame([1])):
            x=x.values
            #print('ANN_boost_grad.fit: x values changed from pandas.DataFrame to numpy.array')
        if type(y) == type(pd.DataFrame([1])):
            y=y.values
            #print('ANN_boost_grad.fit: y values changed from pandas.DataFrame to numpy.array')
        if self.N_input== x.shape[1]:
            pass
        else:
            print('Error: Invalid input shape. Expected ({},) but given ({},)'.format(self.N_input, x.shape[1]))
            exit()
        if type(y) != type(np.array([1])):
            # transform pd.series to np.array format -> required for tf.keras model and sample_weight
            y = y.values.reshape((-1,1))
        # iterate over number of weak learners included in boosting
        for n in range(1,self.N_boost+1):
            # train weak learners conditionally
            print('Training weak learner {}'.format(n))
            print('\t trainable params: '+ str(keras_count_nontrainable_params(self.model_base[n-1], trainable=True)))
            #print('\t nontrainable params: '+ str(keras_count_nontrainable_params(self.model_boost[n-1], trainable=False)))
            t_start = time.process_time()
            if n==1:
                # set weights = 0 and bias = sigmoid^-1(baseline_hazard)
                self.model_base[n-1].layers[-1].set_weights([np.array([0]*self.N_input).reshape((-1,1)),
                                                             np.array([-np.log((1-y.mean())/y.mean())])])
            else:
                # compute new targets based of 2nd order taylor approx of binary-crossentropy loss
                pred = self.model_boost[n-2].predict([x]*(n-1))
                g = (pred-y)/(pred*(1-pred)) # 1st order
                h = (-2*pred**2+y*(1+pred))/(pred*(1-pred))**2 # 2nd order
                #print('type(g): ' +str(type(g)))
                #print('type(h): ' +str(type(h)))
                #print('type(g/h): ' +str(type(g/h)))
                #print('\n')
                #print('g.shape: ' +str(g.shape))
                #print('h.shape: ' +str(h.shape))
                #print('g/h.shape: ' +str((g/h).shape))
                #print('y.shape: ' +str(y.shape))
                # train weak learner w.r.t. mse loss for new target; for faster convergence, normalize sample_weights
                self.model_base[n-1].fit(x=x, y = -g/h, sample_weight = h.flatten()/h.sum(), batch_size= N_batch, epochs = N_epochs,
                                         validation_split= val_share, verbose = 0, callbacks=callbacks)
            print('\t ... {} epochs'.format(len(self.model_base[n-1].history.history['val_loss'])))
            print('\t ... {} sec.'.format(time.process_time()-t_start))
            print('\t ... Done!')
            # add newly trained weak learner to boosting model
            if n == 1:
                # Note: Add Layer expects >= 2 inputs
                self.model_boost[n-1] = self.model_base[n-1]
            else:
                self.model_boost[n-1] = Model(inputs = [self.model_base[i].input for i in range(n)],
                                # Note: Add() needs list as input; use .output, not .outputs (-> list of lists)
                                outputs = tf.keras.layers.Add()(
                                    [self.model_base[i].output for i in range(n)]
                                    )
                                )
            self.model_boost[n-1].compile(loss = 'binary_crossentropy', optimizer = self.optimizer)
            # corrective step: set all parameters as trainable and update them using SGD
            if n>1:
                if (correction_freq > 0) & (n%correction_freq ==0):
                    self.corrective_step(model = self.model_boost[n-1], x=x, y=y, callbacks=callbacks,
                                         val_share=val_share, N_epochs = N_epochs, N_batch= N_batch)
    def corrective_step(self, model, x, y, callbacks = [], val_share = 0.2, N_epochs = 200, N_batch = 64):
        '''
        Perform a corrective step by updating all parameters of boosting model, i.e. all included weak learners.
        '''
        # handle pandas-datatype
        if type(x)==type(pd.DataFrame([1])):
            x=x.values
            #print('ANN_boost_grad.corrective_step: x values changed from pandas.DataFrame to numpy.array')
        if type(y) == type(pd.DataFrame([1])):
            y=y.values
            #print('ANN_boost_grad.corrective_step: y values changed from pandas.DataFrame to numpy.array')
        # allow updating of all parameters
        model.trainable = True
        print('Corrective Step ... ')
        print('\t trainable params: '+ str(keras_count_nontrainable_params(model, trainable=True)))
        print('\t nontrainable params: '+ str(keras_count_nontrainable_params(model, trainable=False)))
        t_start = time.process_time()
        # multi-input model: feed the same feature array to every learner input
        model.fit(x=[x]*len(model.inputs), y = y, batch_size= N_batch, epochs = N_epochs,
                  validation_split= val_share, verbose = 0, callbacks=callbacks)
        print('\t ... {} epochs'.format(len(model.history.history['val_loss'])))
        print('\t ... {} sec.'.format(time.process_time()-t_start))
        print('\t ... Correction performed!')
        # Lock updates
        model.trainable = False
    def predict_proba(self, x):
        """
        Purpose: Predict event probability for data
        output:
        \t Predictions for all input data
        input:
        \t X: \t Input data
        """
        # handle pandas-datatype
        if type(x)==type(pd.DataFrame([1])):
            x=x.values
            #print('ANN_boost_grad.predict_proba: x values changed from pandas.DataFrame to numpy.array')
        # Use last iteration of boosting procedure
        # Note: tf.keras.models.Model() does not posses .predict_proba(), but only .predict()
        return self.model_boost[self.N_boost-1].predict([x]*self.N_boost)
    def predict(self, x):
        """
        Purpose: Predict event probability for data
        output:
        \t Predictions for all input data
        input:
        \t X: \t Input data
        """
        # handle pandas-datatype
        if type(x)==type(pd.DataFrame([1])):
            x=x.values
            #print('ANN_boost_grad.predict: x values changed from pandas.DataFrame to numpy.array')
        # Use last iteration of boosting procedure
        # Note: tf.keras.models.Model() does not posses .predict_proba(), but only .predict()
        return self.model_boost[self.N_boost-1].predict([x]*self.N_boost)
    def predict_classes(self, x, threshold = 0.5):
        """
        Purpose: Predict class memberships/ labels for data
        Replicate predict_classes method of Sequential() or Model() class to unify notation.

        NOTE(review): self.predict() already replicates x for the multi-input model,
        so passing [x]*self.N_boost here looks like a double replication — confirm.
        """
        # handle pandas-datatype
        if type(x)==type(pd.DataFrame([1])):
            x=x.values
            #print('ANN_boost_grad.predict_classes: x values changed from pandas.DataFrame to numpy.array')
        return (self.predict([x]*self.N_boost)> threshold)
class Logit_boosting:
    '''
    Boosting ensemble of logistic models: an AdaBoostClassifier with
    LogisticRegression weak learners, an optional resampling step and
    polynomial expansion of the input features via reshape_model_input().
    '''
    def __init__(self, N_models, polynomial_degrees = [1,1,1], bool_ada_boost =True, resampler = 'None',
                 package='sklearn', X = None, y = None):
        '''
        Parameters
        ----------
        N_models: number of weak learners of the AdaBoost ensemble.
        polynomial_degrees: per-feature polynomial degrees for reshape_model_input().
        bool_ada_boost: must be True; a LogitBoost variant is not implemented.
        resampler: 'None', 'undersampling' or 'SMOTE'.
        package, X, y: kept for interface symmetry with the other Logit_* classes; unused here.

        Raises
        ------
        ValueError: if bool_ada_boost is False.
        '''
        self.resampler = resampler
        self.polynomial_degrees = polynomial_degrees
        self.bool_ada_boost = bool_ada_boost
        if self.bool_ada_boost:
            self.model = AdaBoostClassifier(base_estimator=LogisticRegression(),n_estimators=N_models)
        else:
            # LogitBoost fits weak learners on residuals (a regression task), which is
            # incompatible with the crossentropy-based classification setup used here
            raise ValueError('logitBoost not implemented')
    def _resample(self, X, y):
        '''
        Apply the configured resampling strategy to (X, y) and shuffle the result.
        Any other resampler value (including 'None') returns the data unchanged.
        '''
        if self.resampler == 'undersampling':
            X,y = RandomUnderSampler(sampling_strategy= 'majority').fit_resample(X=X, y=y)
            # shuffle data, otherwise all resampled data are appended at the end
            X,y = sklearn.utils.shuffle(X,y)
        elif self.resampler == 'SMOTE':
            X,y = SMOTE().fit_resample(X=X, y=y)
            # shuffle data, otherwise all oversampled data are appended at the end
            X,y = sklearn.utils.shuffle(X,y)
        return X,y
    def fit(self, X_train, y_train, val_share = 0.2):
        """
        Purpose: Train the boosting ensemble.

        Inputs:
        -------
        \t X_train, y_train: \t Training data
        \t val_share: \t kept for interface symmetry; AdaBoostClassifier.fit does not use it

        Outputs:
        --------
        \t self, to allow single-line notation, i.e. Logit_boosting(...).fit(...)
        """
        # Note: __init__ raises for bool_ada_boost == False, so the AdaBoost path is
        # the only reachable configuration (a duplicated dead branch was removed here)
        X,y = self._resample(X_train, y_train)
        # include higher polynomial-degrees of input features
        X = reshape_model_input(X, degrees_lst =self.polynomial_degrees)
        # utilize AdaBoostClassifier object
        self.model.fit(X,y)
        return self
    def predict_proba(self, X):
        """
        Purpose: Predict event probability for data

        output:
        \t (n_samples, 2) probability array of the AdaBoost ensemble
        input:
        \t X: \t Input data (expanded polynomially before prediction)
        """
        return self.model.predict_proba(X = reshape_model_input(X, degrees_lst =self.polynomial_degrees))
    def predict(self, X):
        """
        Purpose: Predict label for data (AdaBoost decision rule).
        """
        return self.model.predict(X = reshape_model_input(X, degrees_lst =self.polynomial_degrees))
    def predict_classes(self, X, threshold = 0.5):
        """
        Purpose: Predict class memberships/ labels for data
        Replicate predict_classes method of Sequential() or Model() class to unify notation.

        NOTE(review): threshold is currently ignored — AdaBoostClassifier.predict
        applies its own decision rule; kept as-is to preserve behavior.
        """
        return self.model.predict(X = reshape_model_input(X, degrees_lst =self.polynomial_degrees))
class Logit_bagging:
    '''
    Bagging ensemble of Logit_model instances with an optional resampling
    procedure applied independently for every ensemble member.
    '''
    def __init__(self, N_models, package='sklearn', polynomial_degrees = [1,1,1], resampler = 'None',
                 X = None, y = None):
        '''
        Create N_models independent Logit_model instances, keyed by index.
        '''
        self.resampler = resampler
        self.model = {}
        for idx in range(N_models):
            self.model[idx] = Logit_model(package=package, polynomial_degrees = polynomial_degrees,
                                          resampler = resampler, X = X, y = y)
    def fit(self, X_train, y_train, val_share = 0.2):
        """
        Purpose: Train all model instances in the bagging procedure.

        For each member the training data is (optionally) resampled and shuffled
        anew, so the members see different data compositions/orderings.

        Inputs:
        -------
        \t X_train, y_train: \t Training data
        \t val_share: \t validation share forwarded to Logit_model.fit

        Outputs:
        --------
        \t self, to allow single-line notation, i.e. Logit_bagging(...).fit(...)
        """
        for learner in self.model.values():
            if self.resampler == 'undersampling':
                X, y = RandomUnderSampler(sampling_strategy= 'majority').fit_resample(X=X_train, y=y_train)
                # shuffle, otherwise all resampled observations sit at the end
                X, y = sklearn.utils.shuffle(X, y)
            elif self.resampler == 'SMOTE':
                X, y = SMOTE().fit_resample(X=X_train, y=y_train)
                # shuffle, otherwise all oversampled observations sit at the end
                X, y = sklearn.utils.shuffle(X, y)
            else:
                X, y = X_train, y_train
                X, y = sklearn.utils.shuffle(X, y)
            learner.fit(X=X, y = y, val_share = val_share)
        return self
    def predict_proba(self, X):
        """
        Purpose: Predict event probability for data as the plain average of
        the member predictions.

        input:
        \t X: \t Input data
        """
        member_preds = [learner.predict_proba(X) for learner in self.model.values()]
        return sum(member_preds)/len(member_preds)
    def predict(self, X):
        """
        Purpose: Predict label for data (alias of predict_classes with the
        default threshold), to unify notation with the keras-style models.
        """
        return self.predict_classes(X)
    def predict_classes(self, X, threshold = 0.5):
        """
        Purpose: Predict boolean class memberships: averaged probability > threshold.
        """
        return (self.predict_proba(X)> threshold)
class Tree_Classifier:
    '''
    Build a tree based classifier. Fitting is based on pruning w.r.t. binary crossentropy, i.e. log_loss().
    For the RandomForestClassifier option we prune our tree automatically at a max_depth=5

    Parameters
    ----------
    criterion: Method for binary splits of tree fitting procedure, {'gini', 'entropy'}
    bool_cv: if True, prune the single tree via GridSearchCV over ccp_alpha/criterion instead of a depth search
    bool_forest: Boolean to decide whether a DecisionTreeClassifier (False) or a RandomForestClassifier (True) will be built.
    N_trees: number of trees for the forest option
    alpha: cost-complexity pruning parameter ccp_alpha
    resampling: Indicates if a resampling strategy is used {'SMOTE', 'undersampling'}, or not {'None'}
    '''
    def __init__(self, criterion = 'gini', bool_cv = False, bool_forest = False, N_trees = 1, alpha = 0,
                 resampling = 'None'):
        if bool_forest == False:
            self.model = sklearn.tree.DecisionTreeClassifier(criterion= criterion, ccp_alpha = alpha)
        else:
            self.N_trees = N_trees
            # forest variant is capped at a fixed max_depth of 5 (see class docstring)
            self.model = sklearn.ensemble.RandomForestClassifier(criterion = criterion, n_estimators=self.N_trees,
                                                                 max_depth= 5, ccp_alpha = alpha )
        self.criterion = criterion
        self.resampling = resampling
        self.bool_forest = bool_forest
        self.bool_cv = bool_cv
    def fit(self, X, y, val_share = 0.2, max_depth = 10):
        '''
        Fit classifier, including a pruning procedure.
        Pruning is performed w.r.t. binary_crossentropy evaluated on a validation set and up to a maximal depth.

        Parameters:
        -----------
        val_share: Determines share of training data used for validation
        max_depth: Maximum depth considered in pruning procedure
        '''
        if self.resampling == 'SMOTE':
            X,y = SMOTE().fit_resample(X,y)
            # shuffle data, otherwise all oversampled data are appended
            X,y = sklearn.utils.shuffle(X,y)
        elif self.resampling == 'undersampling':
            # NOTE(review): unlike the Logit_* classes, no sampling_strategy='majority'
            # is passed here — confirm whether the default strategy is intended
            X,y = RandomUnderSampler().fit_resample(X,y)
            # shuffle data, otherwise all oversampled data are appended
            X,y = sklearn.utils.shuffle(X,y)
        elif self.resampling == 'None':
            pass # do nothing
        else:
            print('Error: Resampling Option is not yet implemented!')
        if self.bool_forest == False:
            # Perform pruning for DecisionTreeClassifier
            if self.bool_cv:
                # cross-validated pruning over ccp_alpha and split criterion
                model_cv = sklearn.model_selection.GridSearchCV(estimator=self.model,
                                                                param_grid= {'ccp_alpha':[0, 0.001, 0.0001, 0.00001, 0.000001],
                                                                             'criterion':['gini', 'entropy']})
                model_cv.fit(X,y)
                self.model = model_cv.best_estimator_
            else:
                # pruning purely wrt max_depth and validated entropy-loss
                classifier = {}
                classifier_eval = {}
                for i in range(1,max_depth+1):
                    # Build models up to max_depth
                    classifier[i] = sklearn.tree.DecisionTreeClassifier(criterion=self.criterion, min_samples_leaf = 20,
                                                                        max_depth=i).fit(X=X[0:int((1-val_share)*len(X))],
                                                                                         y=y[0:int((1-val_share)*len(y))])
                    # Evaluate log_loss of models
                    classifier_eval[i] = sklearn.metrics.log_loss(y_true = y[int((1-val_share)*len(y)):],
                                                                  y_pred = classifier[i].predict_proba(X=X[int((1-val_share)*len(X)):])[:,-1])
                # depth with minimal validation loss (+1 since depths start at 1)
                best = 1+np.argmin(list(classifier_eval.values()))
                #plt.plot([i for i in range(1,max_depth+1)], [classifier_eval[i] for i in range(1,max_depth+1)])
                print('Note: Pruning of tree classifier sucessful with max_depth = {}'.format(best))
                # Build model w.r.t. optimal depth
                self.model = classifier[best]
        else:
            # Build RandomForestClassifier with a imposed max_depth=5
            self.model.fit(X, y)
        # return object to allow for compact notation of e.g. Tree_classifier().fit()
        return self
    def predict_proba(self, X):
        '''
        Predict event probability of data X
        '''
        return self.model.predict_proba(X)
    def predict(self, X):
        '''
        Predict class membership of data X
        '''
        return self.model.predict(X)
    def predict_classes(self, X):
        '''
        Predict class membership of data X.

        Returns a (len(X), 2) array with column 1 = predicted label and
        column 0 = 1 - label, mirroring a two-class probability layout with hard labels.
        '''
        pred = np.zeros(shape=(len(X),2))
        pred_class = self.model.predict(X)
        pred[:,1] = pred_class
        pred[:,0] = 1- pred_class
        return pred
97005 | <filename>torrent.py
import urllib, requests, os
from bs4 import BeautifulSoup
'''
The base class for all torrent downloaders to derive from.
It shares an interface that is used throughout all download instances.
'''
# TODO: abstract
class TorrentDownloader(object):
    """Base class for all torrent downloaders.

    Shares the interface used by all download instances; subclasses override
    ``base`` with the site's root URL and add site-specific scraping.
    """

    def __init__(self):
        # Bug fix: the original assigned a throwaway local variable ``base``,
        # leaving the instance without the attribute. Bind it to ``self``.
        self.base = ''

    def download(self, magnet_link):
        """Open *magnet_link* with the OS-registered torrent client.

        This merely starts the magnet link using the default software that
        the operating system designates (for example, uTorrent). It does not
        itself download the file.

        Parameters
        ----------
        magnet_link : str
            Full magnet URI, usually found on a torrent site's anchor tag.
        """
        try:
            # os.startfile is Windows-only; it delegates to the default handler.
            os.startfile(magnet_link)
        except Exception as err:
            print("Err: %s" % err)
'''
A specific implementation of a class that downloads from the site piratebay: www.piratebay.se
'''
class PirateBayDownloader(TorrentDownloader):
    """Torrent downloader that scrapes magnet links from The Pirate Bay."""

    def __init__(self):
        # Bug fix: bind the site root to the instance -- the original created
        # a local variable ``base`` that was immediately discarded, so every
        # later reference to the site root failed.
        self.base = 'https://thepiratebay.se'

    def get_magnet(self, url):
        """Request *url* and return the magnet link found on the page.

        Parameters
        ----------
        url : str
            Full path to the torrent page, e.g.
            https://thepiratebay.se/torrent/10176531/Game.of.Thrones...

        Raises
        ------
        Exception
            If the page could not be requested.
        """
        request = requests.get(url)
        if request:
            html = BeautifulSoup(request.content)
            if html:
                magnet = html.find('div', attrs={'class': 'download'})
                return magnet.a['href']
        else:
            raise Exception("Could not request url %s" % url)

    def search_hdshows(self, query):
        """Search HD shows on The Pirate Bay, ordered by most seeds.

        Parameters
        ----------
        query : str
            Search text; it is URL-escaped automatically.
        """
        # Local import fix: a bare ``import urllib`` does not expose
        # ``urllib.request`` on Python 3.
        from urllib.request import pathname2url
        # Bug fix: the original referenced the undefined global ``base``.
        url = self.base + '/search/%s/0/7/208' % pathname2url(query)
        request = requests.get(url)
        if request:
            html = BeautifulSoup(request.content)
            if html:
                divs = html.find_all('div', attrs={'class':'detName'})
                for div in divs:
                    anchor = div.a
                    (name, href) = anchor.text, anchor['href']
                    # TODO: check if file does not already exist in local lib
                    if True:
                        download_url = self.base + href
                        # Bug fix: ``get_magnet`` is a method, call it on self.
                        magnet_link = self.get_magnet(download_url)
                        print(magnet_link)
        else:
            raise Exception("Could not request url %s" % url)
| StarcoderdataPython |
3282659 | #!/usr/bin/env python
"""
.. module:: TestRunner
:synopsis: Runs the unit test suite.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import os
import subprocess
import sys
from enum import Enum
from baseline import BaseLine
from environment import EnvironmentNames, Environment
from logger import Logger
from tools import Colors
class TestRunner:
    """
    Executes the available unit tests.
    """
    def __init__(self):
        """
        Performs initialization.
        """
        self.logger = Logger()
        self.environment:Environment = Environment()
    def initialize_database(self, database: str)-> bool:
        """
        Initialize the database.
        Returns True when the dotnet initialization process exits with code 0.
        Parameters
        ----------
        database
            The database provider (SQLite | TBD)
        """
        self.logger.logInformation("\nBegin initialize database for {}".format(database), Colors.BrightYellow)
        # runs the ModelRelief app once with flags that seed the DB and exit
        status = subprocess.call (["dotnet", "run", "--no-launch-profile", "-p", "ModelRelief", "--MRExitAfterInitialization=True", "--MRUpdateSeedData=False", "--MRInitializeDatabase=True", "--MRSeedDatabase=True", "--MRDatabaseProvider={}".format(database)])
        self.logger.logInformation("End initialize database for {}".format(database), Colors.BrightYellow)
        return status == 0
    def create_baseline(self, database: str):
        """
        Create the unit test database baseline.
        Parameters
        ----------
        database
            The database provider (SQLite | TBD)
        """
        baseline = BaseLine(self.logger, database)
        baseline.create_baseline_database()
    def execute_database_tests(self, database: str):
        """
        Execute the unit tests for the given database provider.
        Parameters
        ----------
        database
            The database provider (SQLite | TBD)
        """
        self.logger.logInformation("\nBegin test execution for {}".format(database), Colors.BrightGreen)
        # the provider is communicated to the test process via the environment
        os.environ[EnvironmentNames.MRDatabaseProvider] = database
        subprocess.call (["dotnet", "test", "--results-directory", "ModelRelief.Test/TestResults", "--logger", "trx;LogFileName={}TestResults.trx".format(database), "ModelRelief.Test"])
        self.logger.logInformation("End test execution for {}".format(database), Colors.BrightGreen)
    def execute_relief_tests(self ):
        """
        Execute the Relief C++ extension unit tests.
        """
        self.logger.logInformation("\nBegin Relief C++ extension tests", Colors.BrightGreen)
        relief_executable = os.path.join (os.environ[EnvironmentNames.MRSolution], "Relief/tests/bin/reliefUnitTests")
        print (relief_executable)
        subprocess.run (relief_executable)
        self.logger.logInformation("End Relief C++ extension tests.", Colors.BrightGreen)
    def run (self):
        """
        For all databases:
            1) Initialize database and user store.
            2) Create clean unit test database.
            3) Execute unit tests.
        """
        os.system('clear')
        self.logger.logInformation("\nTestRunner start", Colors.BrightCyan)
        # save environment
        self.environment.push()
        # database
        databases = ["SQLite"]
        for database in databases:
            # initialize database and user store
            # abort the whole run if initialization failed
            if not self.initialize_database(database):
                return
            # unit tests
            self.execute_database_tests(database)
        # Relief C++ extension
        self.execute_relief_tests()
        # restore environment
        self.environment.pop()
        self.logger.logInformation("\nTestRunner end", Colors.BrightCyan)
def main():
    """
    Main entry point.
    Changes the working directory to the solution root (taken from the
    MRSolution environment variable) and runs the full test suite.
    """
    # run from solution root
    root = os.environ[EnvironmentNames.MRSolution]
    os.chdir(root)
    testrunner = TestRunner()
    testrunner.run()
if __name__ == "__main__":
print (sys.version)
main()
| StarcoderdataPython |
1737881 | # Generated by Django 3.1.8 on 2021-04-24 01:11
import core.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django schema migration: adds several free-text/JSON
    # detail columns to the ``report`` model. Field definitions should not be
    # edited by hand; regenerate via ``makemigrations`` if the model changes.
    dependencies = [
        ("core", "0101_new_availability_tags"),
    ]
    operations = [
        migrations.AddField(
            model_name="report",
            name="full_address",
            field=models.TextField(
                blank=True,
                help_text="Update for the entire address, including city and zip code",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="report",
            name="hours",
            field=models.TextField(
                blank=True, help_text="Update for hours information", null=True
            ),
        ),
        migrations.AddField(
            model_name="report",
            name="planned_closure",
            field=models.DateField(
                blank=True,
                help_text="Date this site a site plans to stop operating",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="report",
            name="restriction_notes",
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name="report",
            name="vaccines_offered",
            field=models.JSONField(
                blank=True,
                help_text="JSON array of strings representing vaccines on offer here",
                null=True,
            ),
        ),
        migrations.AddField(
            model_name="report",
            name="website",
            field=core.fields.CharTextField(
                blank=True,
                help_text="Update for website information",
                max_length=65000,
                null=True,
            ),
        ),
    ]
| StarcoderdataPython |
164695 | <filename>recommend.py
#-*- coding:utf-8 -*-
import svdRec
import numpy as np
from numpy import *
import MySQLdb
import sys
def loadMatrixFromMysql(conn):
    """Build the full user x book rating matrix from the database.

    Missing ratings are left as zero. Note: assumes user/book ids are
    contiguous 1-based integers -- TODO confirm against the schema.
    """
    cur = conn.cursor()
    cur.execute("select count(*) from ot_book")# read the total number of books
    totalbook=cur.fetchone()[0];
    cur.execute("select count(*) from ot_member")# read the total number of users
    totaluser=cur.fetchone()[0];
    cur.execute("select * from ot_score")# read all rating records
    temp = cur.fetchall()
    a = np.zeros(shape=(totaluser,totalbook))# build a users x books rating matrix initialized to zero
    for isbn_id,user_id,value in temp:
        a[user_id-1,isbn_id-1]=value# fill the user's rating into the matrix (ids are 1-based)
    cur.close()
    return a
def saveToMysql(conn,data):
    """Replace the contents of ot_recommend with the new recommendations.

    ``data`` is expected to be an iterable of (book_index, score) pairs with
    0-based book indices -- TODO confirm against svdRec.recommend's output.
    The caller is responsible for committing the transaction.
    """
    cur=conn.cursor()
    cur.execute('truncate table ot_recommend')# clear the recommendation table
    print data
    for item in data:
        # convert the 0-based matrix index back to a 1-based book id
        book_id=[int(item[0])+1]
        cur.execute('insert into ot_recommend values (%s)',book_id)# insert the recommendation into the table
    cur.close()
    return
# Script entry: connect, load the rating matrix, compute recommendations for
# the user id given on the command line, and persist them.
conn= MySQLdb.Connect(
    host = '127.0.0.1',
    port = 3306,
    user = 'root',
    passwd = '',
    db = 'onethink',
    charset = 'utf8'
)
myMat=mat(loadMatrixFromMysql(conn))
#print myMat
# print svdRec.ecludSim(myMat[:,0],myMat[:,1])
# print svdRec.ecludSim(myMat[:,0],myMat[:,0])
# print svdRec.cosSim(myMat[:,0],myMat[:,1])
# print svdRec.cosSim(myMat[:,0],myMat[:,0])
# print svdRec.pearsSim(myMat[:,0],myMat[:,1])
# print svdRec.pearsSim(myMat[:,0],myMat[:,0])
data=svdRec.recommend(myMat,sys.argv[1])# sys.argv[1]: generate recommendations for the current user
# print svdRec.recommend(myMat,5,simMeas=svdRec.ecludSim)
# print svdRec.recommend(myMat,5,simMeas=svdRec.pearsSim)
saveToMysql(conn,data)
conn.commit()
conn.close()
1739955 | <reponame>niyunsheng/Mtianyan-AdvancePython
# To make the semantics more explicit, Python introduced the async and await keywords for defining native coroutines
# async def downloader(url):
# return "bobby"
import types
@types.coroutine
def downloader(url):
    # @types.coroutine marks this plain generator as awaitable from native
    # (async def) coroutines; it yields "bobby" to whatever drives it via
    # send() and then finishes (await result is None).
    yield "bobby"
async def download_url(url):
    """Native coroutine that awaits the generator-based ``downloader``."""
    # do some preparatory work here if needed
    page = await downloader(url)
    return page
if __name__ == "__main__":
coro = download_url("http://www.imooc.com")
# next(None)
coro.send(None) | StarcoderdataPython |
3353731 | '''
Kattis - fraction
A relatively elementary problem, but it's worth knowing how to solve it. Simply convert between
fractions and continued fractions. Way easier with python fraction library compared to C++.
Time: O(len of continued fraction), Space: O(len of continued fraction)
'''
from fractions import Fraction
import copy
def from_continued_fraction(o_pq):
    """Evaluate a continued fraction given its coefficient list.

    Computes a0 + 1/(a1 + 1/(a2 + ...)) by folding from the innermost term
    outward. Unlike the original, no ``copy.deepcopy`` is needed -- the
    coefficients are ints and the input list is never mutated.

    Parameters
    ----------
    o_pq : sequence of int
        Continued-fraction coefficients, most significant first.

    Returns
    -------
    fractions.Fraction
        The exact rational value of the continued fraction.
    """
    terms = list(reversed(o_pq))
    acc = Fraction(terms[0])
    for term in terms[1:]:
        acc = Fraction(term) + Fraction(1) / acc
    return acc
def to_continued_fraction(a):
    """Return the continued-fraction coefficient list of Fraction *a*.

    Iterative Euclidean expansion: repeatedly split off the floor term and
    invert the fractional remainder until it vanishes.
    """
    coefficients = []
    while True:
        whole = a.numerator // a.denominator
        coefficients.append(whole)
        remainder = a - whole
        if remainder == 0:
            break
        a = 1 / remainder
    return coefficients
# Script entry: read two continued fractions from stdin (their lengths na/nb
# are read but not otherwise used), then print the continued-fraction form of
# their sum, difference, product and quotient, one per line.
na, nb = list(map(int, input().split()))
a_pq = list(map(int, input().split()))
b_pq = list(map(int, input().split()))
a = from_continued_fraction(a_pq)
b = from_continued_fraction(b_pq)
s = a + b
d = a - b
m = a * b
q = a / b
for i in to_continued_fraction(s):
    print(i, end=" ")
print()
for i in to_continued_fraction(d):
    print(i, end=" ")
print()
for i in to_continued_fraction(m):
    print(i, end=" ")
print()
for i in to_continued_fraction(q):
    print(i, end=" ")
print()
| StarcoderdataPython |
3362876 | # -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
@authors: <NAME> <<EMAIL>>
"""
import os.path
from os import environ as env
import web
from karesansui import KaresansuiGadgetException
from karesansui.lib.file.k2v import K2V
from karesansui.lib.rest import Rest, auth
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_MIN, CHECK_MAX, CHECK_LENGTH
from karesansui.lib.const import PORT_MIN_NUMBER, PORT_MAX_NUMBER, \
EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH
from karesansui.lib.utils import is_param
def validates_mail(obj):
    """Validate the mail settings submitted on ``obj.input``.

    Checks server (domain name or IP), port (numeric, within the allowed
    range) and recipient e-mail address. Accumulated error messages are
    stored on ``obj.view.alert``; returns True when everything is valid.
    """
    checker = Checker()
    check = True
    # gettext-style translator bound to the request object
    _ = obj._
    checker.errors = []
    if not is_param(obj.input, 'server'):
        check = False
        checker.add_error(_('"%s" is required.') % _('Mail Server Name'))
    else:
        # the server may be given either as a hostname or as an IP address
        check_server = checker.check_domainname(_('Mail Server Name'),
                                                obj.input.server,
                                                CHECK_EMPTY | CHECK_VALID,
                                                ) or \
                       checker.check_ipaddr(_('Mail Server Name'),
                                            obj.input.server,
                                            CHECK_EMPTY | CHECK_VALID,
                                            )
        check = check_server and check
    if not is_param(obj.input, 'port'):
        check = False
        checker.add_error(_('"%s" is required.') % _('Port Number'))
    else:
        check = checker.check_number(_('Port Number'),
                                     obj.input.port,
                                     CHECK_EMPTY | CHECK_VALID | CHECK_MIN | CHECK_MAX,
                                     PORT_MIN_NUMBER,
                                     PORT_MAX_NUMBER,
                                     ) and check
    if not is_param(obj.input, 'email'):
        check = False
        checker.add_error(_('"%s" is required.') % _('Recipient Mail Address'))
    else:
        check = checker.check_mailaddress(_('Recipient Mail Address'),
                                          obj.input.email,
                                          CHECK_EMPTY | CHECK_VALID | CHECK_LENGTH,
                                          min = EMAIL_MIN_LENGTH,
                                          max = EMAIL_MAX_LENGTH
                                          ) and check
    # expose the collected error messages to the view layer
    obj.view.alert = checker.errors
    return check
def get_view_mail(config):
    """Extract the mail-related entries of *config* into a short-key dict."""
    keys = ('server', 'port', 'email')
    return dict((key, config['application.mail.' + key]) for key in keys)
class Mail(Rest):
    # REST resource for reading and updating the mail settings stored in the
    # Karesansui key=value config file (Python 2 codebase -- note the old
    # ``except IOError, kge`` syntax; do not port blindly).
    @auth
    def _GET(self, *param, **params):
        # Read the config file and expose the mail section to the view.
        try:
            conf = env.get('KARESANSUI_CONF')
            _K2V = K2V(conf)
            config = _K2V.read()
            self.view.mail = get_view_mail(config)
            return True
        except IOError, kge:
            self.logger.debug(kge)
            raise KaresansuiGadgetException, kge
    @auth
    def _PUT(self, *param, **params):
        # Validate the submitted settings, then rewrite the config file.
        if not validates_mail(self):
            return web.badrequest(self.view.alert)
        try:
            conf = env.get('KARESANSUI_CONF')
            _K2V = K2V(conf)
            config = _K2V.read()
            config['application.mail.server'] = self.input.server
            config['application.mail.port'] = self.input.port
            config['application.mail.email'] = self.input.email
            _K2V.write(config)
            self.view.mail = get_view_mail(config)
            return True
        except IOError, kge:
            self.logger.debug(kge)
            raise KaresansuiGadgetException, kge
urls = ('/setting/mail/?(\.input|\.part)?$', Mail,)
| StarcoderdataPython |
3240967 | <filename>django_settings/api.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Public module API
from .moduleregistry import RegisterError # noqa
from .dataapi import DataAPI, data # noqa
# shortcuts (note: ``set`` and ``all`` intentionally shadow builtins here --
# they are this module's public API names)
get = data.get
set = data.set
exists = data.exists
all = data.all
type_names = data.type_names

# django settings-dependent parts should be loaded lazily
from .lazyimport import lazyimport
db = lazyimport({  # this is also part of public api
    'Model': 'django_settings.models',
    'Setting': 'django_settings.models',
    'registry': 'django_settings.models',
})


# Expose registry methods as module-level functions. PEP 8 (E731): use
# ``def`` instead of assigning a lambda to a name -- behavior is unchanged
# because ``db.registry`` is still resolved lazily at call time.
def register(*a, **kw):
    """Proxy to ``db.registry.register``."""
    return db.registry.register(*a, **kw)


def unregister(*a, **kw):
    """Proxy to ``db.registry.unregister``."""
    return db.registry.unregister(*a, **kw)


def unregister_all(*a, **kw):
    """Proxy to ``db.registry.unregister_all``."""
    return db.registry.unregister_all(*a, **kw)
| StarcoderdataPython |
3303835 | <reponame>kailas-rathod/Retinet<filename>models/basic_model.py
import os
import numpy as np
import pandas as p
import theano.tensor as T
import lasagne as nn
from lasagne.layers import dnn
from lasagne.nonlinearities import LeakyRectify
from layers import ApplyNonlinearity
from utils import (oversample_set,
get_img_ids_from_dir,
softmax,
split_data)
from losses import (log_loss,
accuracy_loss,
quad_kappa_loss,
quad_kappa_log_hybrid_loss,
quad_kappa_log_hybrid_loss_clipped)
# Main dir used to load files.
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
output_size = 512 # 120
batch_size = 64 # * 2 # * 4
input_height, input_width = (output_size, output_size)
output_dim = 5
num_channels = 3
config_name = 'local_normal_' + str(output_size)
prefix_train = '/media/user/Extended_ext4/train_ds2_crop/'
prefix_test = '/media/user/Extended_ext4/test_ds2_crop/'
# ( image
# level
# 0 25810
# 1 2443
# 2 5292
# 3 873
# 4 708, image
# level
# 0 0.734783
# 1 0.069550
# 2 0.150658
# 3 0.024853
# 4 0.020156)
chunk_size = 128 # * 2 # * 2
num_chunks_train = 30000 // chunk_size * 200
validate_every = num_chunks_train // 50
output_every = num_chunks_train // 400
save_every = num_chunks_train // 200
buffer_size = 3
num_generators = 3
default_transfo_params = {'rotation': True, 'rotation_range': (0, 360),
'contrast': True, 'contrast_range': (0.7, 1.3),
'brightness': True, 'brightness_range': (0.7, 1.3),
'color': True, 'color_range': (0.7, 1.3),
'flip': True, 'flip_prob': 0.5,
'crop': True, 'crop_prob': 0.4,
'crop_w': 0.03, 'crop_h': 0.04,
'keep_aspect_ratio': False,
'resize_pad': False,
'zoom': True, 'zoom_prob': 0.5,
'zoom_range': (0.00, 0.05),
'paired_transfos': False,
'rotation_expand': False,
'crop_height': False,
'extra_width_crop': True,
'rotation_before_resize': False,
'crop_after_rotation': True}
no_transfo_params = {'keep_aspect_ratio':
default_transfo_params['keep_aspect_ratio'],
'resize_pad':
default_transfo_params['resize_pad'],
'extra_width_crop':
default_transfo_params['extra_width_crop'],
'rotation_before_resize':
default_transfo_params['rotation_before_resize'],
'crop_height':
default_transfo_params['crop_height'],
}
pixel_based_norm = False
paired_transfos = True
SEED = 1
sample_coefs = [0, 7, 3, 22, 25]
# [0, 7, 3, 22, 25] gives more even [0.25. 0.19. 0.20. 0.19. 0.18] distribution
switch_chunk = 60 * num_chunks_train // 100
leakiness = 0.5
obj_loss = 'kappalogclipped'
y_pow = 1
# Kappalog
log_scale = 0.50
log_offset = 0.50
# Kappalogclipped
log_cutoff = 0.80
lambda_reg = 0.0002
lr_scale = 6.00
LEARNING_RATE_SCHEDULE = {
1: 0.0010 * lr_scale,
num_chunks_train // 100 * 30: 0.00050 * lr_scale,
num_chunks_train // 100 * 50: 0.00010 * lr_scale,
num_chunks_train // 100 * 85: 0.00001 * lr_scale,
num_chunks_train // 100 * 95: 0.000001 * lr_scale,
}
momentum = 0.90
def build_model():
    """Assemble the Lasagne CNN for paired-eye retinopathy grading.

    The network processes both eyes of a patient: per-image conv features are
    pooled, concatenated with the image dimensions, reshaped so each row holds
    a patient's two eyes, merged through dense layers, then reshaped back to
    per-image rows and passed through a softmax over the 5 severity levels.

    Returns
    -------
    (l_out, l_ins)
        l_out: final softmax layer (batch_size x output_dim probabilities);
        l_ins: [image input layer, image-dimension input layer].
    """
    layers = []
    # auxiliary input: per-image (height, width) dimensions
    l_in_imgdim = nn.layers.InputLayer(
        shape=(batch_size, 2),
        name='imgdim'
    )
    l_in1 = nn.layers.InputLayer(
        shape=(batch_size, num_channels, input_width, input_height),
        name='images'
    )
    layers.append(l_in1)
    # cuDNN-backed layer implementations
    Conv2DLayer = dnn.Conv2DDNNLayer
    MaxPool2DLayer = dnn.MaxPool2DDNNLayer
    DenseLayer = nn.layers.DenseLayer
    # Stage 1: 32 filters, large 7x7 stride-2 stem
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32, filter_size=(7, 7), stride=(2, 2),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=32, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    # l_conv = Conv2DLayer(layers[-1],
    #                      num_filters=32, filter_size=(3, 3), stride=(1, 1),
    #                      border_mode='same',
    #                      nonlinearity=LeakyRectify(leakiness),
    #                      W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
    #                      untie_biases=True,
    #                      learning_rate_scale=1.0)
    # layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    # Stage 2: 64 filters
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=64, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    # l_conv = Conv2DLayer(layers[-1],
    #                      num_filters=64, filter_size=(3, 3), stride=(1, 1),
    #                      border_mode='same',
    #                      nonlinearity=LeakyRectify(leakiness),
    #                      W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
    #                      untie_biases=True,
    #                      learning_rate_scale=1.0)
    # layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    # Stage 3: 128 filters x 4
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=128, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2))
    layers.append(l_pool)
    # Stage 4: 256 filters x 4
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_conv = Conv2DLayer(layers[-1],
                         num_filters=256, filter_size=(3, 3), stride=(1, 1),
                         border_mode='same',
                         nonlinearity=LeakyRectify(leakiness),
                         W=nn.init.Orthogonal(1.0), b=nn.init.Constant(0.1),
                         untie_biases=True)
    layers.append(l_conv)
    l_pool = MaxPool2DLayer(layers[-1], pool_size=(3, 3), stride=(2, 2),
                            name='coarse_last_pool')
    layers.append(l_pool)
    # Per-image dense head with maxout (FeaturePoolLayer over pairs of units)
    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(DenseLayer(layers[-1],
                             nonlinearity=None,
                             num_units=1024,
                             W=nn.init.Orthogonal(1.0),
                             b=nn.init.Constant(0.1),
                             name='first_fc_0'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)
    l_first_repr = layers[-1]
    # append the raw image dimensions to the learned representation
    l_coarse_repr = nn.layers.concat([l_first_repr,
                                      l_in_imgdim])
    layers.append(l_coarse_repr)
    # Combine representations of both eyes.
    # (consecutive rows are assumed to be the two eyes of one patient)
    layers.append(
        nn.layers.ReshapeLayer(layers[-1], shape=(batch_size // 2, -1)))
    layers.append(nn.layers.DropoutLayer(layers[-1], p=0.5))
    layers.append(nn.layers.DenseLayer(layers[-1],
                                       nonlinearity=None,
                                       num_units=1024,
                                       W=nn.init.Orthogonal(1.0),
                                       b=nn.init.Constant(0.1),
                                       name='combine_repr_fc'))
    l_pool = nn.layers.FeaturePoolLayer(layers[-1],
                                        pool_size=2,
                                        pool_function=T.max)
    layers.append(l_pool)
    # per-patient head emits logits for both eyes at once (output_dim * 2)
    l_hidden = nn.layers.DenseLayer(nn.layers.DropoutLayer(layers[-1], p=0.5),
                                    num_units=output_dim * 2,
                                    nonlinearity=None,  # No softmax yet!
                                    W=nn.init.Orthogonal(1.0),
                                    b=nn.init.Constant(0.1))
    layers.append(l_hidden)
    # Reshape back to 5.
    layers.append(nn.layers.ReshapeLayer(layers[-1],
                                         shape=(batch_size, 5)))
    # Apply softmax.
    l_out = ApplyNonlinearity(layers[-1],
                              nonlinearity=nn.nonlinearities.softmax)
    layers.append(l_out)
    l_ins = [l_in1, l_in_imgdim]
    return l_out, l_ins
# Encode the loss configuration in the run name, then bind ``loss_function``
# to the loss selected by ``obj_loss``.
config_name += '_' + obj_loss
if obj_loss == 'kappalog':
    config_name += '_logscale_' + str(log_scale)
    config_name += '_logoffset_' + str(log_offset)
elif 'kappalogclipped' in obj_loss:
    config_name += '_logcutoff_' + str(log_cutoff)
config_name += '_reg_' + str(lambda_reg)
if obj_loss == 'log':
    loss_function = log_loss
elif obj_loss == 'acc':
    loss_function = accuracy_loss
elif obj_loss == 'kappa':
    # plain quadratic-weighted-kappa surrogate loss
    def loss(y, t):
        return quad_kappa_loss(y, t,
                               y_pow=y_pow)
    loss_function = loss
elif obj_loss == 'kappalog':
    # kappa/log-loss hybrid, blended via log_scale and log_offset
    def loss(y, t):
        return quad_kappa_log_hybrid_loss(y, t,
                                          y_pow=y_pow,
                                          log_scale=log_scale,
                                          log_offset=log_offset)
    loss_function = loss
elif obj_loss == 'kappalogclipped':
    # hybrid with the log term clipped at log_cutoff
    def loss(y, t):
        return quad_kappa_log_hybrid_loss_clipped(y, t,
                                                  y_pow=y_pow,
                                                  log_cutoff=log_cutoff)
    loss_function = loss
else:
    raise ValueError("Need obj_loss param.")
def build_objective(l_out, loss_function=loss_function,
                    lambda_reg=lambda_reg):
    """Wrap ``loss_function`` with an L2 weight-decay penalty.

    Note: the defaults bind the module-level ``loss_function`` and
    ``lambda_reg`` at definition time.
    """
    params = nn.layers.get_all_params(l_out, regularizable=True)
    # symbolic sum of squared weights over all regularizable parameters
    reg_term = sum(T.sum(p ** 2) for p in params)
    def loss(y, t):
        return loss_function(y, t) + lambda_reg * reg_term
    return nn.objectives.Objective(l_out, loss_function=loss)
train_labels = p.read_csv(os.path.join(base_dir, 'data/trainLabels.csv'))
labels_split = p.DataFrame(list(train_labels.image.str.split('_')),
columns=['id', 'eye'])
labels_split['level'] = train_labels.level
labels_split['id'] = labels_split['id'].astype('int')
id_train, y_train, id_valid, y_valid = split_data(train_labels, labels_split,
valid_size=10,
SEED=SEED, pairs=True)
# Change train dataset to oversample other labels.
# Total sizes:
# ( image
# level
# 0 25810
# 1 2443
# 2 5292
# 3 873
# 4 708, image
# level
# 0 0.734783
# 1 0.069550
# 2 0.150658
# 3 0.024853
# 4 0.020156)
pl_enabled = True
pl_softmax_temp = 2
pl_train_coef = 5
pl_train_fn = ''
pl_test_fn = ''
pl_log = False
if pl_enabled:
pl_test_fn = '2015_07_14_072437_6_log_mean.npy'
test_preds = np.load(os.path.join(base_dir, 'preds/' + pl_test_fn))
if test_preds.shape[1] > 5:
test_preds = test_preds[:, -5:].astype('float32')
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
print "Orig test preds:\n\n"
print test_preds[:10], '\n'
if np.mean(test_preds) > 0:
# These are not log probs, so can do log.
test_preds = np.log(1e-5 + test_preds)
test_probs = softmax(test_preds, temp=pl_softmax_temp)
# Double ids so only every other.
images_test_pl = sorted(set(get_img_ids_from_dir(prefix_test)))
labels_test_pl = test_probs.reshape((-1, 2, 5))
print "\nImages for test:\n\n"
print images_test_pl[:5], '\n'
print "\nLabels for test:\n\n"
print labels_test_pl[:5], '\n'
# Add only test PL for now.
id_train_oversample, labels_train_oversample = oversample_set(id_train,
y_train,
sample_coefs)
# First train set.
images_train_0 = list(id_train_oversample) + images_test_pl
labels_train_pl = np.eye(5)[
list(labels_train_oversample.flatten().astype('int32'))
].reshape((-1, 2, 5))
labels_train_0 = np.vstack([labels_train_pl,
labels_test_pl]).astype('float32')
# Second train set.
images_train_1 = list(id_train) * pl_train_coef + images_test_pl
labels_train_pl = np.eye(5)[
list(y_train.flatten().astype('int32')) * pl_train_coef
].reshape((-1, 2, 5))
labels_train_1 = np.vstack([labels_train_pl,
labels_test_pl]).astype('float32')
images_train_eval = id_train[:]
labels_train_eval = y_train[:].astype('int32')
images_valid_eval = id_valid[:]
labels_valid_eval = y_valid[:].astype('int32')
else:
id_train_oversample, labels_train_oversample = oversample_set(id_train,
y_train,
sample_coefs)
images_train_0 = id_train_oversample
labels_train_0 = labels_train_oversample.astype('int32')
images_train_1, labels_train_1 = id_train, y_train.astype('int32')
images_train_eval = id_train[:]
labels_train_eval = y_train[:].astype('int32')
images_valid_eval = id_valid[:]
labels_valid_eval = y_valid[:].astype('int32')
| StarcoderdataPython |
171968 | import numpy as np
import SimpleITK as sitk
from napari_imsmicrolink.data.image_transform import ImageTransform
def test_ImageTransform_add_points():
    """add_points should scale (and optionally round) incoming point sets."""
    test_pts = np.array([[50.75, 100.0], [20.0, 10.0], [10.0, 50.0], [60.0, 20.0]])
    itfm = ImageTransform()
    itfm.output_spacing = (1, 1)
    # rounding happens after scaling: 50.75 * 100 -> 5075 rounded to 5100?
    # NOTE(review): the expected 5100.0 suggests rounding to the nearest 100
    # of the *unscaled* value -- confirm add_points' rounding semantics.
    itfm.add_points(test_pts, round=True, src_or_tgt="source", scaling=100)
    assert itfm.source_pts is not None
    assert itfm.source_pts[0, 0] == 5100.0
    assert itfm.source_pts[3, 0] == 6000.0
    # without rounding the points are simply multiplied by the scaling factor
    itfm.add_points(test_pts, round=False, src_or_tgt="source", scaling=100)
    assert itfm.source_pts[0, 0] == 50.75 * 100
    assert itfm.source_pts[3, 0] == 60 * 100
    # the same call can populate the target point set
    itfm.add_points(test_pts, round=False, src_or_tgt="target", scaling=100)
    assert itfm.target_pts is not None
def test_ImageTransform_compute_transform():
    """Registering four point pairs should populate every affine attribute
    (forward and inverse, in microns and pixels) with a small residual."""
    source_pts = np.array(
        [
            [356.93356879, 6713.16214535],
            [6285.96351516, 11137.0624842],
            [15154.40947051, 7596.13949593],
            [6905.28155271, 936.74985065],
        ]
    )
    target_pts = np.array(
        [[500.0, 6250.0], [6400.0, 10700.0], [15300.0, 7200.0], [7100.0, 500.0]]
    )
    itfm = ImageTransform()
    itfm.output_spacing = (0.92, 0.92)
    # adding both point sets implicitly triggers the transform computation
    itfm.add_points(source_pts, round=False, src_or_tgt="source", scaling=1)
    itfm.add_points(target_pts, round=True, src_or_tgt="target", scaling=1)
    assert itfm.affine_transform is not None
    assert itfm.affine_np_mat_xy_um is not None
    assert itfm.affine_np_mat_yx_um is not None
    assert itfm.affine_np_mat_xy_px is not None
    assert itfm.affine_np_mat_yx_px is not None
    assert itfm.inverse_affine_transform is not None
    assert itfm.inverse_affine_np_mat_xy_um is not None
    assert itfm.inverse_affine_np_mat_yx_um is not None
    assert itfm.inverse_affine_np_mat_xy_px is not None
    assert itfm.inverse_affine_np_mat_yx_px is not None
    # registration residual should stay below 10 (units per the transform)
    assert itfm.point_reg_error < 10
def test_ImageTransform_apply_transform_pts():
    """A 2x scaling affine should double every coordinate."""
    aff_tform = sitk.AffineTransform(2)
    # diagonal [2, 2]: uniform 2x scaling, no rotation/shear
    aff_tform.SetMatrix([2, 0, 0, 2])
    pts = np.array([[1, 1], [2, 2]]).astype(float)
    tformed_pts = ImageTransform.apply_transform_to_pts(pts, aff_tform)
    scaled_pts = np.array([[2, 2], [4, 4]]).astype(np.double)
    np.testing.assert_array_equal(tformed_pts, scaled_pts)
| StarcoderdataPython |
1775773 | from matplotlib.colors import ListedColormap,LogNorm
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import cmasher as cma
import pandas as pd
from matplotlib.ticker import LogLocator,AutoLocator,AutoMinorLocator,MaxNLocator
from .sampler import to_time_series, TigressWindSampler
discrete_cmap = ListedColormap(sns.color_palette('tab20c',n_colors=20,desat=0.5).as_hex())
pdf_cmap = cma.fall_r
pdfmin = -2.5
pdfmax = 1
def pdf_projection(pdf,wpdf=None,dvB=0.02,dMach=0.02):
    """Obtain 1D PDFs projected onto u, w, log vB, log Mach
    Parameters
    ----------
    pdf : xarray.Dataset
        2D pdf of u and w (from TigressWindModel)
    wpdf : xarray.Dataset, optional
        weight field
    dvB : float
        log vB bin (the default is 0.02)
    dMach : float
        log Mach bin (the default is 0.02)
    Returns
    -------
    bins, pdf : list of tuples (bin, pdf) for all four variables
        [(u, pdf_u), (w, pdf_w), (logvB, pdf_logvB) (log Mach, pdf_logMach)]
    """
    # bin widths of the 2D pdf along each log axis (assumed uniform)
    dlogcs = np.diff(pdf.logcs)[0]
    dlogvout = np.diff(pdf.logvout)[0]
    dbinsq = dlogcs*dlogvout
    # marginalize over the other axis; weighted average when wpdf is given
    if wpdf is None:
        pdf_u = pdf.sum(dim=['logcs'])*dlogcs
        pdf_w = pdf.sum(dim=['logvout'])*dlogvout
    else:
        pdf_u = (pdf*wpdf).sum(dim=['logcs'])/wpdf.sum(dim=['logcs'])
        pdf_w = (pdf*wpdf).sum(dim=['logvout'])/wpdf.sum(dim=['logvout'])
    # derived quantities on the (logcs, logvout) grid:
    # Bernoulli velocity vB = sqrt(5 cs^2 + vout^2) and Mach number vout/cs
    vBz_bins = np.arange(0,4,dvB)
    Mach_bins = np.arange(-2,2,dMach)
    cs = 10.**pdf.logcs
    vout = 10.**pdf.logvout
    vBz = np.sqrt(5.0*cs**2+vout**2)
    Mach = 1/cs*vout
    # names are required by xarray's groupby_bins below
    vBz.name = 'vBz'
    Mach.name = 'Mach'
    # rebin the 2D pdf onto the derived 1D coordinates
    if wpdf is None:
        pdf_vBz = pdf.groupby_bins(np.log10(vBz),vBz_bins)
        pdf_vBz = pdf_vBz.sum(dim=['stacked_logcs_logvout'])*dbinsq/dvB
        pdf_Mach = pdf.groupby_bins(np.log10(Mach),Mach_bins)
        pdf_Mach = pdf_Mach.sum(dim=['stacked_logcs_logvout'])*dbinsq/dMach
    else:
        wpdf_vBz = wpdf.groupby_bins(np.log10(vBz),vBz_bins)
        wpdf_vBz = wpdf_vBz.sum(dim=['stacked_logcs_logvout'])
        wpdf_Mach = wpdf.groupby_bins(np.log10(Mach),Mach_bins)
        wpdf_Mach = wpdf_Mach.sum(dim=['stacked_logcs_logvout'])
        pdf_vBz = (pdf*wpdf).groupby_bins(np.log10(vBz),vBz_bins)
        pdf_vBz = pdf_vBz.sum(dim=['stacked_logcs_logvout'])/wpdf_vBz
        pdf_Mach = (pdf*wpdf).groupby_bins(np.log10(Mach),Mach_bins)
        pdf_Mach = pdf_Mach.sum(dim=['stacked_logcs_logvout'])/wpdf_Mach
    # bin centers are the left edges shifted by half a bin
    pdf_list=[(pdf.logvout,pdf_u),(pdf.logcs,pdf_w),
              (vBz_bins[:-1]+0.5*dvB,pdf_vBz.T),
              (Mach_bins[:-1]+0.5*dMach,pdf_Mach.T)]
    return pdf_list
def scifmt(value, fmt=':9.1e'):
    """Format a float for LaTeX, using power-of-ten notation for large exponents.

    Parameters
    ----------
    value : float
        Number to format.
    fmt : str
        Scientific-notation format spec; the digit after the '.' sets how
        large the exponent may be before switching from fixed-point output
        to a LaTeX ``a\\cdot 10^{b}`` expression.

    Returns
    -------
    str
        Fixed-point string for small exponents; otherwise a LaTeX
        power-of-ten string (the mantissa is dropped when it equals 1).
    """
    # Largest number of exponent digits still shown in plain fixed-point form.
    maxdigits = int(fmt.split('.')[1][0]) + 1
    mantissa, exponent = '{{{}}}'.format(fmt).format(value).split('e')
    digits = int(exponent)
    if abs(digits) < maxdigits:
        # Small exponent: fixed-point with just enough decimal places.
        return '{{{}}}'.format(':9.{}f'.format(maxdigits - digits - 1)).format(value)
    # float() replaces the original eval(): identical result on a formatted
    # number, without executing arbitrary code.
    if float(mantissa) == 1.0:
        return '10^{{{}}}'.format(digits)
    return mantissa + '\\cdot 10^{{{}}}'.format(digits)
def add_vBM_grid(ax,pdf,vB_labels=True,M_labels=True):
    """Add contours of log vB and log Mach

    Parameters
    ----------
    ax : matplotlib.axes
    pdf : xarray.Dataset
        Dataset created by TigressWindModel or TigressSimLoader
        (must provide 'vBz' and 'Mach' fields on the (u, w) grid)
    vB_labels, M_labels : bool
        add contour labels
    """
    # Dotted gray contours of constant log10 v_{B,z} over the (u, w) plane.
    levels=[1,1.5,2,2.5,3,3.5]
    u = pdf.logvout
    w = pdf.logcs
    vB = pdf['vBz']
    ct=ax.contour(u,w,np.log10(vB),levels=levels,colors='gray',alpha=1.0,linestyles=':')
    if vB_labels:
        # One level gets the full expression; the rest just the numeric value.
        ax.clabel(ct,[3],inline=1, inline_spacing=40,manual=[(1.0,3)],
                  fmt=r'$\log_{10} v_{\mathcal{B},z}=%1.1f$', fontsize='x-small')
        ax.clabel(ct,[1,1.5,2,2.5,3.5],inline=1, fmt=r'$%1.1f$', fontsize='x-small')
    # Dotted gray contours of constant log10 Mach number.
    levels=[-2,-1,0,1,2]
    Mach = pdf['Mach']
    ct=ax.contour(u,w,np.log10(Mach),levels=levels,colors='gray',alpha=1.0,linestyles=':')
    if M_labels:
        ax.clabel(ct,[-1],inline=1, inline_spacing=40,manual=[(0.5,2.5)],
                  fmt=r'$\log_{10} \mathcal{M}=%1.0f$', fontsize='x-small')
        ax.clabel(ct,[-2,0,1,2],inline=1,manual=[(0.5,2.5),(0.7,0.5),(1.5,0.4),(2.5,0.5)],
                  fmt=r'$%1.0f$', fontsize='x-small')
def add_phase_lines(ax,labels=True):
    """Draw dashed horizontal lines separating the thermal phases.

    The two lines mark the sound speeds corresponding to T = 2e4 K and
    T = 5e5 K; optional annotations label the cool/intermediate/hot regions.
    """
    from .tigress_tools import T2cs
    w_cool = T2cs(2.e4)  # upper boundary of the cool phase
    w_hot = T2cs(5.e5)   # lower boundary of the hot phase
    ax.axhline(w_cool, color='C0', ls='--', lw=1)
    ax.axhline(w_hot, color='C1', ls='--', lw=1)
    if not labels:
        return
    ax.annotate('cool', (3.4, w_cool - 0.02), color='C0',
                ha='right', va='top', fontsize='small')
    ax.annotate('hot', (3.4, w_hot + 0.02), color='C1',
                ha='right', va='bottom', fontsize='small')
    ax.annotate('int.', (3.4, 0.5 * (w_cool + w_hot)), color='C2',
                ha='right', va='center', fontsize='small')
def toggle_xticks(axes,visible=False):
    """Show or hide the x tick labels on every axis in *axes*."""
    tick_labels = [ax.get_xticklabels() for ax in axes]
    plt.setp(tick_labels, visible=visible)
def toggle_yticks(axes,visible=False):
    """Show or hide the y tick labels on every axis in *axes*."""
    tick_labels = [ax.get_yticklabels() for ax in axes]
    plt.setp(tick_labels, visible=visible)
def show2d(pdf,xlabel=True,ylabel=True,label=None,vBM=[False,False],**kwargs):
    """Display 2D joint PDF as image

    Parameters
    ----------
    pdf : 2D joint PDF with logvout/logcs coordinates
    xlabel, ylabel : bool
        draw the corresponding axis label
    label : str, optional
        annotation placed in the top-left corner
    vBM : list of two bool
        [vB_labels, M_labels]; if any is True, overlay the vB/Mach contour
        grid (read-only here, so the mutable default is safe)
    kwargs : forwarded to plt.imshow

    Returns
    -------
    matplotlib image handle (for colorbar creation)
    """
    dbin = np.diff(pdf.logvout)[0]
    # Extend the extent by one bin so the last row/column of pixels is not clipped.
    extent=[pdf.logvout.min(),pdf.logvout.max()+dbin,pdf.logcs.min(),pdf.logcs.max()+dbin]
    im=plt.imshow(np.log10(pdf),vmin=pdfmin,vmax=pdfmax,origin='lower',
                  extent=extent,cmap=pdf_cmap,**kwargs)
    ax=plt.gca()
    ax.set_aspect('equal')
    ax.set_xlim(0,3.5)
    ax.set_ylim(0,3.5)
    if ylabel: ax.set_ylabel(r'$w\equiv\log_{10}\,c_s\,[\rm km/s]$')
    if xlabel: ax.set_xlabel(r'$u\equiv\log_{10}\,v_{\rm out}\,[\rm km/s]$')
    ax.set_yticks([0,1,2,3])
    ax.set_xticks([0,1,2,3])
    if label is not None:
        ax.annotate(label,(0.02,0.98),xycoords='axes fraction',
                    ha='left',va='top',fontsize='small')
    if any(vBM):
        add_vBM_grid(ax,pdf,vB_labels=vBM[0],M_labels=vBM[1])
    return im
def show2d_ct(pdf,xlabel=True,ylabel=True,label=None,**kwargs):
    """Display 2D joint PDF as contours

    Same axes setup and color scale as show2d(), but drawn as contour
    lines so it can be overplotted on an image.

    Returns
    -------
    matplotlib contour set (for colorbar creation)
    """
    dbin = np.diff(pdf.logvout)[0]
    # extent is computed for symmetry with show2d (contour uses the coords directly).
    extent=[pdf.logvout.min(),pdf.logvout.max()+dbin,pdf.logcs.min(),pdf.logcs.max()+dbin]
    ct=plt.contour(pdf.logvout,pdf.logcs,np.log10(pdf),
                   vmin=pdfmin,vmax=pdfmax,
                   cmap=pdf_cmap,levels=np.arange(pdfmin,pdfmax+0.5,0.5),
                   linewidths=3,**kwargs)
    ax=plt.gca()
    ax.set_aspect('equal')
    ax.set_xlim(0,3.5)
    ax.set_ylim(0,3.5)
    if ylabel: ax.set_ylabel(r'$w\equiv\log_{10}\,c_s\,[\rm km/s]$')
    if xlabel: ax.set_xlabel(r'$u\equiv\log_{10}\,v_{\rm out}\,[\rm km/s]$')
    ax.set_yticks([0,1,2,3])
    ax.set_xticks([0,1,2,3])
    if label is not None:
        ax.annotate(label,(0.02,0.98),xycoords='axes fraction',
                    ha='left',va='top',fontsize='small')
    return ct
def plot_flux_pdfs_yZ(pdf,grid=False):
    """Three-panel figure for Mpdf, Epdf, and yZ

    Figure 1 of the wind launching paper

    Parameters
    ----------
    pdf : xarray.Dataset
        a joint pdf from simulation (TigressSimLoader)
    grid : bool
        toggle grid of vBz and Mach
    """
    fig,axes=plt.subplots(3,1,figsize=(5,12),sharex=True,
                          gridspec_kw=dict(left=0.06,right=0.90,
                                           bottom=0.15,top=0.98,
                                           hspace=0.0))
    # Field names and their [panel title, colorbar label] pairs.
    fields = ['Mpdf','Epdf','yZ']
    labels = [[r'(a) mass loading',r'$\log_{10}\, f_{M}(u,w)\,[{\rm dex^{-2}}]$'],
              [r'(b) energy loading',r'$\log_{10}\, f_{E}(u,w)\,[{\rm dex^{-2}}]$'],
              [r'(c) enrichment factor',r'$\zeta(u,w)$']]
    for i,ax,wf,lab in zip(range(len(fields)),axes[:],fields,labels):
        attrs=pdf.attrs
        pdfdata=pdf[wf]
        dbin=attrs['dbin']
        u = pdf.logvout
        w = pdf.logcs
        extent=[u.min(),u.max()+dbin,w.min(),w.max()+dbin]
        # Color scale depends on the field: linear discrete colors for the
        # metallicity/enrichment maps, shared log scale for the flux pdfs.
        if wf.startswith('Z'):
            im=ax.imshow(pdfdata,origin='lower',extent=extent,
                         vmin=0.02,vmax=0.12,cmap=discrete_cmap)
        elif wf.startswith('yZ'):
            im=ax.imshow(pdfdata,origin='lower',extent=extent,
                         vmin=1,vmax=3.5,cmap=discrete_cmap)
        else:
            im=ax.imshow(np.log10(pdfdata),origin='lower',extent=extent,
                         vmin=pdfmin,vmax=pdfmax,cmap=pdf_cmap)
        ax.set_aspect('equal')
        ax.set_xlim(0,3.5)
        ax.set_ylim(0,3.5)
        ax.set_xticks([0,1,2,3])
        ax.set_yticks([0,1,2,3])
        if i == 2: ax.set_xlabel(r'$u\equiv\log_{10}\,v_{\rm out}\,[\rm km/s]$')
        # Phase separators on every panel; labels only on the first.
        add_phase_lines(ax,labels=(i==0))
        if grid:
            add_vBM_grid(ax,pdf,vB_labels=(i==0),M_labels=(i==1))
        else:
            add_vB_lines(ax,pdf,labels=(i==1))
        ax.annotate(lab[0],(0.02,0.98),xycoords='axes fraction',ha='left',va='top',fontsize='small')
        cbar=plt.colorbar(im,ax=ax,pad=0)
        cbar.set_label(lab[1])
        if i < 2:
            # Hide the lowest tick label so adjacent colorbars do not collide.
            cbar.set_ticks([-3,-2,-1,0,1])
            cbar.ax.set_yticklabels(['',-2,-1,0,1])
        ax.set_ylabel(r'$w\equiv\log_{10}\,c_s\,[\rm km/s]$')
    return fig
def plot_vB_projection(sim,reconstructed=True,**kwargs):
    """Plot four 1D pdfs along log vB from simulation

    Figure 2(a) of the wind launching paper.

    Parameters
    ----------
    sim : TigressSimLoader
    reconstructed : bool
        also draw the reconstructed ('_r') pdfs as thin lines when available
    """
    fields=['Mpdf','ppdf','Epdf','Zpdf']
    labels=[r'$f_M$',
            r'$f_p$',
            r'$f_E$',
            r'$f_Z$',]
    colors=['C3','C0','C1','C2']
    pdf = sim.simpdf
    for k,label,c in zip(fields,labels,colors):
        # pdf_projection returns [(u,...), (w,...), (logvB,...), (logMach,...)];
        # index 2 selects the log vB projection.
        pdf1d = pdf_projection(pdf[k])
        x, y = pdf1d[2]
        l, = plt.step(x,y,label=label,lw=3,color=c,alpha=0.5)
        if reconstructed:
            if k+'_r' in pdf:
                # Thin line in the same color for the reconstructed pdf.
                pdf1d = pdf_projection(pdf[k+'_r'])
                x, y = pdf1d[2]
                plt.step(x,y,lw=1,color=l.get_color())
    plt.xlim(1,3.5)
    plt.ylabel(r'$f_q(\log_{10}\,v_{\mathcal{B},z})\,[{\rm dex}^{-1}]$')
    plt.xlabel(r'$\log_{10}\, v_{\mathcal{B},z}\,{\rm [km/s]}$')
def plot_vB_projection_ratio(sim,**kwargs):
    """Plot ratios of reconstructed and original 1D pdfs along log vB

    Figure 2(b) of the wind launching paper.

    Parameters
    ----------
    sim : TigressSimLoader
    kwargs : forwarded to plt.plot
    """
    fields=['ppdf','Epdf','Zpdf']
    labels=[r'$f_{p}^r/f_{p}$',
            r'$f_{E}^r/f_{E}$',
            r'$f_{Z}^r/f_{Z}$',]
    colors=['C0','C1','C2','C3','C4','C5','C6','C7']
    for k,label,c in zip(fields,labels,colors):
        # Mass-pdf-weighted projection of the reconstructed/original ratio.
        ratio = sim.simpdf[k+'_r']/sim.simpdf[k]
        pdf1d = pdf_projection(ratio,wpdf=sim.simpdf['Mpdf'])
        x, y = pdf1d[2]
        l,=plt.plot(x,y,label=label,color=c,**kwargs)
    plt.xlim(1,3.5)
    plt.ylabel('Ratio')
    plt.xlabel(r'$\log_{10}\, v_{\mathcal{B},z}\,{\rm [km/s]}$')
def flux_reconstruction(sims,stdmodel='R4'):
    """Two-panel figure for comparison of original and reconstructed PDFs

    Figure 2 of the wind launching paper

    Parameters
    ----------
    sims : TigressSimContainer
    stdmodel : ['R2','R4','R8','R16','LGR2','LGR4','LGR8']
        model highlighted with thick lines in both panels
    """
    fig,axes = plt.subplots(1,2,figsize=(10,4))
    # Panel (a): 1D vB projections for the reference model.
    plt.sca(axes[0])
    plot_vB_projection(sims[stdmodel])
    plt.legend(loc=9,fontsize='x-small')
    plt.xlim(1,3.5)
    plt.xticks([1,2,3,])
    plt.yscale('log')
    plt.ylim(1.e-2,10)
    plt.annotate('(a)',(0.03,0.97),xycoords='axes fraction',ha='left',va='top')
    # Panel (b): ratio curves — thick for the reference model, thin for all models.
    plt.sca(axes[1])
    plot_vB_projection_ratio(sims[stdmodel],lw=3)
    plt.legend(loc=4,fontsize='small')
    plt.annotate('(b)',(0.03,0.97),xycoords='axes fraction',ha='left',va='top')
    for k,sim in sims.items():
        plot_vB_projection_ratio(sim,lw=1,alpha=0.5)
    plt.ylim(0,1.3)
    plt.xlim(1,3.5)
    plt.xticks([1,2,3,])
    plt.axhline(1,ls=':')
    plt.axhline(0,ls=':')
    plt.tight_layout()
    return fig
def plot_proj(pdf1d,axes,**kwargs):
    """Plot all four 1D histograms

    Figure 4 right panels.

    Parameters
    ----------
    pdf1d : list
        (bin, 1d pdf) tuples projected on different axes,
        as created by pdf_projection
    axes : list
        axes to plot on (one per projection; axis labels are set by the caller)
    kwargs : forwarded to plt.step
    """
    # Removed the unused xlabels list the original zipped over but never used;
    # axis labels are applied by the caller (see comparison_pdfs).
    for ax, (x, y) in zip(axes, pdf1d):
        plt.sca(ax)
        # Log-scaled step histogram with a common dynamic range across panels.
        plt.step(x, y, where='mid', **kwargs)
        plt.yscale('log')
        plt.ylim(1.e-3, 10)
def comparison_pdfs(sim,q='M'):
    """Five-panel figure of 2D and 1D PDFs

    Figure 3 of the wind launching paper

    Parameters
    ----------
    sim : TigressSimLoader
    q : ['M','p','E','Z']
        variable to plot
    """
    modelpdf=sim.build_model()
    def set_fivepanel_axes(nrows=2):
        '''
        construct custom axes for one 2D PDFs and four 1D histograms
        '''
        fig=plt.figure(figsize=(20,4*nrows),constrained_layout=False)
        # Outer grid: [2D image + colorbar | four side-by-side histograms] per row.
        outergrid=fig.add_gridspec(nrows,2,width_ratios=[1.2,4],wspace=0.2)
        axes=[]
        for i in range(nrows):
            axes_row=[]
            innergrid=outergrid[i*2].subgridspec(1,2,width_ratios=[1,0.05],wspace=0)
            for ig in innergrid:
                axes_row.append(fig.add_subplot(ig))
            innergrid=outergrid[i*2+1].subgridspec(1,4,width_ratios=[1,1,1,1],wspace=0)
            for ig in innergrid:
                axes_row.append(fig.add_subplot(ig))
            axes.append(axes_row)
        if nrows == 1:
            return fig,axes[0]
        else:
            return fig,axes
    fig,_axes = set_fivepanel_axes(nrows=1)
    # draw simulation PDF
    plt.sca(_axes[0])
    im = show2d(sim.simpdf[q+'pdf'],alpha=0.7)
    plt.colorbar(im,cax=_axes[1])
    # draw 1D projections
    pdf1d = pdf_projection(sim.simpdf[q+'pdf'])
    plot_proj(pdf1d,_axes[2:],color='k',lw=1,label='sim.')
    # draw model PDF contour
    plt.sca(_axes[0])
    ct = show2d_ct(modelpdf[q+'pdf'])
    cbar = plt.colorbar(ct,cax=_axes[1])
    cbar.set_ticks([-2,-1,0,1])
    # draw 1D projections (cool/hot components plus their total)
    pdf1d = pdf_projection(modelpdf[q+'pdf-cool'])
    plot_proj(pdf1d,_axes[2:],lw=2,label='cool',zorder=1)
    pdf1d = pdf_projection(modelpdf[q+'pdf-hot'])
    plot_proj(pdf1d,_axes[2:],lw=2,label='hot',zorder=1)
    pdf1d = pdf_projection(modelpdf[q+'pdf'])
    plot_proj(pdf1d,_axes[2:],lw=3,label='total',zorder=0)
    # Only the first histogram keeps its y tick labels (axes share the y range).
    toggle_yticks(_axes[3:])
    xlabels=[r'$\log_{10}\,v_{\rm out}$',r'$\log_{10}\,c_s$',
             r'$\log_{10}\,v_{\mathcal{B},z}$',r'$\log_{10}\,\mathcal{M}$']
    xlims=[(0,3.5),(0,3.5),(0.5,4),(-1.5,2)]
    for ax,xlab,xlim in zip(_axes[2:],xlabels,xlims):
        ax.set_xlabel(xlab+r'$\,[\rm km/s]$')
        ax.set_xlim(xlim)
    _axes[2].set_ylabel(r'$f_{}\,[{{\rm dex}}^{{-1}}]$'.format(q))
    _axes[-1].legend(fontsize='xx-small')
    for ax in _axes[2:]:
        ax.yaxis.set_major_locator(LogLocator(numticks=15))
        ax.yaxis.set_minor_locator(LogLocator(subs=np.arange(2, 10) * .1, numticks=15))
        ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    return fig
def show_loading(model,vlist=[0,30,1000,300,100],sims=None):
    """Loading factor scaling as a function of Sigma_SFR

    Figure 4 of the wind launching paper

    Parameters
    ----------
    model : TigressWindModel
    vlist : list
        list of escape velocities (read-only; mutable default is safe here)
    sims : TigressSimContainer (optional)
        if passed, overplot simulation data point
    """
    fig,axes = plt.subplots(1,4,sharey='row',sharex='col',figsize=(18,5),
                            gridspec_kw=dict(hspace=0.1,wspace=0.1))
    sfr=10.**model.logsfr
    vBz=model.vBz
    # One panel per loading factor (mass, momentum, energy, metal).
    for ax,q in zip(axes,'MpEZ'):
        for i,vesc in enumerate(vlist):
            dbinsq=model.attrs['dlogvout']*model.attrs['dlogcs']
            eta = model['eta'+q]
            pdf = model[q+'pdf']
            # Fraction of the pdf with vBz above the escape velocity,
            # times the total loading factor.
            cdf = pdf.where(vBz>vesc).sum(dim=['logcs','logvout'])*dbinsq
            loading = cdf*eta
            plt.sca(ax)
            if sims is not None:
                # Overlay one point per simulation model, color-matched to vesc.
                for k,sim in sims.items():
                    simpdf = sim.simpdf
                    simeta = sim.simpdf.attrs['eta'+q]
                    dbin = sim.simpdf.attrs['dbin']
                    simsfr = sim.simpdf.attrs['sfr']
                    simcdf = simpdf[q+'pdf'].where(sim.simpdf['vBz']>vesc).sum()*dbin**2
                    simloading = simcdf*simeta
                    plt.plot(simsfr,simloading,'o',color='C{}'.format(i))
            plt.plot(sfr,loading,color='C{}'.format(i))
            plt.xscale('log')
    axes[0].set_ylabel(r'$\eta\,(v_{\mathcal{B},z}>v_{\rm esc})$')
    plt.setp(axes,'yscale','log')
    plt.setp(axes,'xscale','log')
    plt.setp(axes,'xlim',(2.e-5,2))
    plt.setp(axes,'ylim',(5.e-3,1.e2))
    # Annotate the vesc values on the first panel; colors follow the order of vlist.
    ax=axes[0]
    ax.annotate(r'$v_{\rm esc}=0$',(5.e-4,50),ha='left',va='top',color='C0',fontsize='small')
    ax.annotate('$30$',(1.e-4,5),ha='left',va='bottom',color='C1',fontsize='small')
    ax.annotate('$100$',(1.5e-4,0.35),ha='left',va='bottom',color='C4',fontsize='small')
    ax.annotate('$300$',(1.5e-4,0.25),ha='left',va='top',color='C3',fontsize='small')
    ax.annotate(r'$10^3$',(2.e-4,0.015),ha='left',va='bottom',color='C2',fontsize='small')
    plt.setp(axes,'xlabel',r'$\Sigma_{\rm SFR}\,[{\rm M_\odot\,kpc^{-2}\,yr^{-1}}]$')
    return fig
def sampling_from_simulation_sfr(sim,tdelay=10):
    """Three-panel figure for

    (a) mass outflow rate of cool gas
    (b) energy outflow rate of hot gas
    (c) distribution of sampled particles

    Parameters
    ----------
    sim : TigressSimLoader
    tdelay : float
        time delay to be applied to sampled data in Myr
    """
    sampler=TigressWindSampler(z0=sim.z0)
    ts = sim.time_series
    sfr = ts['sfr10']
    time = ts['tMyr']
    dt = 1.e6
    area = 1.024
    # Particle masses to compare: (cool, hot) pairs in solar masses.
    mc = (1.e4,1.e5,1.e6)
    mh = (1.e2,1.e3,1.e4)
    # determine indices that match the desired time range (1 < t/torb < 2)
    torb = ts.torb
    idx = np.arange(len(torb))
    idx = idx[(torb>1) & (torb<2)]
    imin, imax = idx.min(), idx.max()
    # Layout: two stacked time-series panels on the left, one 2D panel on the right.
    fig=plt.figure(figsize=(18,8),constrained_layout=False)
    og=fig.add_gridspec(2,2,width_ratios=[1.5,1],wspace=0.2,hspace=0.1)
    axes=[]
    axes.append(fig.add_subplot(og[0,0]))
    axes.append(fig.add_subplot(og[1,0]))
    axes.append(fig.add_subplot(og[:,1]))
    # Reference (expectation) rates from the sampler, shown as gray bands.
    refs,etas,etac,etah = sampler.get_refs(sfr)
    axes[0].fill_between(time,0,refs[0]*area,color='k',alpha=0.2,lw=0)
    axes[1].fill_between(time,0,refs[2]*area,color='k',alpha=0.2,lw=0)
    for m1, m2, alpha in zip(mc,mh,[0.5,0.7,0.9]):
        cool,hot = sampler.draw_mass(sfr,m1,m2,area=area,dt=dt)
        ts_cool = to_time_series(cool,time)
        ts_hot = to_time_series(hot,time)
        # plot mass outflow rate of cool gas (10-step rolling mean, time-shifted)
        plt.sca(axes[0])
        sr = pd.Series(ts_cool[0]/dt,time)
        plt.plot(time+tdelay,sr.rolling(10).mean(),
                 lw=2,label=r'${}M_\odot$'.format(scifmt(m1)))
        plt.yscale('log')
        plt.ylim(1.e-3,1)
        plt.legend(loc=1,ncol=3,title=r'$m^{{\rm cool}}$',
                   fontsize='x-small',framealpha=1.0)
        plt.ylabel(r'$\dot{M}_{\rm out}\,[M_\odot\,{\rm yr^{-1}}]$')
        plt.annotate('(a)',(0.02,0.95),xycoords='axes fraction',ha='left',va='top')
        toggle_xticks([axes[0]])
        # plot energy outflow rate of hot gas
        plt.sca(axes[1])
        sr = pd.Series(ts_hot[2]/dt,time)
        plt.plot(time+tdelay,sr.rolling(10).mean(),
                 lw=2,label=r'${}M_\odot$'.format(scifmt(m2))+
                            r'$('+scifmt(ts_hot[2].mean(),fmt=':9.1e')+
                            r'{{\rm erg}})$')
        plt.ylabel(r'$\dot{E}_{\rm out}\,[{\rm erg\,yr^{-1}}]$')
        plt.xlabel(r'${\rm time [Myr]}$')
        plt.legend(loc=1,fontsize='x-small',framealpha=1.0,ncol=3,
                   title=r'$m^{\rm hot}( \overline{e^{{\rm hot}}})$')
        plt.annotate('(b)',(0.02,0.95),xycoords='axes fraction',ha='left',va='top')
        plt.setp(axes[:2],'xlim',(0,600))
        plt.yscale('log')
    # plot sampled particle distribution on top of simulation distribution
    # only particles in 1<t/torb<2 (uses the last m1/m2 pair from the loop above)
    plt.sca(axes[2])
    show2d(sim.simpdf['Mpdf'],alpha=0.5)
    cool_idx = (cool['idx']>imin) & (cool['idx']<imax)
    hot_idx = (hot['idx']>imin) & (hot['idx']<imax)
    uc = np.log10(cool['vz'][cool_idx])
    wc = np.log10(cool['cs'][cool_idx])
    uh = np.log10(hot['vz'][hot_idx])
    wh = np.log10(hot['cs'][hot_idx])
    l,=plt.plot(uc,wc,'o',alpha=alpha,markeredgewidth=0,
                label=r'$N^{{\rm cool}}={}$'.format(len(uc)))
    plt.plot(uh,wh,'s',alpha=alpha,color=l.get_color(),
             markeredgewidth=0,label=r'$N^{{\rm hot}}={}$'.format(len(uh)))
    plt.annotate('(c)',(0.05,0.95),xycoords='axes fraction',ha='left',va='top')
    plt.legend(fontsize='small',loc=4)
    # Overlay the simulation's own whole-box time series in black.
    axes[0].set_ylim(1.e-3,1)
    axes[0].plot(ts['tMyr'],ts['mass_whole'],color='k')
    axes[1].set_ylim(1.e44,2.e48)
    axes[1].plot(ts['tMyr'],ts['energy_whole'],color='k')
    return fig
| StarcoderdataPython |
1644103 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_repr
Unit tests for the _repr module.
"""
import kineticstoolkit._repr as _repr
def test_format_dict_entries():
    """Test dict formatting."""
    # Test with all strings in keys
    d = {
        'key1': 'value1',
        'key2': 'value2',
    }
    # Default: keys are quoted like their repr.
    assert(
        _repr._format_dict_entries(d)
        == "    'key1': 'value1'\n    'key2': 'value2'\n"
    )
    # quotes=False strips the quotes around string keys only.
    assert(
        _repr._format_dict_entries(d, quotes=False)
        == "    key1: 'value1'\n    key2: 'value2'\n"
    )
    # Test with mixed types in keys and long values
    # (long values are expected to be truncated with a trailing '...').
    d = {
        '1': 'value1',
        2: 'value2',
        3.0: 'A' * 200,
    }
    assert(
        _repr._format_dict_entries(d)
        == "    '1': 'value1'\n    2: 'value2'\n    3.0: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA...\n"
    )
if __name__ == "__main__":
    # Allow running this test module directly without invoking pytest by hand.
    import pytest
    pytest.main([__file__])
| StarcoderdataPython |
1779814 | <reponame>JoshuaJoost/GNN_SS20
import numpy as np
import tensorflow as tf
# Report the installed library versions.
print(f"numpy version: {np.__version__}")
# print(f"tensorflow version: {tf.__version__}")  # disabled; fixed typo (__verion__)
1784584 | <reponame>srp-31/Face-Mask-Detection<filename>src/front_end_app/main.py
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input,Output,State,MATCH,ALL,ALLSMALLER
from flask import Flask, Response
import cv2
from PIL import Image, ImageEnhance
import numpy as np
import cv2
import os
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
import detect_mask_image
import json
import base64
UPLOAD_DIRECTORY='./uploaded_images'  # where Dash-uploaded images are written to disk
class VideoCamera(object):
    """Thin wrapper around the default webcam (OpenCV capture device 0)."""
    def __init__(self):
        # Open the first available capture device.
        self.video = cv2.VideoCapture(0)
    def __del__(self):
        # Release the device when the wrapper is garbage-collected.
        self.video.release()
    def get_frame(self):
        """Grab one frame and return it JPEG-encoded as bytes."""
        success, image = self.video.read()
        # NOTE(review): `success` is never checked — if the read fails, `image`
        # is None and imencode will raise; confirm whether that is acceptable.
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
def gen(camera):
    """Yield an endless multipart (MJPEG) byte stream from *camera*.

    Each chunk wraps one JPEG frame in the ``--frame`` multipart boundary
    expected by the /video_feed route.
    """
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield header + camera.get_frame() + b'\r\n\r\n'
# Flask serves the raw MJPEG stream; Dash mounts on the same server for the UI.
server = Flask(__name__)
app = dash.Dash(__name__, server=server)
@server.route('/video_feed')
def video_feed():
    """Stream webcam frames as a multipart MJPEG HTTP response."""
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def save_file(name, content):
    """Decode and store a file uploaded with Plotly Dash.

    Parameters
    ----------
    name : str
        Target file name inside UPLOAD_DIRECTORY.
    content : str
        Dash upload payload of the form 'data:<mime>;base64,<data>'.
    """
    # Ensure the target directory exists so the very first upload does not fail.
    os.makedirs(UPLOAD_DIRECTORY, exist_ok=True)
    # Drop the 'data:<mime>;base64,' prefix and keep only the base64 payload.
    data = content.encode("utf8").split(b";base64,")[1]
    with open(os.path.join(UPLOAD_DIRECTORY, name), "wb") as fp:
        fp.write(base64.decodebytes(data))
# Input widgets swapped into the page depending on the selected usage mode:
# 'UI' = upload image, 'LW' = live webcam (the 'UV' video mode is disabled below).
DYNAMIC_CONTROLS = {
    'UI': dcc.Upload(
        id={
            'type': 'input-data',
            'index': 0
        },
        children=html.Div([
            'Drag and Drop or ',
            html.A('Select File')
        ]),
        style={
            'width': '100%',
            'height': '60px',
            'lineHeight': '60px',
            'borderWidth': '1px',
            'borderStyle': 'dashed',
            'borderRadius': '5px',
            'textAlign': 'center',
            'margin': '10px'
        },
    ),
    # 'UV': dcc.Upload(
    #     id={
    #         'type': 'input-data',
    #         'index': 1
    #     },
    #     children=html.Div([
    #         'Drag and Drop or ',
    #         html.A('Select Files')
    #     ]),
    #     style={
    #         'width': '100%',
    #         'height': '60px',
    #         'lineHeight': '60px',
    #         'borderWidth': '1px',
    #         'borderStyle': 'dashed',
    #         'borderRadius': '5px',
    #         'textAlign': 'center',
    #         'margin': '10px'
    #     },
    # ),
    'LW': html.Button('Start/Stop Web-Cam Feed', id={
        'type': 'input-data',
        'index': 1},
        n_clicks=0)
}
# Top-level page layout: mode selector, dynamic input pane, and output pane.
app.layout = html.Div([
    html.H1('FACE MASK DETECTOR'),  # fixed user-facing typo: 'MACK' -> 'MASK'
    html.H2('Mode of Use'),
    dcc.Dropdown(
        id='dropdown',
        options=[
            {'label': 'Upload Image', 'value': 'UI'},
            #{'label': 'Upload Video', 'value': 'UV'},
            {'label': 'Live Webcam', 'value': 'LW'}
        ],
        placeholder="Select a mode",
    ),
    # Populated by the dropdown callback with the matching DYNAMIC_CONTROLS widget.
    html.Div(id='selected-mode'),
    html.Hr(),
    html.H2('Input Data'),
    html.Div(id='input-pane'),
    html.Hr(),
    html.Button('Process', id='process-data',n_clicks=0),
    html.Hr(),
    html.H2('Output Data'),
    html.Div(id='output-pane')
])
@app.callback(
    Output(component_id='selected-mode',component_property= 'children'),
    [Input(component_id='dropdown',component_property= 'value')])
def update_output(value):
    """Swap in the input control that matches the selected usage mode.

    NOTE(review): the callback below reuses the name `update_output`; Dash
    registers callbacks at decoration time so both still fire, but the later
    definition shadows this one at module level — consider renaming.
    """
    if value=='UI':
        return html.Div([
            DYNAMIC_CONTROLS[value]])
    # elif value=='UV':
    #     return html.Div([
    #         DYNAMIC_CONTROLS[value]])
    elif value == 'LW':
        return (DYNAMIC_CONTROLS[value])
@app.callback(
    Output('input-pane', 'children'),
    Input(component_id='dropdown',component_property= 'value'),
    Input({'type': 'input-data', 'index': ALL}, 'contents'),
    Input({'type': 'input-data', 'index': ALL}, 'n_clicks'),
    State({'type': 'input-data', 'index': ALL}, 'filename'))
def update_output(value,contents,n_clicks,filename):
    """Render the input pane: show the uploaded image or start the webcam feed.

    Uses the callback context's raw prop-id strings (pattern-matching ids are
    serialized as JSON) to tell which dynamic control actually triggered.
    """
    ctx=dash.callback_context
    #ctx_msg = json.dumps({
    #    'states': ctx.states,
    #    'triggered': ctx.triggered,
    #    'inputs': ctx.inputs
    #}, indent=2)
    if ctx.triggered:
        usage_mode=ctx.inputs[ "dropdown.value"]
        if usage_mode == 'UI':
            # Was the trigger the upload control (index 0) receiving contents?
            filtered_list=list(filter(lambda x:x["prop_id"]=="{\"index\":0,\"type\":\"input-data\"}.contents",ctx.triggered))
            if filtered_list:
                img_contents=ctx.inputs["{\"index\":0,\"type\":\"input-data\"}.contents"]
                img_filename=ctx.states["{\"index\":0,\"type\":\"input-data\"}.filename"]
                if img_contents:
                    # Persist the upload and echo it back as an inline image.
                    save_file(img_filename,img_contents)
                    return html.Div([html.H5(img_filename),
                                     html.Img(src=img_contents)])
        #elif value == 'UV':
        #    return html.Div([html.H5(filename),
        #                     html.Video(src=contents)])
        elif usage_mode == 'LW':
            # Was the trigger the webcam toggle button (index 1)?
            filtered_list = list(filter(lambda x: x["prop_id"] == "{\"index\":1,\"type\":\"input-data\"}.n_clicks", ctx.triggered))
            if filtered_list:
                n_clicks=ctx.inputs["{\"index\":1,\"type\":\"input-data\"}.n_clicks"]
                # Odd click count = feed on, even = feed off.
                if n_clicks%2 !=0:
                    return html.Div(html.Img(src="/video_feed"))
# Components referenced by callbacks are created dynamically, so suppress
# Dash's missing-component validation errors.
app.config.suppress_callback_exceptions = True
if __name__ == '__main__':
    app.run_server(debug=True)
| StarcoderdataPython |
13279 | <filename>configutator/__version.py
__version__ = [1, 0, 2]  # [major, minor, patch]
# Dotted version string assembled from the component list above.
__versionstr__ = '.'.join(map(str, __version__))
if __name__ == '__main__':
    # Print the version when executed directly.
    print(__versionstr__)
1741003 | import re
import sys
from web3 import Web3
# TODO: This was copy-pasted from one of my other projects,
# need to refactor, add tests etc
# Matches Solidity custom-error declarations, e.g. "error Unauthorized();",
# capturing the error name.
regex = re.compile(r"error\s(\w+)\(.+\;")

def errors_on_file(path):
    """Return the names of all Solidity custom errors declared in *path*.

    Parameters
    ----------
    path : str
        Path to a Solidity source file.

    Returns
    -------
    list of str
        Captured error names, in order of appearance.
    """
    # Context manager guarantees the handle is closed even if reading fails
    # (the original left the file open on error).
    with open(path, mode='r') as file:
        contents = file.read()
    return regex.findall(contents)
def add_error_sig_comments(path):
    """Annotate each custom error's @dev comment in *path* with its selector.

    For every ``error Name(...)`` declaration found in the file, computes the
    4-byte selector ``keccak256("Name()")[:4]`` and substitutes it into the
    ``@dev 0x...`` comment line immediately preceding the declaration.
    The file is rewritten in place.
    """
    # Read with a context manager so the handle is always closed.
    with open(path, mode='r') as file:
        contents = file.read()
    errs = errors_on_file(path)
    for err in errs:
        # '0x' plus the first 8 hex chars = 4-byte selector.
        # NOTE(review): the signature is hashed as zero-argument ("Name()");
        # errors declared with parameters would get the wrong selector.
        sig = Web3.toHex(Web3.keccak(text="{}()".format(err)))[:10]
        old = re.compile("^(.*@dev\\s)0x.*(\\n\\s*error\\s{}\\(\\);)".format(err), flags=re.M)
        new = "\g<1>{}\g<2>".format(sig)
        contents = re.sub(old, new, contents)
    with open(path, mode='w') as file:
        file.write(contents)
    print("Generated error sig comments on", path)
def main(args):
    """Entry point: add selector comments to every file listed after the subcommand.

    Parameters
    ----------
    args : list of str
        Argument vector; args[0] is the program name, args[1] the subcommand,
        and args[2:] the Solidity files to process.
    """
    if len(args) < 2:
        print("Usage: solcery errsig [<path_to_file_containing_error>]")
        return
    # Bug fix: iterate over the argument vector that was passed in instead of
    # reading sys.argv directly, so main() behaves correctly when called
    # programmatically with a custom argument list.
    for path in args[2:]:
        add_error_sig_comments(path)
if __name__=="__main__":
    # CLI entry point: forward the full argument vector.
    main(sys.argv)
| StarcoderdataPython |
1787209 | <filename>proj/pretrain_rnn.py
# Author: bbrighttaer
# Project: IReLeaSE
# Date: 3/23/2020
# Time: 12:03 PM
# File: pretrain.py
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import math
import os
import random
import time
from datetime import datetime as dt
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from soek import CategoricalParam, LogRealParam, RealParam, DiscreteParam, DataNode, RandomSearch, \
BayesianOptSearch
from soek.bopt import GPMinArgs
from soek.template import Trainer
from torch.utils.tensorboard import SummaryWriter
from tqdm import trange
from irelease.data import GeneratorData
from irelease.model import Encoder, StackRNN, RNNLinearOut, StackedRNNLayerNorm, StackedRNNDropout
from irelease.utils import Flags, parse_optimizer, ExpAverage, GradStats, Count, init_hidden, init_cell, init_stack, \
generate_smiles, time_since, get_default_tokens, canonical_smiles
currentDT = dt.now()
date_label = currentDT.strftime("%Y_%m_%d__%H_%M_%S")  # timestamp tag for saved artifacts
# Random seeds, one simulation round per entry.
seeds = [1]
# Select the compute device; use_cuda is None (falsy) on CPU-only machines.
if torch.cuda.is_available():
    dvc_id = 0
    use_cuda = True
    device = f'cuda:{dvc_id}'
    torch.cuda.set_device(dvc_id)
else:
    device = 'cpu'
    use_cuda = None
class IreleasePretrain(Trainer):
    """Trainer for pretraining the Stack-RNN SMILES generator."""
    @staticmethod
    def initialize(hparams, gen_data, *args, **kwargs):
        """Build the generator model, its optimizer, and the RNN state spec.

        Returns (model, optimizer, gen_data, rnn_args), where rnn_args holds
        the sizes needed to construct hidden/cell/stack states per batch.
        """
        gen_data.set_batch_size(hparams['batch_size'])
        # Create main model
        encoder = Encoder(vocab_size=gen_data.n_characters, d_model=hparams['d_model'],
                          padding_idx=gen_data.char2idx[gen_data.pad_symbol],
                          dropout=hparams['dropout'], return_tuple=True)
        # Create RNN layers
        rnn_layers = []
        has_stack = True
        for i in range(1, hparams['num_layers'] + 1):
            rnn_layers.append(StackRNN(layer_index=i,
                                       input_size=hparams['d_model'],
                                       hidden_size=hparams['d_model'],
                                       has_stack=has_stack,
                                       unit_type=hparams['unit_type'],
                                       stack_width=hparams['stack_width'],
                                       stack_depth=hparams['stack_depth'],
                                       k_mask_func=encoder.k_padding_mask))
            # Dropout + layer norm between stacked RNN layers (multi-layer only).
            if hparams['num_layers'] > 1:
                rnn_layers.append(StackedRNNDropout(hparams['dropout']))
                rnn_layers.append(StackedRNNLayerNorm(hparams['d_model']))
        model = nn.Sequential(encoder,
                              *rnn_layers,
                              RNNLinearOut(out_dim=gen_data.n_characters,
                                           hidden_size=hparams['d_model'],
                                           bidirectional=False,
                                           # encoder=encoder,
                                           # dropout=hparams['dropout'],
                                           bias=True))
        if use_cuda:
            model = model.cuda()
        optimizer = parse_optimizer(hparams, model)
        # State-construction arguments consumed by init_hidden/init_cell/init_stack.
        rnn_args = {'num_layers': hparams['num_layers'],
                    'hidden_size': hparams['d_model'],
                    'num_dir': 1,
                    'device': device,
                    'has_stack': has_stack,
                    'has_cell': hparams['unit_type'] == 'lstm',
                    'stack_width': hparams['stack_width'],
                    'stack_depth': hparams['stack_depth']}
        return model, optimizer, gen_data, rnn_args

    @staticmethod
    def data_provider(k, flags):
        """Load the SMILES training file; the same data serves train/val/test."""
        tokens = get_default_tokens()
        gen_data = GeneratorData(training_data_path=flags.data_file,
                                 delimiter='\t',
                                 cols_to_read=[0],
                                 keep_header=True,
                                 pad_symbol=' ',
                                 max_len=120,
                                 tokens=tokens,
                                 use_cuda=use_cuda)
        return {"train": gen_data, "val": gen_data, "test": gen_data}

    @staticmethod
    def evaluate(eval_dict, predictions, labels):
        """Compute next-token accuracy, store it in eval_dict, and return it."""
        y_true = labels.cpu().detach().numpy()
        # Predicted token = argmax over the vocabulary dimension.
        y_pred = torch.max(predictions, dim=-1)[1]
        y_pred = y_pred.cpu().detach().numpy()
        acc = accuracy_score(y_true, y_pred)
        eval_dict['accuracy'] = acc
        return acc

    @staticmethod
    def train(model, optimizer, gen_data, rnn_args, n_iters=5000, sim_data_node=None, epoch_ckpt=(1, 2.0),
              tb_writer=None, is_hsearch=False):
        """Run the pretraining loop (next-token prediction on SMILES).

        Saves a model checkpoint after each epoch and returns a dict with the
        trained model, mean accuracy, and the number of epochs implied by
        n_iters. Note: epoch_ckpt and is_hsearch are currently unused here.
        """
        tb_writer = None  # tb_writer()  # TensorBoard disabled for this run
        start = time.time()
        best_model_wts = model.state_dict()
        best_score = -10000
        best_epoch = -1
        terminate_training = False
        e_avg = ExpAverage(.01)
        num_batches = math.ceil(gen_data.file_len / gen_data.batch_size)
        n_epochs = math.ceil(n_iters / num_batches)
        grad_stats = GradStats(model, beta=0.)

        # learning rate decay schedulers
        # scheduler = sch.StepLR(optimizer, step_size=500, gamma=0.01)

        # pred_loss functions (padding positions are excluded from the loss)
        criterion = nn.CrossEntropyLoss(ignore_index=gen_data.char2idx[gen_data.pad_symbol])

        # sub-nodes of sim data resource
        loss_lst = []
        train_loss_node = DataNode(label="train_loss", data=loss_lst)
        metrics_dict = {}
        metrics_node = DataNode(label="validation_metrics", data=metrics_dict)
        train_scores_lst = []
        train_scores_node = DataNode(label="train_score", data=train_scores_lst)
        scores_lst = []
        scores_node = DataNode(label="validation_score", data=scores_lst)

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [train_loss_node, train_scores_node, metrics_node, scores_node]

        try:
            # Main training loop
            tb_idx = {'train': Count(), 'val': Count(), 'test': Count()}
            epoch_losses = []
            epoch_scores = []
            # NOTE(review): loops a fixed 6 epochs although n_epochs is computed
            # from n_iters above — confirm whether this is intended.
            for epoch in range(6):
                phase = 'train'

                # Iterate through mini-batches
                # with TBMeanTracker(tb_writer, 10) as tracker:
                with grad_stats:
                    for b in trange(0, num_batches, desc=f'{phase} in progress...'):
                        inputs, labels = gen_data.random_training_set()
                        batch_size, seq_len = inputs.shape[:2]
                        optimizer.zero_grad()

                        # track history if only in train
                        with torch.set_grad_enabled(phase == "train"):
                            # Create fresh hidden/cell/stack states for each layer.
                            hidden_states = []
                            for _ in range(rnn_args['num_layers']):
                                hidden = init_hidden(num_layers=1, batch_size=batch_size,
                                                     hidden_size=rnn_args['hidden_size'],
                                                     num_dir=rnn_args['num_dir'], dvc=rnn_args['device'])
                                if rnn_args['has_cell']:
                                    cell = init_cell(num_layers=1, batch_size=batch_size,
                                                     hidden_size=rnn_args['hidden_size'],
                                                     num_dir=rnn_args['num_dir'], dvc=rnn_args['device'])
                                else:
                                    cell = None
                                if rnn_args['has_stack']:
                                    stack = init_stack(batch_size, rnn_args['stack_width'],
                                                       rnn_args['stack_depth'], dvc=rnn_args['device'])
                                else:
                                    stack = None
                                hidden_states.append((hidden, cell, stack))
                            # forward propagation
                            outputs = model([inputs] + hidden_states)
                            predictions = outputs[0]
                            # Flatten (seq, batch, vocab) -> (seq*batch, vocab) for the loss.
                            predictions = predictions.permute(1, 0, -1)
                            predictions = predictions.contiguous().view(-1, predictions.shape[-1])
                            labels = labels.contiguous().view(-1)

                            # calculate loss
                            loss = criterion(predictions, labels)

                        # metrics
                        eval_dict = {}
                        score = IreleasePretrain.evaluate(eval_dict, predictions, labels)

                        # TBoard info
                        # tracker.track("%s/loss" % phase, loss.item(), tb_idx[phase].IncAndGet())
                        # tracker.track("%s/score" % phase, score, tb_idx[phase].i)
                        # for k in eval_dict:
                        #     tracker.track('{}/{}'.format(phase, k), eval_dict[k], tb_idx[phase].i)

                        # backward pass
                        loss.backward()
                        optimizer.step()

                        # for epoch stats
                        epoch_losses.append(loss.item())

                        # for sim data resource
                        train_scores_lst.append(score)
                        loss_lst.append(loss.item())

                        # for epoch stats
                        epoch_scores.append(score)

                        # Progress line includes a single generated SMILES sample.
                        print("\t{}: Epoch={}/{}, batch={}/{}, "
                              "pred_loss={:.4f}, accuracy: {:.2f}, sample: {}".format(time_since(start),
                                                                                      epoch + 1, n_epochs,
                                                                                      b + 1,
                                                                                      num_batches,
                                                                                      loss.item(),
                                                                                      eval_dict['accuracy'],
                                                                                      generate_smiles(
                                                                                          generator=model,
                                                                                          gen_data=gen_data,
                                                                                          init_args=rnn_args,
                                                                                          num_samples=1)
                                                                                      ))
                    # Checkpoint after every epoch.
                    IreleasePretrain.save_model(model, './model_dir/', name=f'irelease-pretrained_stack-rnn_gru_'
                                                                            f'{date_label}_epoch_{epoch}')
                # End of mini=batch iterations.
        except RuntimeError as e:
            print(str(e))

        duration = time.time() - start
        print('\nModel training duration: {:.0f}m {:.0f}s'.format(duration // 60, duration % 60))
        return {'model': model, 'score': round(np.mean(epoch_scores), 3), 'epoch': n_epochs}

    @staticmethod
    @torch.no_grad()
    def evaluate_model(model, gen_data, rnn_args, sim_data_node=None, num_smiles=1000):
        """Sample num_smiles SMILES from the model and record valid/invalid sets."""
        start = time.time()
        model.eval()

        # Samples SMILES in chunks of `step` to bound memory use.
        samples = []
        step = 100
        count = 0
        for _ in range(int(num_smiles / step)):
            samples.extend(generate_smiles(generator=model, gen_data=gen_data, init_args=rnn_args,
                                           num_samples=step, is_train=False, verbose=True))
            count += step
        res = num_smiles - count
        if res > 0:
            samples.extend(generate_smiles(generator=model, gen_data=gen_data, init_args=rnn_args,
                                           num_samples=res, is_train=False, verbose=True))

        # canonical_smiles returns '' for strings RDKit cannot parse.
        smiles, valid_vec = canonical_smiles(samples)
        valid_smiles = []
        invalid_smiles = []
        for idx, sm in enumerate(smiles):
            if len(sm) > 0:
                valid_smiles.append(sm)
            else:
                invalid_smiles.append(samples[idx])
        v = len(valid_smiles)
        # De-duplicate; v - len(valid_smiles) counts the duplicates removed.
        valid_smiles = list(set(valid_smiles))
        print(f'Percentage of valid SMILES = {float(len(valid_smiles)) / float(len(samples)):.2f}, '
              f'Num. samples = {len(samples)}, Num. valid = {len(valid_smiles)}, '
              f'Num. requested = {num_smiles}, Num. dups = {v - len(valid_smiles)}')

        # sub-nodes of sim data resource
        smiles_node = DataNode(label="valid_smiles", data=valid_smiles)
        invalid_smiles_node = DataNode(label='invalid_smiles', data=invalid_smiles)

        # add sim data nodes to parent node
        if sim_data_node:
            sim_data_node.data = [smiles_node, invalid_smiles_node]

        duration = time.time() - start
        print('\nModel evaluation duration: {:.0f}m {:.0f}s'.format(duration // 60, duration % 60))

    @staticmethod
    def save_model(model, path, name):
        """Save the model's state dict to <path>/<name>.mod, creating path if needed."""
        os.makedirs(path, exist_ok=True)
        file = os.path.join(path, name + ".mod")
        torch.save(model.state_dict(), file)

    @staticmethod
    def load_model(path, name):
        """Load a state dict saved by save_model, mapped onto the current device."""
        return torch.load(os.path.join(path, name), map_location=torch.device(device))
def main(flags):
    """Entry point: pretrain (or evaluate / hyperparameter-search) the Stack-RNN
    generator once per seed in the module-level ``seeds`` list, collecting all
    results under a DataNode tree that is written to ./analysis/ at the end."""
    sim_label = flags.exp_name if flags.exp_name else 'Irelease-pretraining-Stack-RNN'
    if flags.eval:
        sim_label += '_eval'
    sim_data = DataNode(label=sim_label, metadata={'exp': flags.exp_name, 'date': date_label})
    nodes_list = []
    sim_data.data = nodes_list
    # For searching over multiple seeds
    hparam_search = None
    for seed in seeds:
        # One TensorBoard log directory per (label, seed, timestamp) run.
        summary_writer_creator = lambda: SummaryWriter(log_dir="tb_gpmt"
                                                               "/{}_{}_{}/".format(sim_label, seed, dt.now().strftime(
            "%Y_%m_%d__%H_%M_%S")))
        # for data collection of this round of simulation.
        data_node = DataNode(label="seed_%d" % seed)
        nodes_list.append(data_node)
        # Seed every RNG in play for reproducibility.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        print('-------------------------------------------------------------------------------------------------')
        print(f'Running on dataset: {flags.data_file}, experiment = {flags.exp_name}')
        print('-------------------------------------------------------------------------------------------------')
        trainer = IreleasePretrain()
        k = 1
        if flags["hparam_search"]:
            print("Hyperparameter search enabled: {}".format(flags["hparam_search_alg"]))
            # arguments to callables
            extra_init_args = {}
            extra_data_args = {"flags": flags}
            extra_train_args = {"is_hsearch": True,
                                "n_iters": 50000,
                                "tb_writer": summary_writer_creator}
            hparams_conf = get_hparam_config(flags)
            # The search object is created once and reused across seeds.
            if hparam_search is None:
                search_alg = {"random_search": RandomSearch,
                              "bayopt_search": BayesianOptSearch}.get(flags["hparam_search_alg"],
                                                                     BayesianOptSearch)
                search_args = GPMinArgs(n_calls=20, random_state=seed)
                hparam_search = search_alg(hparam_config=hparams_conf,
                                           num_folds=1,
                                           initializer=trainer.initialize,
                                           data_provider=trainer.data_provider,
                                           train_fn=trainer.train,
                                           save_model_fn=trainer.save_model,
                                           alg_args=search_args,
                                           init_args=extra_init_args,
                                           data_args=extra_data_args,
                                           train_args=extra_train_args,
                                           data_node=data_node,
                                           split_label='',
                                           sim_label=sim_label,
                                           dataset_label='ChEMBL_SMILES',
                                           results_file="{}_{}_gpmt_{}.csv".format(
                                               flags["hparam_search_alg"], sim_label, date_label))
            stats = hparam_search.fit(model_dir="models", model_name='irelease')
            print(stats)
            print("Best params = {}".format(stats.best()))
        else:
            hyper_params = default_hparams(flags)
            model, optimizer, gen_data, rnn_args = trainer.initialize(hyper_params,
                                                                      gen_data=trainer.data_provider(k, flags)['train'])
            if flags.eval:
                # Evaluation mode: load saved weights and only sample SMILES.
                load_model = trainer.load_model(flags.model_dir, flags.eval_model_name)
                model.load_state_dict(load_model)
                trainer.evaluate_model(model, gen_data, rnn_args, data_node, num_smiles=flags.num_smiles)
            else:
                # Optionally warm-start training from an existing checkpoint.
                if flags.init_model:
                    load_model = trainer.load_model(flags.model_dir, flags.init_model)
                    model.load_state_dict(load_model)
                    print(f'Model weights {flags.init_model} loaded successfully!')
                results = trainer.train(model=model,
                                        optimizer=optimizer,
                                        gen_data=gen_data,
                                        rnn_args=rnn_args,
                                        n_iters=1500000,
                                        sim_data_node=data_node,
                                        tb_writer=summary_writer_creator)
                trainer.save_model(results['model'], flags.model_dir,
                                   name=f'irelease-pretrained_stack-rnn_{hyper_params["unit_type"]}_'
                                        f'{date_label}_{results["score"]}_{results["epoch"]}')
    # save simulation data resource tree to file.
    sim_data.to_json(path="./analysis/")
def default_hparams(args):
    """Fixed hyperparameters used when no hyperparameter search is requested.

    ``args`` is accepted for signature parity with ``get_hparam_config`` but is
    not consulted.
    """
    hparams = {
        'unit_type': 'gru',
        'num_layers': 2,
        'dropout': 0.0,
        'd_model': 1500,
        'stack_width': 1500,
        'stack_depth': 200,
        'batch_size': 1,
    }
    # Optimizer settings (weight decay intentionally left unset).
    hparams['optimizer'] = 'adadelta'
    hparams['optimizer__global__lr'] = 0.001
    return hparams
def get_hparam_config(args):
    """Hyperparameter search space consumed by the random/Bayesian-opt search.

    ``args`` is accepted for signature parity with ``default_hparams`` but is
    not consulted.
    """
    config = {
        'unit_type': CategoricalParam(choices=['gru', 'lstm']),
        'num_layers': DiscreteParam(min=1, max=10),
        "d_model": DiscreteParam(min=32, max=1024),
        "stack_width": DiscreteParam(min=10, max=128),
        "stack_depth": DiscreteParam(min=10, max=64),
        "dropout": RealParam(0.0, max=0.3),
        "batch_size": CategoricalParam(choices=[32, 64, 128]),
        # optimizer params
        "optimizer": CategoricalParam(choices=["sgd", "adam", "adadelta", "adagrad", "adamax", "rmsprop"]),
        "optimizer__global__weight_decay": LogRealParam(),
        "optimizer__global__lr": LogRealParam(),
    }
    return config
if __name__ == '__main__':
    # NOTE(review): the description says "Transformer" but this script pretrains a
    # Stack-RNN generator -- confirm the wording.
    parser = argparse.ArgumentParser('Pretraining of Memory-Augmented Transformer.')
    parser.add_argument('-d', '--data',
                        type=str,
                        dest='data_file',
                        help='Train data file')
    parser.add_argument('--model_dir',
                        type=str,
                        default='./model_dir',
                        help='Directory to store the log files in the training process.'
                        )
    parser.add_argument("--hparam_search",
                        action="store_true",
                        help="If true, hyperparameter searching would be performed.")
    parser.add_argument("--hparam_search_alg",
                        type=str,
                        default="bayopt_search",
                        help="Hyperparameter search algorithm to use. One of [bayopt_search, random_search]")
    parser.add_argument("--eval",
                        action="store_true",
                        help="If true, a saved model is loaded and evaluated")
    parser.add_argument("--eval_model_name",
                        default=None,
                        type=str,
                        help="The filename of the model to be loaded from the directory specified in --model_dir")
    parser.add_argument('--exp_name', type=str,
                        help='Name for the experiment. This would be added to saved model names')
    parser.add_argument("--init_model", help="Initial model weights")
    parser.add_argument('--num_smiles', type=int, default=10000,
                        help='Number of SMILES to sample from a generator in eval mode', )
    args = parser.parse_args()
    # Copy every parsed CLI argument onto a Flags object (attribute + key access).
    flags = Flags()
    args_dict = args.__dict__
    for arg in args_dict:
        setattr(flags, arg, args_dict[arg])
    main(flags)
| StarcoderdataPython |
3392653 | import os
import random
import subprocess
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.conf import settings
# Django 1.11 removes the ability to supply a port range for liveserver tests,
# so we replicate that here. See: https://code.djangoproject.com/ticket/28212
# and https://code.djangoproject.com/ticket/26011
test_port_range = list(range(6080, 6580))
# Shuffle the ports so that repeated runs locally are unlikely to try to reopen
# a port in the TIME_WAIT state
random.shuffle(test_port_range)
# Shared iterator: each SeleniumTestCase subclass consumes one port in setUpClass.
available_test_ports = iter(test_port_range)
def use_saucelabs():
    """Whether to run browsers on SauceLabs.

    Mirrors the original ``a or b`` expression exactly: returns the first truthy
    environment value (a string), otherwise the result of the last lookup
    (possibly ``None``), so callers can use it as a boolean.
    """
    travis = os.environ.get("TRAVIS")
    if travis:
        return travis
    return os.environ.get("USE_SAUCELABS")
def use_browserstack():
    """Whether to run browsers on BrowserStack.

    Mirrors the original ``a or b`` expression exactly: returns the first truthy
    environment value (a string), otherwise the result of the last lookup
    (possibly ``None``), so callers can use it as a boolean.
    """
    github = os.environ.get("GITHUB_ACTIONS")
    if github:
        return github
    return os.environ.get("USE_BROWSERSTACK")
@unittest.skipIf(
    os.environ.get("TEST_SUITE") == "nonfunctional",
    "nonfunctional tests specified in TEST_SUITE environment variable",
)
class SeleniumTestCase(StaticLiveServerTestCase):
    """Base class for Selenium browser tests.

    Chooses a browser backend from environment variables (SauceLabs, BrowserStack,
    or a local Firefox, optionally under Xvfb) and binds the Django live server to
    a port drawn from the module-level ``available_test_ports`` iterator.
    """
    host = "0.0.0.0"  # listen on all interfaces so remote/containerized browsers can reach us
    display = None  # pyvirtualdisplay handle when running headless under Xvfb
    @classmethod
    def setUpClass(cls):
        """Start the browser (and virtual display, if any) before the live server."""
        cls.port = next(available_test_ports)
        try:
            cls.browser = cls.get_browser()
        except Exception:
            # Tear the virtual display down if browser start-up fails, then re-raise.
            if cls.display:
                cls.display.stop()
            raise
        cls.browser.maximize_window()
        cls.browser.implicitly_wait(1)
        super(SeleniumTestCase, cls).setUpClass()
    @classmethod
    def get_browser(cls):
        """Return a webdriver for the configured backend (remote or local)."""
        if use_saucelabs():
            return cls.get_saucelabs_browser()
        elif use_browserstack():
            return cls.get_browserstack_browser()
        else:
            if cls.use_xvfb():
                # Imported lazily: pyvirtualdisplay is only needed for headless runs.
                from pyvirtualdisplay import Display
                cls.display = Display(visible=0, size=(1200, 800))
                cls.display.start()
            return cls.get_firefox_driver()
    @classmethod
    def get_saucelabs_browser(cls):
        """Build a remote webdriver against a SauceLabs tunnel.

        BROWSER is a colon-separated triple: "browser:version:platform".
        """
        browser, version, platform = os.environ["BROWSER"].split(":")
        caps = {"browserName": browser}
        caps["platform"] = platform
        caps["version"] = version
        caps["screenResolution"] = "1600x1200"
        # Disable slow script warning in IE
        caps["prerun"] = {
            "executable": (
                "https://raw.githubusercontent.com/"
                "ebmdatalab/openprescribing/"
                "master/scripts/setup_ie_8.bat"
            ),
            "background": "false",
        }
        username = os.environ["SAUCE_USERNAME"]
        access_key = os.environ["SAUCE_ACCESS_KEY"]
        if os.environ.get("TRAVIS"):
            # Tag the job so it can be matched to the CI build in the Sauce UI.
            caps["tunnel-identifier"] = os.environ.get("TRAVIS_JOB_NUMBER", "n/a")
            caps["build"] = os.environ.get("TRAVIS_BUILD_NUMBER", "n/a")
            caps["tags"] = ["CI"]
        else:
            caps["tags"] = ["from-dev-sandbox"]
        # On CI / inside docker the tunnel endpoint is reachable as "saucehost".
        if os.environ.get("TRAVIS") or os.path.exists("/.dockerenv"):
            hub_url = "%s:%s@saucehost:4445" % (username, access_key)
        else:
            hub_url = "%s:%s@localhost:4445" % (username, access_key)
        return webdriver.Remote(
            desired_capabilities=caps, command_executor="http://%s/wd/hub" % hub_url
        )
    @classmethod
    def get_browserstack_browser(cls):
        """Build a remote webdriver against BrowserStack.

        BROWSER is a colon-separated quadruple:
        "browser:browser_version:os:os_version".
        """
        browser, browser_version, browserstack_os, browserstack_os_version = os.environ[
            "BROWSER"
        ].split(":")
        local_identifier = os.environ["BROWSERSTACK_LOCAL_IDENTIFIER"]
        caps = {
            # 'browser' has precedence over 'browserName'
            # 'browserName': browser,
            "resolution": "1600x1200",
            "browser": browser,
            "browser_version": browser_version,
            "os": browserstack_os,
            "os_version": browserstack_os_version,
            "browserstack.local": "true",
            "browserstack.localIdentifier": local_identifier,
            "project": os.environ["BROWSERSTACK_PROJECT_NAME"],
            "name": os.environ["BROWSERSTACK_BUILD_NAME"],
        }
        # Disable slow script warning in IE
        caps["prerun"] = {
            "executable": (
                "https://raw.githubusercontent.com/"
                "ebmdatalab/openprescribing/"
                "master/scripts/setup_ie_8.bat"
            ),
            "background": "false",
        }
        username = os.environ["BROWSERSTACK_USERNAME"]
        access_key = os.environ["BROWSERSTACK_ACCESS_KEY"]
        hub_url = "https://%s:%s@hub-cloud.browserstack.com/wd/hub" % (
            username,
            access_key,
        )
        return webdriver.Remote(
            desired_capabilities=caps, command_executor="%s" % hub_url
        )
    @classmethod
    def use_xvfb(cls):
        """True when no visible browser was requested and xvfb-run is on PATH."""
        if not os.environ.get("SHOW_BROWSER", False):
            # `type xvfb-run` exits 0 only when the shell can resolve the command.
            return (
                subprocess.call(
                    "type xvfb-run",
                    shell=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                )
                == 0
            )
        else:
            return False
    @classmethod
    def get_firefox_driver(cls):
        """Local Firefox driver, logging to <REPO_ROOT>/logs/webdriver.log."""
        return webdriver.Firefox(log_path="%s/logs/webdriver.log" % settings.REPO_ROOT)
    @classmethod
    def tearDownClass(cls):
        """Quit the browser and stop the virtual display, then tear down Django."""
        cls.browser.quit()
        if cls.display:
            cls.display.stop()
        super(SeleniumTestCase, cls).tearDownClass()
    def _find_and_wait(self, locator_type, locator, waiter):
        """Wait for an element matching ``locator`` under the ``waiter`` condition.

        Raises AssertionError (not TimeoutException) so test failures read naturally.
        """
        # Remote SauceLabs sessions are slow, so allow a much longer timeout.
        if use_saucelabs():
            wait = 60
        else:
            wait = 5
        try:
            element = WebDriverWait(self.browser, wait).until(
                waiter((locator_type, locator))
            )
            return element
        except TimeoutException:
            raise AssertionError("Expected to find element %s" % locator)
    def find_by_xpath(self, locator):
        # Element present in the DOM (may be hidden).
        return self._find_and_wait(By.XPATH, locator, EC.presence_of_element_located)
    def find_visible_by_xpath(self, locator):
        # Element present AND visible.
        return self._find_and_wait(By.XPATH, locator, EC.visibility_of_element_located)
    def find_by_css(self, locator):
        # Element present in the DOM (may be hidden).
        return self._find_and_wait(
            By.CSS_SELECTOR, locator, EC.presence_of_element_located
        )
    def find_visible_by_css(self, locator):
        # Element present AND visible.
        return self._find_and_wait(
            By.CSS_SELECTOR, locator, EC.visibility_of_element_located
        )
| StarcoderdataPython |
4805843 | <filename>jotter/set_remote.py
"""set_remote
The functionality for the jotter set-remote command.
Author:
Figglewatts <<EMAIL>>
"""
import git
from jotter import util
def run(remote_url: str) -> None:
    """Run the set-remote command.

    Points the "origin" remote of the current jotter project's git repository at
    ``remote_url``, creating the remote if it does not exist yet.

    Args:
        remote_url: The remote URL to use.

    Raises:
        SystemExit: with status 1 when the current directory is not a jotter project.
    """
    if not util.jotter_project_exists():
        print(
            "Unable to set remote, the current folder is not a jotter project."
        )
        raise SystemExit(1)
    repo = git.Repo(path=".")
    print("Setting remote...")
    # Update the existing origin in place; otherwise create it.
    if "origin" in repo.remotes:
        repo.remote("origin").set_url(remote_url)
    else:
        repo.create_remote("origin", remote_url)
38284 | <reponame>MSLNZ/msl-loadlib
"""
Creates a 32-bit server to use for
`inter-process communication <https://en.wikipedia.org/wiki/Inter-process_communication>`_.
This module must be run from a 32-bit Python interpreter with PyInstaller_ installed.
If you want to re-freeze the 32-bit server, for example, if you want a 32-bit version of
:mod:`numpy` to be available on the server, then run the following with a 32-bit Python
interpreter that has the packages that you want to be available on the server installed
.. code-block:: pycon
>>> from msl.loadlib import freeze_server32
>>> freeze_server32.main() # doctest: +SKIP
.. _PyInstaller: https://www.pyinstaller.org/
.. _Python for .NET: https://pypi.python.org/pypi/pythonnet/
.. _comtypes: https://pythonhosted.org/comtypes/#
"""
import os
import sys
import shutil
import subprocess
try:
from urllib.request import urlopen
except ImportError: # then Python 2
from urllib import urlopen
try:
from msl import loadlib
except ImportError:
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from msl import loadlib
def main(spec=None, requires_pythonnet=True, requires_comtypes=True):
    """Creates a 32-bit Python server.
    Uses PyInstaller_ to create a frozen 32-bit Python executable. This executable
    starts a 32-bit server, :class:`~.server32.Server32`, which hosts a Python
    module that can load a 32-bit library.
    .. versionchanged:: 0.5
       Added the `requires_pythonnet` and `requires_comtypes` arguments.
    Parameters
    ----------
    spec : :class:`str`, optional
        If you want to freeze using a PyInstaller_ .spec file then you can specify the
        path to the .spec file.
    requires_pythonnet : :class:`bool`, optional
        Whether `Python for .NET`_ must be available on the 32-bit server.
    requires_comtypes : :class:`bool`, optional
        Whether comtypes_ must be available on the 32-bit server. If you using a
        non-Windows operating system then this argument is ignored.
    """
    # Freezing must happen under a 32-bit interpreter or the server will be 64-bit.
    if loadlib.IS_PYTHON_64BIT:
        print('Must run {} using a 32-bit Python interpreter'.format(os.path.basename(__file__)))
        return
    # Collect all missing prerequisites first so they can be reported in one message.
    missing_packages = []
    try:
        import PyInstaller
    except ImportError:
        missing_packages.append('pyinstaller')
    if requires_pythonnet:
        try:
            import clr
        except ImportError:
            missing_packages.append('pythonnet')
    if loadlib.IS_WINDOWS and requires_comtypes:
        try:
            import comtypes
        except ImportError:
            missing_packages.append('comtypes')
        except OSError:
            # OSError: [WinError -2147417850] Cannot change thread mode after it is set
            # don't care about this error since comtypes is indeed installed
            pass
    if missing_packages:
        print('Packages are missing to be able to create the 32-bit server, run:')
        print('pip install ' + ' '.join(missing_packages))
        return
    # start the freezing process
    here = os.path.abspath(os.path.dirname(__file__))
    cmd = [
        # Specifically invoke pyinstaller in the context of the current
        # python interpreter. This fixes the issue where the blind `pyinstaller`
        # invocation points to a 64-bit version.
        sys.executable,
        '-m', 'PyInstaller',
        '--distpath', here,
        '--noconfirm',
    ]
    if spec is None:
        spec_file = '{}.spec'.format(loadlib.SERVER_FILENAME)
        # Don't clobber a user's existing spec file without confirmation.
        if os.path.exists(spec_file):
            yn = input('A {0} file exists. You may want to run "python freeze_server32.py --spec {0}"\n'
                       'Do you want to continue and overwrite the spec file (y/[n])? '.format(spec_file))
            if yn.lower() not in ('y', 'yes'):
                print('Aborted.')
                return
        cmd.extend([
            '--name', loadlib.SERVER_FILENAME,
            '--onefile',
            '--clean',
            '--hidden-import', 'msl.examples.loadlib',
        ])
        if requires_pythonnet:
            cmd.extend(['--hidden-import', 'clr'])
        if loadlib.IS_WINDOWS and requires_comtypes:
            cmd.extend(['--hidden-import', 'comtypes'])
        # Explicitly include/exclude the standard library (see _get_standard_modules).
        cmd.extend(_get_standard_modules())
        cmd.append(os.path.join(here, 'start_server32.py'))
    else:
        cmd.append(spec)
    subprocess.check_call(cmd)
    # the --version-file option for pyinstaller does not currently work on Windows, this is a fix
    verpatch = os.path.join(here, 'verpatch.exe')
    if loadlib.IS_WINDOWS and os.path.isfile(verpatch):
        # NOTE(review): '{0}.{1}.{2}.{4}' deliberately(?) skips index 3
        # (releaselevel) of sys.version_info and uses 4 (serial) -- confirm.
        ver = [verpatch,
               os.path.join(here, loadlib.SERVER_FILENAME),
               '/va', '{0}.{1}.{2}'.format(*loadlib.version_info) + '.0',
               '/pv', '{0}.{1}.{2}.{4}'.format(*sys.version_info),
               '/s', 'description', 'Access a 32-bit library from 64-bit Python',
               '/s', 'product', 'Python 32-bit server',
               '/s', 'copyright', loadlib.__copyright__]
        subprocess.check_call(ver)
    # cleanup
    shutil.rmtree('./build/' + loadlib.SERVER_FILENAME)
    if not os.listdir('./build'):
        shutil.rmtree('./build')
    if loadlib.IS_WINDOWS:
        # pyinstaller is able to include Python.Runtime.dll and Python.Runtime.dll.config
        # automatically in the build, so we don't need to keep the .spec file
        os.remove(loadlib.SERVER_FILENAME + '.spec')
        # create the .NET Framework config file
        loadlib.utils.check_dot_net_config(os.path.join(here, loadlib.SERVER_FILENAME))
    print('Server saved to: ' + os.path.join(here, loadlib.SERVER_FILENAME))
def _get_standard_modules():
    """
    Returns a list of standard python modules to include and exclude in the
    frozen application.
    PyInstaller does not automatically bundle all of the standard Python modules
    into the frozen application. This
    method parses the 'docs.python.org' website for the list of standard Python
    modules that are available.
    The 'pyinstaller --exclude-module' option ensures that the module is
    excluded from the frozen application.
    The 'pyinstaller --hidden-import' option ensures that the module is included
    into the frozen application (only if the module is available for the operating
    system that is running this script).

    NOTE(review): requires network access -- it fetches the module index from
    docs.python.org for the running interpreter's version.

    Returns
    -------
    :class:`list` of :class:`str`
        A list of modules to be included and excluded.
    """
    # the frozen application is never meant to create GUIs or to add
    # support for building and installing Python modules
    ignore_list = ['__main__', 'distutils', 'ensurepip', 'test', 'tkinter', 'turtle']
    # some modules are platform specific and got a
    # RecursionError: maximum recursion depth exceeded
    # when running this script with PyInstaller 3.3 installed
    if loadlib.IS_WINDOWS:
        os_ignore_list = ['(Unix)', '(Linux)', '(Linux, FreeBSD)']
    elif loadlib.IS_LINUX:
        os_ignore_list = ['(Windows)']
    elif loadlib.IS_MAC:
        os_ignore_list = ['(Windows)', '(Linux)', '(Linux, FreeBSD)']
    else:
        os_ignore_list = []
    modules = []
    url = 'https://docs.python.org/{0}.{1}/py-modindex.html'.format(*sys.version_info)
    # Each entry in the module index page is anchored with '#module-<name>'; the
    # platform annotation (e.g. "(Windows)") appears in the text after the name.
    for s in urlopen(url).read().decode().split('#module-')[1:]:
        m = s.split('"><code')
        add_module = True
        for x in os_ignore_list:
            if x in m[1]:
                ignore_list.append(m[0])
                add_module = False
                break
        if add_module:
            modules.append(m[0])
    included_modules, excluded_modules = [], []
    for module in modules:
        include_module = True
        for mod in ignore_list:
            # startswith() also excludes submodules of an ignored package.
            if module.startswith(mod):
                excluded_modules.extend(['--exclude-module', module])
                include_module = False
                break
        if include_module:
            included_modules.extend(['--hidden-import', module])
    return included_modules + excluded_modules
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Create the frozen 32-bit server.')
    parser.add_argument(
        '-s', '--spec',
        help='the PyInstaller spec file to use'
    )
    parser.add_argument(
        '--ignore-pythonnet',
        action='store_true',
        default=False,
        help='ignore the error that pythonnet is not installed'
    )
    parser.add_argument(
        '--ignore-comtypes',
        action='store_true',
        default=False,
        help='ignore the error that comtypes is not installed'
    )
    args = parser.parse_args(sys.argv[1:])
    # main() returns None on every path, so the process exit code is always 0.
    sys.exit(
        main(
            spec=args.spec,
            requires_pythonnet=not args.ignore_pythonnet,
            requires_comtypes=not args.ignore_comtypes
        )
    )
| StarcoderdataPython |
3200221 | from xml.etree import ElementTree
def parse_nsrr_annotations(file_path):
    """Parse an NSRR annotation XML file and return the document's root element."""
    document = ElementTree.parse(file_path)
    return document.getroot()
def parse_nsrr_scored_events(file_path):
    """Return the list of scored-event elements from an NSRR annotation file.

    Fix: ``Element.getchildren()`` was deprecated since Python 3.2 and removed in
    Python 3.9; iterating the element (``list(element)``) is the supported,
    behavior-identical replacement.
    """
    root = parse_nsrr_annotations(file_path)
    scored_events = list(root.find('ScoredEvents'))
    return scored_events
def parse_nsrr_sleep_stages(file_path):
    """Return only the scored events whose EventType is 'Stages|Stages'."""
    stages = []
    for event in parse_nsrr_scored_events(file_path):
        if event.find('EventType').text == 'Stages|Stages':
            stages.append(event)
    return stages
| StarcoderdataPython |
3383326 | import numpy as np
import matplotlib.pyplot as plt
import lightFunctions as light
dataPath = '12-light/data/'
plotsPath = '12-light/plots/'
########################################
# Wavelength calibration
########################################
# Calibrate the pixel axis against three known mercury emission lines.
filename = 'white-mercury.png'
brightness = light.readIntensity(dataPath + filename, plotsPath + filename, 'ртутная лампа', 'белый лист')
mercuryPeaks = [435.8328, 546.0735, 578.2]  # reference wavelengths [nm]
mercuryPixels = [167, 340, 409]  # pixel positions of those peaks in the spectrum image
# Linear pixel -> wavelength mapping fitted through the three reference points.
linearFit = np.polyfit(mercuryPixels, mercuryPeaks, 1)
wavelength = np.polyval(linearFit, np.arange(0, np.size(brightness)))
########################################
# Photo processing
########################################
# Read intensities
lamp = 'лампа накаливания'
white = ['white-tungsten.png', 'белый лист']
red = ['red-tungsten.png', 'красный лист']
green = ['green-tungsten.png', 'зелёный лист']
blue = ['blue-tungsten.png', 'синий лист']
yellow = ['yellow-tungsten.png', 'жёлтый лист']
whiteBrightness = light.readIntensity(dataPath + white[0], plotsPath + white[0], lamp, white[1])
redBrightness = light.readIntensity(dataPath + red[0], plotsPath + red[0], lamp, red[1])
greenBrightness = light.readIntensity(dataPath + green[0], plotsPath + green[0], lamp, green[1])
blueBrightness = light.readIntensity(dataPath + blue[0], plotsPath + blue[0], lamp, blue[1])
yellowBrightness = light.readIntensity(dataPath + yellow[0], plotsPath + yellow[0], lamp, yellow[1])
# Plot intensites
fig = plt.figure(figsize=(7, 5), dpi=200)
plt.title('Отражённая интенсивность\nизлучения лампы накаливания')
plt.xlabel('Длина волны [нм]')
plt.ylabel('Яркость')
plt.plot(wavelength, whiteBrightness, 'w', label=white[1])
plt.plot(wavelength, redBrightness, 'r', label=red[1])
plt.plot(wavelength, greenBrightness, 'g', label=green[1])
plt.plot(wavelength, blueBrightness, 'b', label=blue[1])
plt.plot(wavelength, yellowBrightness, 'y', label=yellow[1])
plt.legend()
ax = plt.gca()
ax.set_facecolor('lightgrey')
plt.grid(which='major')
plt.grid(which='minor', linestyle='--')
plt.minorticks_on()
plt.savefig(plotsPath + '! intensites.png')
# Plot albedos
# NOTE(review): albedoOne is an alias of whiteBrightness (no copy), so the
# in-place clamp below also modifies whiteBrightness itself -- the white curve
# plotted next is therefore the clamped array. Confirm this is intended.
albedoOne = whiteBrightness
albedoOne[whiteBrightness < 1] = 1  # clamp to avoid division by ~0 below
fig = plt.figure(figsize=(7, 5), dpi=200)
plt.title('Альбедо поверхностей')
plt.xlabel('Длина волны [нм]')
plt.ylabel('Альбедо')
plt.plot(wavelength, whiteBrightness / albedoOne, 'w', label=white[1])
plt.plot(wavelength, redBrightness / albedoOne, 'r', label=red[1])
plt.plot(wavelength, greenBrightness / albedoOne, 'g', label=green[1])
plt.plot(wavelength, blueBrightness / albedoOne, 'b', label=blue[1])
plt.plot(wavelength, yellowBrightness / albedoOne, 'y', label=yellow[1])
plt.legend()
ax = plt.gca()
ax.set_facecolor('lightgrey')
plt.grid(which='major')
plt.grid(which='minor', linestyle='--')
plt.minorticks_on()
plt.savefig(plotsPath + '! albedos.png')
155712 | # Licensed under the Unlicense (http://unlicense.org)
# Made by <EMAIL>
# Chat Program
import socket
import threading
# Config
# Module-level state shared with send()/recv() and the connection code below.
MODE = "SERVER" # Modes can either be SERVER or CLIENT
HOST = "" # Symbolic name meaning all available interfaces
PORT = 1337 # Arbitrary non-privileged port (> 1024)
chatName = "Default" # Your name
friendName = "" # Leave this blank
connected = False # Leave this False
def send():
    """Sender thread: read lines from stdin and push them to the peer.

    Relies on module globals ``chatName`` and ``conn`` (set during connection
    setup below). Loops forever; terminates only with the process.
    """
    while True:
        message = input(chatName + ": ")
        data = message.encode()
        conn.sendall(data)
def recv():
    """Receiver thread: print messages from the peer until the socket closes.

    Relies on module globals ``conn`` and ``friendName`` (a bytes value received
    during the handshake). An empty recv() means the peer disconnected.
    """
    while True:
        data = conn.recv(1024)
        if not data: break
        print(friendName.decode() + ":",data.decode())
# Fall back to client mode and prompt for a name when the config is blank.
if (MODE == ''):
    MODE = "CLIENT"
if (chatName == ''):
    chatName = input("Enter Name: ")
if (MODE=="SERVER"):
    print("Starting in server mode...")
    # Keep retrying until we successfully bind and a client connects.
    while not connected:
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind((HOST, PORT))
            s.listen(1)
            print("Server Started: Listening")
            conn, addr = s.accept()
            # Handshake: exchange display names (peer's name first, then ours).
            friendName = conn.recv(1024)
            conn.sendall(chatName.encode())
            print(friendName.decode(), 'connected by', addr)
            connected = True
            break
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt -- confirm.
            nothing = input("ERROR: Failed to bind to port: " + str(PORT) + ". Press Enter to try again...")
            continue
if (MODE=="CLIENT"):
    while not connected:
        print("Starting in client mode...")
        try:
            if(HOST == ''):
                HOST = input("Host IP: ")
            if(PORT == ''):
                PORT = 1337
            conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn.connect((HOST, PORT))
            connected = True
            # Handshake mirror image: send our name, receive the peer's.
            conn.sendall(chatName.encode("utf-8"))
            friendName = conn.recv(1024)
            print("Connected to " + friendName.decode())
            connected = True
            break
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt -- confirm.
            nothing = input("Error occurred when attempting to connect. Press Enter to try again...")
            continue
# Run sender and receiver concurrently; join() blocks until both exit.
thread_list = []
thread_list.append(threading.Thread(target = recv))
thread_list.append(threading.Thread(target = send))
for thread in thread_list:
    thread.start()
for thread in thread_list:
    thread.join()
| StarcoderdataPython |
3398392 | <reponame>jerryrwu/harvest<filename>plugins/redacted/utils.py<gh_stars>1-10
import html
import re
from dataclasses import dataclass
import bs4
from upload_studio.upload_metadata import MusicMetadata
class JoinedArtistsBuilder(object):
    """Incrementally builds a joined artist list.

    Each entry in ``result`` is ``{'id': ..., 'name': ..., 'join': ...}`` where
    ``join`` is the separator rendered *after* that artist (empty for the last
    artist of a group).
    """

    def __init__(self, joined_artists_builder=None):
        # Optionally fork another builder (shallow copy of its entry list).
        if joined_artists_builder is None:
            self.result = []
        else:
            self.result = list(joined_artists_builder.result)

    def append_joined(self, join_string, artists):
        """Append a group of artists separated by ``join_string``."""
        for entry in artists:
            self.result.append({
                'id': entry['id'],
                'name': entry['name'],
                'join': join_string,
            })
        # The final artist of the group carries no trailing separator.
        self.result[-1]['join'] = ''

    def append_artist(self, artist):
        """Append a single artist with no trailing separator.

        NOTE(review): unlike append_joined(), the name is HTML-unescaped here.
        """
        self.result.append({
            'id': artist['id'],
            'name': html.unescape(artist['name']),
            'join': '',
        })

    def append_join(self, join_string):
        """Set the separator after the most recently appended artist."""
        assert not self.result[-1]['join'], 'Last join should be empty before adding a new join'
        self.result[-1]['join'] = join_string

    def clear(self):
        """Discard everything built so far."""
        self.result = []
def get_artists_list(music_info):
    """Build the joined artist-credit list for a release.

    Combines main artists, composers, conductors and DJs from a ``music_info``
    dict into a list of ``{'id', 'name', 'join'}`` entries, collapsing large
    groups into "Various ..." placeholders (id -1). Returns [] when the release
    has no credited people at all.
    """
    a_main = music_info['artists']
    a_composers = music_info['composers']
    a_conductors = music_info['conductor']
    a_djs = music_info['dj']
    if len(a_main) == 0 and len(a_conductors) == 0 and len(a_djs) == 0 and len(a_composers) == 0:
        return []
    builder = JoinedArtistsBuilder()
    # Up to two composers are credited by name, e.g. "X & Y performed by ...".
    if len(a_composers) and len(a_composers) < 3:
        builder.append_joined(' & ', a_composers)
        if len(a_composers) < 3 and len(a_main) > 0:
            builder.append_join(' performed by ')
    # Snapshot of the composer prefix; restored below for the many-performers case.
    composer_builder = JoinedArtistsBuilder(builder)
    if len(a_main):
        if len(a_main) <= 2:
            builder.append_joined(' & ', a_main)
        else:
            builder.append_artist({'id': -1, 'name': 'Various Artists'})
    if len(a_conductors):
        # " under " only makes sense when someone was already credited before it.
        if (len(a_main) or len(a_composers)) and (len(a_composers) < 3 or len(a_main)):
            builder.append_join(' under ')
        if len(a_conductors) <= 2:
            builder.append_joined(' & ', a_conductors)
        else:
            builder.append_artist({'id': -1, 'name': 'Various Conductors'})
    # Too many performers/conductors: fall back to "<composers> ... Various Artists".
    if len(a_composers) and len(a_main) + len(a_conductors) > 3 and len(a_main) > 1 and len(
            a_conductors) > 1:
        builder = composer_builder
        builder.append_artist({'id': -1, 'name': 'Various Artists'})
    elif len(a_composers) > 2 and len(a_main) + len(a_conductors) == 0:
        builder.clear()
        builder.append_artist({'id': -1, 'name': 'Various Composers'})
    # DJ credits replace everything else entirely.
    if len(a_djs):
        if len(a_djs) <= 2:
            builder.clear()
            builder.append_joined(' & ', a_djs)
        else:
            builder.clear()
            builder.append_artist({'id': -1, 'name': 'Various DJs'})
    return builder.result
def get_joined_artists(music_info):
    """Render the artist credits of a release as one display string."""
    return ''.join(
        entry['name'] + entry['join']
        for entry in get_artists_list(music_info)
    )
def get_shorter_joined_artists(music_info, group_name):
    """Like get_joined_artists(), but abbreviate when the combined artist string
    plus ``group_name`` would exceed 80 characters."""
    artists = get_joined_artists(music_info)
    if len(artists) + len(group_name) <= 80:
        return artists
    main_artists = music_info['artists']
    conductors = music_info['conductor']
    if main_artists:
        artists = main_artists[0]['name'] if len(main_artists) == 1 else 'Various Artists'
    elif conductors:
        artists = conductors[0]['name'] if len(conductors) == 1 else 'Various Conductors'
    return artists
def extract_upload_errors(html):
    """Scrape the error message from a Gazelle upload-failure HTML page.

    NOTE(review): matches the exact inline style used by the site's error
    paragraph -- brittle if the site's markup ever changes.
    """
    soup = bs4.BeautifulSoup(html, 'html5lib')
    return soup.find('p', attrs={'style': 'color: red; text-align: center;'}).text.strip()
# Encoding preference order, worst to best: torrents whose encoding appears
# later in this list win in select_best_torrents_from_torrent_dicts();
# encodings not listed rank below all of these.
_ENCODING_PREFERENCES = [
    MusicMetadata.ENCODING_320,
    MusicMetadata.ENCODING_V0,
    MusicMetadata.ENCODING_LOSSLESS,
    MusicMetadata.ENCODING_24BIT_LOSSLESS,
]
def select_best_torrents_from_torrent_dicts(torrents):
    """For each distinct remaster edition, keep only the single best torrent.

    "Best" means the highest-preference encoding per _ENCODING_PREFERENCES
    (unknown encodings rank below all known ones), with ties broken by larger
    size.
    """
    def _rank(torrent):
        # (encoding preference index, size); tuple comparison reproduces the
        # original "better encoding wins, then bigger size wins" ordering.
        try:
            encoding_rank = _ENCODING_PREFERENCES.index(torrent['encoding'])
        except ValueError:
            encoding_rank = -1
        return encoding_rank, torrent['size']

    best_by_edition = {}
    for torrent in torrents:
        edition = (
            torrent['remasterYear'],
            torrent['remasterTitle'],
            torrent['remasterRecordLabel'],
            torrent['remasterCatalogueNumber'],
        )
        current = best_by_edition.get(edition)
        if current is None or _rank(torrent) > _rank(current):
            best_by_edition[edition] = torrent
    return list(best_by_edition.values())
@dataclass
class RedactedFileInfo:
    """One file entry parsed from a torrent's encoded file list."""
    # File name as stored in the torrent.
    name: str
    # File size in bytes.
    size: int
def parse_file_list(file_list):
    """Parse Gazelle's encoded file list ("name{{{size}}}|||name{{{size}}}...")
    into a list of RedactedFileInfo objects."""
    parsed = []
    for entry in file_list.split('|||'):
        match = re.match('(.*){{{([0-9]*)}}}', entry)
        parsed.append(RedactedFileInfo(match[1], int(match[2])))
    return parsed
| StarcoderdataPython |
3217315 | <gh_stars>0
def split_line(line, length):
    """Split ``line`` into consecutive chunks of at most ``length`` characters,
    dropping chunks that are empty or whitespace-only (per ``str.lstrip``)."""
    chunks = []
    for start in range(0, len(line) + 1, length):
        chunk = line[start:start + length]
        if chunk.lstrip():
            chunks.append(chunk)
    return chunks
| StarcoderdataPython |
1657996 | <reponame>racheliurui/vscode-hello-python
#!/usr/bin/env python3
from ev3dev2.sound import Sound
sound = Sound()
def speakout(message):
sound.speak(message)
| StarcoderdataPython |
3359680 | <gh_stars>1-10
"""Definition of the ModelConfiguration class."""
import os
from dataclasses import dataclass
from typing import Dict, Optional
@dataclass
class ModelConfiguration:
    """Container for a path to a Stan model and some configuration.
    For example, you may want to compare how well two stan programs fit the
    same data, or how well the same model fits the data with different
    covariates.
    :param name: string name identifying the model configuration
    :param stan_file: Path to a Stan program, with "/" even on windows
    :param data_dir: Path to a directory containing prepared data
    :param sample_kwargs: dictionary of keyword arguments to
    cmdstanpy.CmdStanModel.sample.
    :param run_cross_validation: whether or not to run cross validation
    """
    name: str
    stan_file: str
    data_dir: str
    sample_kwargs: dict
    run_cross_validation: bool = True
    # Optional overrides for sample() during cross-validation runs.
    sample_kwargs_cross_validation: Optional[Dict] = None
    def __post_init__(self) -> None:
        """Handle windows paths correctly"""
        # Re-join on the native separator so "/"-style paths work on Windows.
        # NOTE(review): os.path.join(*path.split("/")) drops the leading "/" of an
        # absolute POSIX path -- confirm these are always relative paths.
        self.stan_file = os.path.join(*self.stan_file.split("/"))
        self.data_dir = os.path.join(*self.data_dir.split("/"))
| StarcoderdataPython |
3387955 | <reponame>stoman/gym-cards
from gym.envs.registration import register
# Register the custom card-game environment with Gym so callers can create it
# via gym.make('Wizards-v0').
register(
    id='Wizards-v0',
    entry_point='gym_cards.envs:WizardsEnv',
)
1649692 | <gh_stars>0
def func(a, b, c):
    """Classify the roots of a*x^2 + b*x + c by the sign of the discriminant
    and print the result."""
    discriminant = b ** 2 - 4 * a * c
    if discriminant > 0:
        message = "Distinct real roots"
    elif discriminant == 0:
        message = "One real root"
    else:
        message = "Complex root"
    print(message)
# Read the three quadratic coefficients from stdin and classify the roots.
a = int(input("First Coef"))
b = int(input("Second Coef"))
c = int(input("Third Coef"))
func(a, b, c)
| StarcoderdataPython |
118136 | from django.core.management.base import BaseCommand
from main.views import get_csv
class Command(BaseCommand):
    """Management command: import data from the project's CSV file via get_csv()."""
    help = 'Gets data from CSV file'
    def handle(self, *args, **options):
        """Run the import and report success on the command's stdout."""
        get_csv()
        self.stdout.write(self.style.SUCCESS('Successfully imported data from CSV'))
| StarcoderdataPython |
def replace_text(file, old_word, new_word):
    """Replace every occurrence of ``old_word`` with ``new_word`` in ``file``,
    rewriting the file in place.

    Fixes over the original implementation:
    - Reads AND writes UTF-8 (the original read with the platform default
      encoding but wrote UTF-8, which could corrupt non-ASCII text on e.g.
      Windows).
    - Uses a single read + str.replace() instead of quadratic line-by-line
      string concatenation and a redundant ``in`` pre-check.
    """
    with open(file, encoding='utf-8') as f:
        file_data = f.read()
    with open(file, 'w', encoding='utf-8') as f:
        f.write(file_data.replace(old_word, new_word))
replace_text('test_replace.txt','test2','archer') | StarcoderdataPython |
# For each of n test cases: read a length l and a binary string S, and count
# the ordered pairs (j, k) with j <= k and S[j] == S[k] == '1'. For a string
# with m ones that equals m*(m+1)/2; this O(l^2) double loop computes it directly.
n = int(input())
c = [0]*n
for i in range(n):
    l = int(input())
    S = input()
    for j in range(l):
        if (S[j]=='0'):
            continue
        for k in range(j,l):
            if (S[k]=='1'):
                c[i] = c[i]+1
# Print one count per test case, in input order.
for i in range(n):
    print(c[i])
| StarcoderdataPython |
41914 | <filename>wxpusher/tests/test_send_message.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Unittest for sending message.
File: test_send_message.py
Author: huxuan
Email: <EMAIL>
"""
import unittest
from wxpusher import WxPusher
from . import config
class TestSendMessage(unittest.TestCase):
    """Unittest for sending message.

    NOTE(review): these are integration tests -- they call the live WxPusher API
    using credentials from the local ``config`` module.
    """
    @classmethod
    def setUpClass(cls):
        """Set up for class."""
        # All WxPusher calls in this class authenticate with the configured token.
        WxPusher.default_token = config.TOKEN
    def test_send_message_uid(self):
        """Positive case for sending message with uid."""
        # The docstring doubles as the message body being sent.
        res = WxPusher.send_message(
            self.test_send_message_uid.__doc__,
            uids=config.UIDS,
            url='http://example.com/',
        )
        self.assertIsInstance(res, dict)
        self.assertIn('code', res)
        # 1000 is treated as the API's success code here -- confirm against docs.
        self.assertEqual(1000, res['code'])
    def test_send_message_topic_id(self):
        """Positive case for sending message with topic_id."""
        res = WxPusher.send_message(
            self.test_send_message_topic_id.__doc__,
            topic_ids=config.TOPIC_IDS,
            url='http://example.com/',
        )
        self.assertIsInstance(res, dict)
        self.assertIn('code', res)
        self.assertEqual(1000, res['code'])
| StarcoderdataPython |
25772 | # $language = "python"
# $interface = "1.0"
# ################################################ SCRIPT INFO ###################################################
# Author: <NAME>
# Email: <EMAIL>
#
# This script will grab the route table information from a Cisco IOS or NXOS device and export details about each
# next-hop address (how many routes and from which protocol) into a CSV file. It will also list all connected networks
# and give a detailed breakdown of every route that goes to each next-hop.
#
#
# ################################################ SCRIPT SETTING ###################################################
#
# Global settings that affect all scripts (output directory, date format, etc) is stored in the "global_settings.json"
# file in the "settings" directory.
#
# If any local settings are used for this script, they will be stored in the same settings folder, with the same name
# as the script that uses them, except ending with ".json".
#
# All settings can be manually modified in JSON format (the same syntax as Python lists and dictionaries). Be aware of
# required commas between items, or else options are likely to get run together and break the script.
#
# **IMPORTANT** All paths saved in .json files must contain either forward slashes (/home/jcaesar) or
# DOUBLE back-slashes (C:\\Users\\Jamie). Single backslashes will be considered part of a control character and will
# cause an error on loading.
#
# ################################################ IMPORTS ###################################################
import os
import sys
import logging
# If the "crt" object exists, this is being run from SecureCRT. Get script directory so we can add it to the
# PYTHONPATH, which is needed to import our custom modules.
if 'crt' in globals():
    # Running inside SecureCRT: derive the script directory from the API and
    # make sure it is importable so the bundled securecrt_tools modules load.
    script_dir, script_name = os.path.split(crt.ScriptFullName)
    if script_dir not in sys.path:
        sys.path.insert(0, script_dir)
else:
    # Running standalone (debugging): resolve our own path and cd there so
    # relative resources (templates, settings) are found.
    script_dir, script_name = os.path.split(os.path.realpath(__file__))
    os.chdir(script_dir)
# Now we can import our custom modules
import securecrt_tools.sessions as sessions
import securecrt_tools.settings as settings
import securecrt_tools.utilities as utils
import securecrt_tools.ipaddress as ipaddress
# ################################################ LOAD SETTINGS ###################################################
# Load the shared global settings (output directory, date format, debug flag,
# etc.) used by all securecrt_tools scripts.
session_set_filename = os.path.join(script_dir, "settings", settings.global_settings_filename)
session_settings = settings.SettingsImporter(session_set_filename, settings.global_defs)

# Set logger variable -- this won't be used unless debug setting is True
logger = logging.getLogger("securecrt")
# ################################################ SCRIPT ###################################################
def update_empty_interfaces(route_table):
    """
    Takes the routes table as a list of dictionaries (with dict key names used in parse_routes function) and does
    recursive lookups to find the outgoing interface for those entries in the route-table where the outgoing interface
    isn't listed.

    :param route_table: <list> A list of dictionaries - specifically with the keys 'network', 'protocol', 'nexthop'
                        and 'interface'. Mutated in place.
    :return: None -- the route_table entries are updated in place with outbound interfaces filled in.
    """

    def recursive_lookup(nexthop):
        # Resolve a next-hop IP to an egress interface: first check the
        # connected networks, then chase static routes until a connected
        # network (or nothing) is found.
        # NOTE(review): a static-route loop (A -> B, B -> A) would recurse
        # without bound here -- confirm such loops cannot appear in the input.
        for network in connected:
            if nexthop in network:
                return connected[network]
        for network in statics:
            if nexthop in network:
                return recursive_lookup(statics[network])
        return None

    logger.debug("STARTING update_empty_interfaces")

    # First pass: index connected prefixes -> interface and static prefixes ->
    # next-hop, and collect the next-hops whose egress interface is unknown.
    connected = {}
    unknowns = {}
    statics = {}
    for route in route_table:
        if route['protocol'] == 'connected':
            connected[route['network']] = route['interface']
        if route['protocol'] == 'static':
            if route['nexthop']:
                statics[route['network']] = route['nexthop']
        if route['nexthop'] and not route['interface']:
            unknowns[route['nexthop']] = None

    # Resolve each unknown next-hop once (memoized in `unknowns`) ...
    for nexthop in unknowns:
        unknowns[nexthop] = recursive_lookup(nexthop)

    # ... then back-fill every route entry that was missing an interface.
    # Entries whose next-hop could not be resolved keep interface=None.
    for route in route_table:
        if not route['interface']:
            if route['nexthop'] in unknowns:
                route['interface'] = unknowns[route['nexthop']]
    logger.debug("ENDING update_empty_interfaces")
def parse_routes(fsm_routes):
    """
    This function will take the TextFSM parsed route-table from the `textfsm_parse_to_dict` function. Each dictionary
    in the TextFSM output represents a route entry. Each of these dictionaries will be updated to convert IP addresses
    into ip_address or ip_network objects (from the ipaddress.py module). Some key names will also be updated also.

    Note: this script targets Python 2 (SecureCRT's embedded interpreter) --
    see the `unicode()` call below.

    :param fsm_routes: <list of dicts> TextFSM output from the `textfsm_parse_to_dict` function.
    :return: <list of dicts> An updated list of dictionaries that replaces IP address strings with objects from the
             ipaddress.py module from Google.
    """
    logger.debug("STARTING parse_routes function.")
    complete_table = []
    for route in fsm_routes:
        new_entry = {}
        logger.debug("Processing route entry: {0}".format(str(route)))
        # NETWORK/MASK arrive as separate strings; combine into one ip_network.
        new_entry['network'] = ipaddress.ip_network(u"{0}/{1}".format(route['NETWORK'], route['MASK']))
        new_entry['protocol'] = utils.normalize_protocol(route['PROTOCOL'])
        # Empty TextFSM fields are '' -- normalize those to None.
        if route['NEXTHOP_IP'] == '':
            new_entry['nexthop'] = None
        else:
            new_entry['nexthop'] = ipaddress.ip_address(unicode(route['NEXTHOP_IP']))
        if route["NEXTHOP_IF"] == '':
            new_entry['interface'] = None
        else:
            new_entry['interface'] = route['NEXTHOP_IF']
        # Nexthop VRF will only occur in NX-OS route tables (%vrf-name after the nexthop)
        if 'NEXTHOP_VRF' in route:
            if route['NEXTHOP_VRF'] == '':
                new_entry['vrf'] = None
            else:
                new_entry['vrf'] = route['NEXTHOP_VRF']
        logger.debug("Adding updated route entry '{0}' based on the information: {1}".format(str(new_entry),
                                                                                             str(route)))
        complete_table.append(new_entry)
    # Fill in missing outbound interfaces via recursive next-hop resolution.
    update_empty_interfaces(complete_table)
    logger.debug("ENDING parse_route function")
    return complete_table
def nexthop_summary(textfsm_dict):
    """
    A function that builds a CSV output (list of lists) that displays the summary information after analyzing the
    input route table.

    The output contains three sections: a per-next-hop count of routes broken
    down by protocol, a listing of connected networks per interface, and a
    detailed (nexthop, network, protocol) listing.

    :param textfsm_dict: <list of dicts> Route entries as produced by parse_routes().
    :return: <list of lists> Rows ready to be written directly to a CSV file.
    """
    # Identify connected or other local networks -- most found in NXOS to exlude from next-hops. These are excluded
    # from the nexthop summary (except connected has its own section in the output).
    logger.debug("STARTING nexthop_summary function")
    local_protos = ['connected', 'local', 'hsrp', 'vrrp', 'glbp']

    # Create a list of all dynamic protocols from the provided route table. Add total and statics to the front.
    proto_list = []
    for entry in textfsm_dict:
        if entry['protocol'] not in proto_list and entry['protocol'] not in local_protos:
            logger.debug("Found protocol '{0}' in the table".format(entry['protocol']))
            proto_list.append(entry['protocol'])
    proto_list.sort(key=utils.human_sort_key)
    # 'interface' and 'total' lead the per-next-hop column order.
    proto_list.insert(0, 'total')
    proto_list.insert(0, 'interface')

    # Create dictionaries to store summary information as we process the route table.
    summary_table = {}
    connected_table = {}
    detailed_table = {}

    # Process the route table to populate the above 3 dictionaries.
    for entry in textfsm_dict:
        logger.debug("Processing route: {0}".format(str(entry)))
        # If the route is connected, local or an FHRP entry
        if entry['protocol'] in local_protos:
            if entry['protocol'] == 'connected':
                if entry['interface'] not in connected_table:
                    connected_table[entry['interface']] = []
                connected_table[entry['interface']].append(str(entry['network']))
        else:
            if entry['nexthop']:
                if 'vrf' in entry and entry['vrf']:
                    # NX-OS next-hops in another VRF are labeled ip%vrf.
                    nexthop = "{0}%{1}".format(entry['nexthop'], entry['vrf'])
                else:
                    nexthop = str(entry['nexthop'])
            elif entry['interface'].lower() == "null0":
                nexthop = 'discard'
            # NOTE(review): if a non-local route has neither a nexthop nor a
            # Null0 interface, `nexthop` is carried over from the previous
            # iteration (or is unbound on the first) -- confirm such entries
            # cannot occur in the parsed table.
            if nexthop not in summary_table:
                # Create an entry for this next-hop, containing zero count for all protocols.
                summary_table[nexthop] = {}
                summary_table[nexthop].update(zip(proto_list, [0] * len(proto_list)))
                summary_table[nexthop]['interface'] = entry['interface']
            # Increment total and protocol specific count
            summary_table[nexthop][entry['protocol']] += 1
            summary_table[nexthop]['total'] += 1
            if nexthop not in detailed_table:
                detailed_table[nexthop] = []
            detailed_table[nexthop].append((str(entry['network']), entry['protocol']))

    # Convert summary_table into a format that can be printed to the CSV file.
    output = []
    header = ["Nexthop", "Interface", "Total"]
    header.extend(proto_list[2:])
    output.append(header)
    summary_keys = sorted(summary_table.keys(), key=utils.human_sort_key)
    for key in summary_keys:
        line = [key]
        for column in proto_list:
            line.append(summary_table[key][column])
        output.append(line)
    output.append([])

    # Convert the connected_table into a format that can be printed to the CSV file (and append to output)
    output.append([])
    output.append(["Connected:"])
    output.append(["Interface", "Network(s)"])
    connected_keys = sorted(connected_table.keys(), key=utils.human_sort_key)
    for key in connected_keys:
        line = [key]
        for network in connected_table[key]:
            line.append(network)
        output.append(line)
    output.append([])

    # Convert the detailed_table into a format that can be printed to the CSV file (and append to output)
    output.append([])
    output.append(["Route Details"])
    output.append(["Nexthop", "Network", "Protocol"])
    detailed_keys = sorted(detailed_table.keys(), key=utils.human_sort_key)
    for key in detailed_keys:
        for network in detailed_table[key]:
            line = [key]
            line.extend(list(network))
            output.append(line)
        # Blank row separates each next-hop's group of routes.
        output.append([])

    # Return the output, ready to be sent to directly to a CSV file
    logger.debug("ENDING nexthop_summary function")
    return output
def script_main(session):
    """Collect the device's route table and write a next-hop summary CSV.

    Prompts for an optional VRF, runs 'show ip route', parses the output with
    the OS-appropriate TextFSM template, and saves the summary produced by
    nexthop_summary() next to the session's other outputs.

    :param session: a securecrt_tools session object (CRTSession or DirectSession)
    """
    # Only IOS and NX-OS route-table formats have TextFSM templates here.
    supported_os = ["IOS", "NXOS"]
    if session.os not in supported_os:
        logger.debug("Unsupported OS: {0}.  Exiting program.".format(session.os))
        session.message_box("{0} is not a supported OS for this script.".format(session.os), "Unsupported OS",
                            options=sessions.ICON_STOP)
        return
    else:
        send_cmd = "show ip route"

    # An empty response means use the default VRF (plain 'show ip route').
    selected_vrf = session.prompt_window("Enter the VRF name. (Leave blank for default VRF)")
    if selected_vrf != "":
        send_cmd = send_cmd + " vrf {0}".format(selected_vrf)
        # Tag the hostname so the output filename identifies the VRF.
        session.hostname = session.hostname + "-VRF-{0}".format(selected_vrf)
        logger.debug("Received VRF: {0}".format(selected_vrf))

    raw_routes = session.get_command_output(send_cmd)

    if session.os == "IOS":
        template_file = "textfsm-templates/cisco_ios_show_ip_route.template"
    else:
        template_file = "textfsm-templates/cisco_nxos_show_ip_route.template"
    fsm_results = utils.textfsm_parse_to_dict(raw_routes, template_file)

    route_list = parse_routes(fsm_results)

    output_filename = session.create_output_filename("nexthop-summary", ext=".csv")
    output = nexthop_summary(route_list)
    utils.list_of_lists_to_csv(output, output_filename)

    # Clean up before closing session
    session.end()
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, create our session object using the "crt" object provided by SecureCRT
if __name__ == "__builtin__":
    # SecureCRT's embedded Python 2 interpreter runs scripts with __name__ set
    # to "__builtin__", which is how we detect in-application execution.
    # Create a session object for this execution of the script and pass it to our main() function
    crt_session = sessions.CRTSession(crt, session_settings)
    script_main(crt_session)
# Else, if this script is run directly then create a session object without the SecureCRT API (crt object) This would
# be done for debugging purposes (running the script outside of SecureCRT and feeding it the output it failed on)
elif __name__ == "__main__":
    direct_session = sessions.DirectSession(os.path.realpath(__file__), session_settings)
    script_main(direct_session) | StarcoderdataPython |
3243551 | <filename>data-structures/linked_list.py
class Element(object):
    """A singly linked list node holding a value and a pointer to the next node."""
    def __init__(self, value):
        self.value = value
        # Next node in the list; None while unlinked or at the tail.
        self.next = None
class LinkedList(object):
    """A singly linked list of Element-like nodes using 1-based positions."""

    def __init__(self, head=None):
        self.head = head

    def append(self, new_element):
        """Add new_element to the tail of the linked list."""
        current = self.head
        if self.head:
            while current.next:
                current = current.next
            current.next = new_element
        else:
            self.head = new_element

    def get_element(self, position):
        """Return the element at 1-based *position*, or None if out of range."""
        if position < 1:
            return None
        counter = 1
        current = self.head
        while current and counter <= position:
            if counter == position:
                return current
            current = current.next
            counter += 1
        return None

    def insert(self, new_element, position):
        """Insert new_element at 1-based *position*.

        Positions past the end of the list (and positions < 1) are ignored,
        matching the original behavior.
        """
        if position == 1:
            new_element.next = self.head
            self.head = new_element
            return
        counter = 1
        current = self.head
        while current and counter < position:
            if counter == position - 1:
                new_element.next = current.next
                current.next = new_element
            current = current.next
            counter += 1

    def delete(self, value):
        """Delete the first element with the given value.

        Fix: the original dereferenced ``current.value`` unconditionally and
        raised AttributeError when called on an empty list; deleting from an
        empty list (or a missing value) is now a no-op.
        """
        current = self.head
        previous = None
        while current and current.value != value:
            previous = current
            current = current.next
        if current is None:
            return  # empty list, or value not present
        if previous:
            previous.next = current.next
        else:
            self.head = current.next

    def insert_first(self, new_element):
        """Insert new_element as the new head."""
        new_element.next = self.head
        self.head = new_element

    def delete_first(self):
        """Remove and return the head element, or None if the list is empty."""
        deleted_element = self.head
        if deleted_element:
            self.head = deleted_element.next
        return deleted_element
3288692 | """This contains all of the views for the Ghostwriter application's
various webpages.
"""
# Import logging functionality
import logging
# Django imports for generic views and template rendering
from django.urls import reverse
from django.views import generic
from django.core.files import File
from django.shortcuts import render
from django.contrib import messages
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, UpdateView, DeleteView
# Imports for Signals
from django.db.models.signals import post_init, post_save
from django.dispatch import receiver
# Django imports for verifying a user is logged-in to access a view
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
# Django imports for forms
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
# Import for references to Django's settings.py and storage
from django.conf import settings
# Import models and forms
from django.db.models import Q
from django.contrib.auth import get_user_model
User = get_user_model()
# from rolodex.models import Project, ProjectAssignment
from .models import (
Finding, Severity, FindingType, Report,
ReportFindingLink, Evidence, Archive,
FindingNote, LocalFindingNote)
from .forms import (
FindingCreateForm, ReportCreateForm,
ReportFindingLinkUpdateForm, EvidenceForm,
FindingNoteCreateForm, LocalFindingNoteCreateForm)
# Import model filters for views
from .filters import FindingFilter, ReportFilter, ArchiveFilter
# Import Python libraries for various things
import io
import os
import csv
import zipfile
# Import for generating the xlsx reports in memory
from xlsxwriter.workbook import Workbook
# Import custom modules
from modules import reportwriter
# Setup logger
logger = logging.getLogger(__name__)
#####################
# Signals Functions #
#####################
@receiver(post_init, sender=Evidence)
def backup_evidence_path(sender, instance, **kwargs):
    """Backup the old evidence file's path so it can be cleaned up after the
    new file is uploaded.
    """
    # Stash the FieldFile as loaded from the DB; delete_old_evidence() compares
    # it against the saved document after a post_save signal.
    instance._current_evidence = instance.document
@receiver(post_save, sender=Evidence)
def delete_old_evidence(sender, instance, **kwargs):
    """Delete the old evidence file from disk when it is replaced.

    `backup_evidence_path` (post_init) stashed the FieldFile that was loaded
    from the database in ``_current_evidence``; after a save we remove that
    file if the document now points to a different path.
    """
    if hasattr(instance, '_current_evidence'):
        old_evidence = instance._current_evidence
        # Fix: the original compared the FieldFile directly against a path
        # string (``_current_evidence != instance.document.path``). FieldFile
        # equality is based on the relative ``name``, which never equals an
        # absolute path, so the cleanup ran -- removing the *live* file -- even
        # when the document was unchanged. Compare path to path, and skip when
        # there is no old file at all.
        if old_evidence and old_evidence.path != instance.document.path:
            try:
                os.remove(old_evidence.path)
            except Exception:
                # Best effort -- the old file may already be gone.
                pass
##################
# View Functions #
##################
@login_required
def index(request):
    """View function to redirect empty requests to the dashboard."""
    # No content lives at the site root; send the user to their dashboard.
    return HttpResponseRedirect(reverse('home:dashboard'))
@login_required
def findings_list(request):
    """View showing all available findings. This view defaults to the
    finding_list.html template and allows for filtering.

    An optional ``finding_search`` GET parameter restricts the list to
    findings whose title or description contains the term.
    """
    # Check if a search parameter is in the request
    try:
        search_term = request.GET.get('finding_search')
    except Exception:
        search_term = ''
    if search_term:
        messages.success(request, 'Displaying search results for: %s' %
                         search_term, extra_tags='alert-success')
        # select_related avoids a per-row query for severity / finding_type.
        findings_list = Finding.objects.\
            select_related('severity', 'finding_type').\
            filter(Q(title__icontains=search_term) |
                   Q(description__icontains=search_term)).\
            order_by('severity__weight', 'finding_type', 'title')
    else:
        findings_list = Finding.objects.\
            select_related('severity', 'finding_type').\
            all().order_by('severity__weight', 'finding_type', 'title')
    # The filter gets the full GET dict so additional filter fields also apply.
    findings_filter = FindingFilter(request.GET, queryset=findings_list)
    return render(request, 'reporting/finding_list.html',
                  {'filter': findings_filter})
@login_required
def reports_list(request):
    """View showing all reports. This view defaults to the report_list.html
    template and allows for filtering.
    """
    # Incomplete reports sort first (complete=False < True), then by title.
    reports_list = Report.objects.select_related('created_by').all().\
        order_by('complete', 'title')
    reports_filter = ReportFilter(request.GET, queryset=reports_list)
    return render(request, 'reporting/report_list.html',
                  {'filter': reports_filter})
@login_required
def archive_list(request):
    """View showing all archived reports. This view defaults to the
    archives.html template and allows for filtering.
    """
    # select_related follows project -> client in one query for the listing.
    archive_list = Archive.objects.select_related('project__client').all().\
        order_by('project__client')
    archive_filter = ArchiveFilter(request.GET, queryset=archive_list)
    return render(request, 'reporting/archives.html',
                  {'filter': archive_filter})
@login_required
def import_findings(request):
    """View function for uploading and processing csv files and importing
    findings.

    GET renders the upload form; POST parses the uploaded csv and creates or
    updates one Finding per row (keyed on the ``title`` column). Rows missing a
    title are skipped; after more than five row errors the import aborts.
    """
    # If the request is 'GET' return the upload page
    if request.method == 'GET':
        return render(request, 'reporting/findings_import.html')
    # If not a GET, then proceed
    try:
        # Get the `csv_file` from the POSTed form data
        csv_file = request.FILES['csv_file']
        # Do a lame/basic check to see if this is a csv file
        if not csv_file.name.endswith('.csv'):
            messages.error(request, 'Your file is not a csv!',
                           extra_tags='alert-danger')
            return HttpResponseRedirect(reverse('reporting:import_findings'))
        # The file is loaded into memory, so we must be aware of system limits
        if csv_file.multiple_chunks():
            messages.error(request, 'Uploaded file is too big (%.2f MB).' %
                           (csv_file.size/(1000*1000)),
                           extra_tags='alert-danger')
            return HttpResponseRedirect(reverse('reporting:import_findings'))
    # General catch-all if something goes terribly wrong
    except Exception as e:
        messages.error(request, 'Unable to upload/read file: ' + repr(e),
                       extra_tags='alert-danger')
        logging.getLogger('error_logger').\
            error('Unable to upload/read file. ' + repr(e))
    # Loop over the lines and save the domains to the Finding model
    try:
        # Try to read the file data from memory
        csv_file_wrapper = io.StringIO(csv_file.read().decode())
        csv_reader = csv.DictReader(csv_file_wrapper, delimiter=',')
    except Exception as e:
        messages.error(request, 'Unable to parse file: ' + repr(e),
                       extra_tags='alert-danger')
        logging.getLogger('error_logger').\
            error('Unable to parse file. ' + repr(e))
        return HttpResponseRedirect(reverse('reporting:import_findings'))
    try:
        error_count = 0
        # Process each csv row and commit it to the database
        for entry in csv_reader:
            if error_count > 5:
                raise Exception("Too many errors. Discontinuing import.")
            title = entry.get('title', None)
            if title is None:
                messages.error(request, 'Missing title field', extra_tags='alert-danger')
                logging.getLogger('error_logger').error('Missing title field')
                error_count += 1
                continue
            logging.getLogger('error_logger').info('Adding %s to the database',
                                                   entry['title'])
            # Create a Severity object for the provided rating (e.g. High)
            severity_entry = entry.get('severity', "Informational")
            try:
                severity = Severity.objects.get(severity__iexact=severity_entry)
            except Severity.DoesNotExist:
                # Unknown severities are created on the fly.
                severity = Severity(severity=severity_entry)
                severity.save()
            # Create a FindingType object for the provided type (e.g. Network)
            type_entry = entry.get('finding_type', 'Network')
            try:
                finding_type = FindingType.objects.get(finding_type__iexact=type_entry)
            except FindingType.DoesNotExist:
                finding_type = FindingType(finding_type=type_entry)
                finding_type.save()
            try:
                # Upsert on title, then copy every remaining csv column onto
                # the model as an attribute.
                instance, created = Finding.objects.update_or_create(
                    title=entry.get('title')
                )
                for attr, value in entry.items():
                    if attr not in ['severity', 'finding_type']:
                        setattr(instance, attr, value)
                instance.severity = severity
                instance.finding_type = finding_type
                instance.save()
            except Exception as e:
                messages.error(request, 'Failed parsing %s: %s' %
                               (entry['title'], e), extra_tags='alert-danger')
                logging.getLogger('error_logger').error(repr(e))
                error_count += 1
                pass
        messages.success(request, 'Your csv file has been imported '
                         'successfully =)', extra_tags='alert-success')
    except Exception as e:
        messages.error(request, str(e), extra_tags='alert-danger')
        logging.getLogger('error_logger').error(repr(e))
    return HttpResponseRedirect(reverse('reporting:import_findings'))
@login_required
def assign_finding(request, pk):
    """View function for adding a finding to the user's active report.

    Copies the library Finding identified by *pk* into a ReportFindingLink
    attached to the report stored in the session's ``active_report`` entry.
    """
    def get_position(report_pk):
        # Positions are 1-based: next position is (current count) + 1.
        position = ReportFindingLink.objects.\
            filter(report__pk=report_pk).count()
        if position:
            return position + 1
        else:
            return 1
    # The user must have the `active_report` session variable
    # Get the variable and default to `None` if it does not exist
    active_report = request.session.get('active_report', None)
    if active_report:
        try:
            report = Report.objects.get(pk=active_report['id'])
        except Exception:
            messages.error(request, 'You have no active report! Select a '
                           'report to edit before trying to edit one.',
                           extra_tags='alert-danger')
            return HttpResponseRedirect(reverse('reporting:findings'))
        finding = Finding.objects.get(pk=pk)
        # Snapshot every field of the library finding so later edits to the
        # library do not change this report.
        report_link = ReportFindingLink(title=finding.title,
                                        description=finding.description,
                                        impact=finding.impact,
                                        mitigation=finding.mitigation,
                                        replication_steps=finding.replication_steps,
                                        host_detection_techniques=finding.host_detection_techniques,
                                        network_detection_techniques=finding.network_detection_techniques,
                                        references=finding.references,
                                        severity=finding.severity,
                                        finding_type=finding.finding_type,
                                        finding_guidance=finding.finding_guidance,
                                        report=report,
                                        assigned_to=request.user,
                                        position=get_position(
                                            active_report['id']))
        report_link.save()
        messages.success(request, '%s successfully added to report.' %
                         finding.title, extra_tags='alert-success')
        return HttpResponseRedirect(reverse('reporting:findings'))
    else:
        messages.error(request, 'You have no active report! Select a report '
                       'to edit before trying to edit one.',
                       extra_tags='alert-danger')
        return HttpResponseRedirect(reverse('reporting:findings'))
@login_required
def assign_blank_finding(request, pk):
    """View function for adding a blank finding to the specified report.

    *pk* is the Report's primary key; the new ReportFindingLink is created
    with placeholder content and Informational/Network defaults.
    """
    def get_position(report_pk):
        # NOTE(review): the parameter is ignored -- the query uses the
        # enclosing `report` via closure; confirm that is intentional.
        position = ReportFindingLink.objects.filter(report=report).count()
        if position:
            return position + 1
        else:
            return 1
    try:
        report = Report.objects.get(pk=pk)
    except Exception:
        messages.error(request, 'A valid report could not be found for this '
                       'blank finding.',
                       extra_tags='alert-danger')
        return HttpResponseRedirect(reverse('reporting:reports'))
    report_link = ReportFindingLink(title='Blank Template',
                                    description='',
                                    impact='',
                                    mitigation='',
                                    replication_steps='',
                                    host_detection_techniques='',
                                    network_detection_techniques='',
                                    references='',
                                    severity=Severity.objects.
                                    get(severity='Informational'),
                                    finding_type=FindingType.objects.
                                    get(finding_type='Network'),
                                    report=report,
                                    assigned_to=request.user,
                                    position=get_position(report))
    report_link.save()
    messages.success(request, 'A blank finding has been successfully added to '
                     'report.',
                     extra_tags='alert-success')
    return HttpResponseRedirect(reverse('reporting:report_detail', args=(report.id,)))
@login_required
def activate_report(request, pk):
    """View function to set the specified report as the current user's active
    report.

    Stores ``{'id': pk, 'title': ...}`` under the ``active_report`` session
    key, which other views (e.g. assign_finding) rely on.
    """
    # Set the user's session variable
    try:
        report_instance = Report.objects.get(pk=pk)
        # NOTE(review): Report.objects.get() raises DoesNotExist rather than
        # returning a falsy value, so the else branch below is effectively
        # unreachable -- the except clause handles the missing-report case.
        if report_instance:
            request.session['active_report'] = {}
            request.session['active_report']['id'] = pk
            request.session['active_report']['title'] = report_instance.title
            messages.success(request, '%s is now your active report.' %
                             report_instance.title, extra_tags='alert-success')
            return HttpResponseRedirect(reverse('reporting:report_detail', args=(pk, )))
        else:
            messages.error(request, 'The specified report does not exist!',
                           extra_tags='alert-danger')
            return HttpResponseRedirect(reverse('reporting:reports'))
    except Exception:
        messages.error(request, 'Could not set the requested report as your '
                       'active report.',
                       extra_tags='alert-danger')
        return HttpResponseRedirect(reverse('reporting:reports'))
@login_required
def report_status_toggle(request, pk):
    """View function to toggle the (in)complete status for the specified
    report, then return to its detail page.
    """
    try:
        report_instance = Report.objects.get(pk=pk)
        # NOTE(review): get() raises on a missing pk, so the trailing else is
        # effectively dead code; the except clause reports the failure.
        if report_instance:
            if report_instance.complete:
                report_instance.complete = False
                report_instance.save()
                messages.success(request, '%s is now marked as incomplete.' %
                                 report_instance.title,
                                 extra_tags='alert-success')
                return HttpResponseRedirect(reverse('reporting:report_detail',
                                            args=(pk, )))
            else:
                report_instance.complete = True
                report_instance.save()
                messages.success(request, '%s is now marked as complete.' %
                                 report_instance.title,
                                 extra_tags='alert-success')
                return HttpResponseRedirect(reverse('reporting:report_detail',
                                            args=(pk, )))
        else:
            messages.error(request, 'The specified report does not exist!',
                           extra_tags='alert-danger')
            return HttpResponseRedirect(reverse('reporting:reports'))
    except Exception:
        messages.error(request, "Could not update the report's status!",
                       extra_tags='alert-danger')
        return HttpResponseRedirect(reverse('reporting:reports'))
@login_required
def report_delivery_toggle(request, pk):
    """View function to toggle the delivery status for the specified report,
    then return to its detail page.
    """
    try:
        report_instance = Report.objects.get(pk=pk)
        # NOTE(review): get() raises on a missing pk, so the trailing else is
        # effectively dead code; the except clause reports the failure.
        if report_instance:
            if report_instance.delivered:
                report_instance.delivered = False
                report_instance.save()
                messages.success(request, '%s is now marked as not delivered.' %
                                 report_instance.title,
                                 extra_tags='alert-success')
                return HttpResponseRedirect(reverse('reporting:report_detail',
                                            args=(pk, )))
            else:
                report_instance.delivered = True
                report_instance.save()
                messages.success(request, '%s is now marked as delivered.' %
                                 report_instance.title,
                                 extra_tags='alert-success')
                return HttpResponseRedirect(reverse('reporting:report_detail',
                                            args=(pk, )))
        else:
            messages.error(request, 'The specified report does not exist!',
                           extra_tags='alert-danger')
            return HttpResponseRedirect(reverse('reporting:reports'))
    except Exception:
        messages.error(request, "Could not update the report's status!",
                       extra_tags='alert-danger')
        return HttpResponseRedirect(reverse('reporting:reports'))
@login_required
def finding_status_toggle(request, pk):
    """View function to toggle the complete/needs-editing status for the
    specified report finding, then return to the owning report's detail page.
    """
    try:
        finding_instance = ReportFindingLink.objects.get(pk=pk)
        # NOTE(review): get() raises on a missing pk, so the trailing else is
        # effectively dead code; the except clause reports the failure.
        if finding_instance:
            if finding_instance.complete:
                finding_instance.complete = False
                finding_instance.save()
                messages.success(request, '%s is now marked as in need of '
                                 'editing.' % finding_instance.title,
                                 extra_tags='alert-success')
                return HttpResponseRedirect(reverse('reporting:report_detail',
                                            args=(
                                                finding_instance.report.id, )))
            else:
                finding_instance.complete = True
                finding_instance.save()
                messages.success(request, '%s is now marked as ready for '
                                 'review.' % finding_instance.title,
                                 extra_tags='alert-success')
                return HttpResponseRedirect(reverse('reporting:report_detail',
                                            args=(
                                                finding_instance.report.id, )))
        else:
            messages.error(request, 'The specified finding does not exist!',
                           extra_tags='alert-danger')
            return HttpResponseRedirect(reverse('reporting:reports'))
    except Exception:
        messages.error(request, 'Could not set the requested finding as '
                       'complete.',
                       extra_tags='alert-danger')
        return HttpResponseRedirect(reverse('reporting:reports'))
@login_required
def upload_evidence(request, pk):
    """View function for handling evidence file uploads.

    GET renders an empty form pre-bound to finding *pk*; POST validates and
    saves the upload, then redirects to the active report's detail page (or
    the report list when no active report is set). An invalid POST re-renders
    the bound form so validation errors are shown.
    """
    if request.method == 'POST':
        form = EvidenceForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            active_report = request.session.get('active_report', None)
            messages.success(request, 'Evidence uploaded successfully',
                             extra_tags='alert-success')
            # Fix: the original tested `'id' in active_report` directly, which
            # raises TypeError when the session has no active report (None).
            if active_report and 'id' in active_report:
                return HttpResponseRedirect(reverse('reporting:report_detail',
                                                    args=(active_report['id'],)))
            return HttpResponseRedirect(reverse('reporting:reports'))
    else:
        form = EvidenceForm(initial={
            'finding': pk,
            'uploaded_by': request.user
        })
    return render(request, 'reporting/evidence_form.html', {'form': form})
@login_required
def view_evidence(request, pk):
    """View function for viewing evidence file uploads.

    Text-like files are rendered line by line, images are rendered inline, and
    any other extension is handed to the template as 'unknown'.
    """
    evidence_instance = Evidence.objects.get(pk=pk)
    file_content = None
    document_name = evidence_instance.document.name
    # str.endswith accepts a tuple of suffixes, replacing the original chain
    # of or-ed per-extension checks.
    if document_name.endswith(('.txt', '.log', '.ps1', '.py', '.md')):
        filetype = 'text'
        file_content = evidence_instance.document.read().splitlines()
    elif document_name.endswith(('.jpg', '.png', '.jpeg')):
        filetype = 'image'
    else:
        filetype = 'unknown'
    context = {
        'filetype': filetype,
        'evidence': evidence_instance,
        'file_content': file_content
    }
    return render(request, 'reporting/evidence_detail.html', context=context)
@login_required
def position_increase(request, pk):
    """View function to increase a finding's position which moves it down the
    list.
    """
    finding_instance = ReportFindingLink.objects.get(pk=pk)
    finding_instance.position = finding_instance.position + 1
    # Only the position column changed; limit the UPDATE accordingly.
    finding_instance.save(update_fields=['position'])
    return HttpResponseRedirect(reverse('reporting:report_detail',
                                args=(finding_instance.report.id,)))
@login_required
def position_decrease(request, pk):
    """View function to decrease a finding's position which moves it up the
    list.
    """
    finding_instance = ReportFindingLink.objects.get(pk=pk)
    # Fix: positions are 1-based (see the get_position helpers), so clamp at 1
    # -- repeated clicks on the top finding previously pushed its position to
    # zero and then negative.
    finding_instance.position = max(1, finding_instance.position - 1)
    # Only the position column changed; limit the UPDATE accordingly.
    finding_instance.save(update_fields=['position'])
    return HttpResponseRedirect(reverse('reporting:report_detail',
                                args=(finding_instance.report.id,)))
@login_required
def generate_docx(request, pk):
    """View function to generate a docx report for the specified report and
    stream it back as an attachment named report.docx.
    """
    report_instance = Report.objects.get(pk=pk)
    # Ask Spenny to make us a report with these findings
    output_path = os.path.join(settings.MEDIA_ROOT, report_instance.title)
    evidence_path = os.path.join(settings.MEDIA_ROOT)
    template_loc = os.path.join(settings.TEMPLATE_LOC, 'template.docx')
    spenny = reportwriter.Reportwriter(
        report_instance,
        output_path,
        evidence_path,
        template_loc)
    docx = spenny.generate_word_docx()
    # The document is written straight into the HTTP response body.
    response = HttpResponse(
        content_type='application/vnd.openxmlformats-officedocument.'
                     'wordprocessingml.document')
    response['Content-Disposition'] = 'attachment; filename=report.docx'
    docx.save(response)
    return response
@login_required
def generate_xlsx(request, pk):
    """View function to generate a xlsx report for the specified report and
    stream it back as an attachment named report.xlsx.
    """
    report_instance = Report.objects.get(pk=pk)
    # Ask Spenny to make us a report with these findings
    output_path = os.path.join(settings.MEDIA_ROOT, report_instance.title)
    evidence_path = os.path.join(settings.MEDIA_ROOT)
    template_loc = None
    spenny = reportwriter.Reportwriter(
        report_instance,
        output_path,
        evidence_path,
        template_loc)
    # Build the workbook entirely in memory, then rewind before reading.
    output = io.BytesIO()
    workbook = Workbook(output, {'in_memory': True})
    spenny.generate_excel_xlsx(workbook)
    output.seek(0)
    # Fix: the original content type was malformed
    # ('application/application/vnd...'); use the registered OOXML
    # spreadsheet MIME type.
    response = HttpResponse(
        output.read(),
        content_type='application/vnd.openxmlformats-'
                     'officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=report.xlsx'
    output.close()
    return response
@login_required
def generate_pptx(request, pk):
    """View function to generate a pptx report for the specified report.

    Builds the slide deck with Reportwriter and streams it back as an
    attachment named ``report.pptx``.
    """
    report_instance = Report.objects.get(pk=pk)
    # Ask Spenny to make us a report with these findings
    output_path = os.path.join(settings.MEDIA_ROOT, report_instance.title)
    evidence_path = os.path.join(settings.MEDIA_ROOT)
    template_loc = os.path.join(settings.TEMPLATE_LOC, 'template.pptx')
    spenny = reportwriter.Reportwriter(
        report_instance,
        output_path,
        evidence_path,
        template_loc)
    pptx = spenny.generate_powerpoint_pptx()
    response = HttpResponse(
        # Fixed MIME type: the scheme was previously duplicated as
        # 'application/application/vnd...'
        content_type='application/vnd.openxmlformats-'
                     'officedocument.presentationml.presentation')
    response['Content-Disposition'] = 'attachment; filename=report.pptx'
    pptx.save(response)
    return response
@login_required
def generate_json(request, pk):
    """View function to generate a json report for the specified report."""
    report_instance = Report.objects.get(pk=pk)
    # Ask Spenny to make us a report with these findings
    output_path = os.path.join(settings.MEDIA_ROOT, report_instance.title)
    evidence_path = os.path.join(settings.MEDIA_ROOT)
    spenny = reportwriter.Reportwriter(
        report_instance,
        output_path,
        evidence_path,
        None)
    # Named to avoid shadowing the stdlib ``json`` module inside this scope
    json_report = spenny.generate_json()
    return HttpResponse(json_report, 'application/json')
@login_required
def generate_all(request, pk):
    """View function to generate all report types for the specified report.

    Produces json/docx/xlsx/pptx in one pass and returns them bundled in
    an in-memory zip named ``reports.zip``.
    """
    report_instance = Report.objects.get(pk=pk)
    docx_template_loc = os.path.join(settings.TEMPLATE_LOC, 'template.docx')
    pptx_template_loc = os.path.join(settings.TEMPLATE_LOC, 'template.pptx')
    # Ask Spenny to make us reports with these findings
    output_path = os.path.join(settings.MEDIA_ROOT, report_instance.title)
    evidence_path = os.path.join(settings.MEDIA_ROOT)
    # NOTE(review): this template path uses MEDIA_ROOT/templates while the
    # docx/pptx paths above use settings.TEMPLATE_LOC -- confirm which
    # location is canonical.
    template_loc = os.path.join(
        settings.MEDIA_ROOT,
        'templates',
        'template.docx')
    spenny = reportwriter.Reportwriter(
        report_instance,
        output_path,
        evidence_path,
        template_loc)
    json_doc, word_doc, excel_doc, ppt_doc = spenny.generate_all_reports(
        docx_template_loc,
        pptx_template_loc)
    # Create a zip file in memory and add the reports to it
    zip_buffer = io.BytesIO()
    zf = zipfile.ZipFile(zip_buffer, 'a')
    zf.writestr('report.json', json_doc)
    zf.writestr('report.docx', word_doc.getvalue())
    zf.writestr('report.xlsx', excel_doc.getvalue())
    zf.writestr('report.pptx', ppt_doc.getvalue())
    zf.close()
    zip_buffer.seek(0)
    # Return the buffer in the HTTP response
    response = HttpResponse(content_type='application/x-zip-compressed')
    response['Content-Disposition'] = 'attachment; filename=reports.zip'
    response.write(zip_buffer.read())
    return response
def zip_directory(path, zip_handler):
    """Zip the target directory and all of its contents, for archiving
    purposes.

    Files are stored under an ``evidence/`` prefix with paths relative to
    ``path``.

    Fix: removed the ``@login_required`` decorator. This is a plain helper,
    not a Django view -- the decorator's wrapper expects an HttpRequest as
    the first argument, but callers (see ``archive``) pass a filesystem
    path, so the decorated version could never run correctly.
    """
    # Walk the target directory
    abs_src = os.path.abspath(path)
    for root, dirs, files in os.walk(path):
        # Add each file to the zip file handler
        for file in files:
            absname = os.path.abspath(os.path.join(root, file))
            # Store the path relative to the source dir, under evidence/
            arcname = absname[len(abs_src) + 1:]
            zip_handler.write(os.path.join(root, file), 'evidence/' + arcname)
@login_required
def archive(request, pk):
    """View function to generate all report types for the specified report and
    then zip all reports and evidence. The archive file is saved in the
    archives directory and recorded as an Archive row for the client.
    """
    report_instance = Report.objects.\
        select_related('project', 'project__client').get(pk=pk)
    archive_loc = os.path.join(settings.MEDIA_ROOT, 'archives')
    evidence_loc = os.path.join(settings.MEDIA_ROOT, 'evidence', str(pk))
    docx_template_loc = os.path.join(
        settings.MEDIA_ROOT,
        'templates',
        'template.docx')
    pptx_template_loc = os.path.join(
        settings.MEDIA_ROOT,
        'templates',
        'template.pptx')
    # Ask Spenny to make us reports with these findings
    output_path = os.path.join(settings.MEDIA_ROOT, report_instance.title)
    evidence_path = os.path.join(settings.MEDIA_ROOT)
    template_loc = os.path.join(
        settings.MEDIA_ROOT,
        'templates',
        'template.docx')
    spenny = reportwriter.Reportwriter(
        report_instance,
        output_path,
        evidence_path,
        template_loc)
    json_doc, word_doc, excel_doc, ppt_doc = spenny.generate_all_reports(
        docx_template_loc,
        pptx_template_loc)
    # Create a zip file in memory and add the reports to it
    zip_buffer = io.BytesIO()
    zf = zipfile.ZipFile(zip_buffer, 'a')
    zf.writestr('report.json', json_doc)
    zf.writestr('report.docx', word_doc.getvalue())
    zf.writestr('report.xlsx', excel_doc.getvalue())
    zf.writestr('report.pptx', ppt_doc.getvalue())
    # Bundle this report's evidence directory under evidence/ in the zip
    zip_directory(evidence_loc, zf)
    zf.close()
    zip_buffer.seek(0)
    # NOTE(review): the archive filename embeds the report title verbatim --
    # confirm titles cannot contain path separators before relying on this.
    with open(os.path.join(
            archive_loc,
            report_instance.title + '.zip'),
            'wb') as archive_file:
        archive_file.write(zip_buffer.read())
    new_archive = Archive(
        client=report_instance.project.client,
        report_archive=File(open(os.path.join(
            archive_loc,
            report_instance.title + '.zip'), 'rb')))
    new_archive.save()
    messages.success(request, '%s has been archived successfully.' %
                     report_instance.title, extra_tags='alert-success')
    return HttpResponseRedirect(reverse('reporting:archived_reports'))
@login_required
def download_archive(request, pk):
    """View function to allow for downloading archived reports."""
    archive_instance = Archive.objects.get(pk=pk)
    # NOTE(review): FileField.path is already absolute, so os.path.join
    # presumably discards MEDIA_ROOT here -- verify on this storage backend.
    file_path = os.path.join(
        settings.MEDIA_ROOT,
        archive_instance.report_archive.path)
    if os.path.exists(file_path):
        with open(file_path, 'rb') as archive:
            response = HttpResponse(
                archive.read(),
                content_type='application/x-zip-compressed')
            # NOTE(review): 'inline' may render in-browser; confirm
            # 'attachment' was not intended for a download endpoint.
            response['Content-Disposition'] = \
                'inline; filename=' + os.path.basename(file_path)
            return response
    raise Http404
@login_required
def clone_report(request, pk):
    """View function to clone the specified report along with all of its
    findings.

    Fix: the original fetched the report via its findings queryset
    (``report_instance[0].report``), which raised IndexError for any
    report with zero findings. The report is now fetched directly and the
    findings are materialized before the clone is saved.
    """
    report_to_clone = Report.objects.get(pk=pk)
    # Materialize the findings of the source report before mutating the pk
    findings = list(ReportFindingLink.objects.filter(report=pk))
    # Clone the report by editing title, setting PK to `None`, and saving it
    report_to_clone.title = report_to_clone.title + ' Copy'
    report_to_clone.complete = False
    report_to_clone.pk = None
    report_to_clone.save()
    new_report_pk = report_to_clone.pk
    # Re-save each finding against the clone with a fresh primary key
    for finding in findings:
        finding.report = report_to_clone
        finding.pk = None
        finding.save()
    return HttpResponseRedirect(reverse(
        'reporting:report_detail',
        kwargs={'pk': new_report_pk}))
@login_required
def convert_finding(request, pk):
    """View function to convert a finding in a report to a master finding
    for the library.

    Only the content fields listed below are copied; report-local data
    such as evidence and notes stays with the original finding.
    """
    finding_instance = ReportFindingLink.objects.get(pk=pk)
    new_finding = Finding(
        title=finding_instance.title,
        description=finding_instance.description,
        impact=finding_instance.impact,
        mitigation=finding_instance.mitigation,
        replication_steps=finding_instance.replication_steps,
        host_detection_techniques=finding_instance.host_detection_techniques,
        network_detection_techniques=finding_instance.network_detection_techniques,
        references=finding_instance.references,
        severity=finding_instance.severity,
        finding_type=finding_instance.finding_type
    )
    new_finding.save()
    new_finding_pk = new_finding.pk
    return HttpResponseRedirect(reverse(
        'reporting:finding_detail',
        kwargs={'pk': new_finding_pk}))
################
# View Classes #
################
class FindingDetailView(LoginRequiredMixin, generic.DetailView):
    """View showing the details for the specified finding. This view defaults
    to the finding_detail.html template.
    """
    # DetailView exposes the record as ``object``/``finding`` in the context
    model = Finding
class FindingCreate(LoginRequiredMixin, CreateView):
    """View for creating new findings. This view defaults to the
    finding_form.html template.
    """
    model = Finding
    form_class = FindingCreateForm
    def get_success_url(self):
        """Flash a success message and land on the new finding's detail page."""
        messages.success(self.request, '%s was successfully created.' %
                         self.object.title, extra_tags='alert-success')
        return reverse('reporting:finding_detail', kwargs={'pk': self.object.pk})
class FindingUpdate(LoginRequiredMixin, UpdateView):
    """View for updating existing findings. This view defaults to the
    finding_form.html template.
    """
    model = Finding
    # All model fields are editable on the form
    fields = '__all__'
    def get_success_url(self):
        """Flash a success message and return to the updated record."""
        messages.success(self.request, 'Master record for %s was '
                         'successfully updated.' % self.get_object().title,
                         extra_tags='alert-success')
        return reverse('reporting:finding_detail', kwargs={'pk': self.object.pk})
class FindingDelete(LoginRequiredMixin, DeleteView):
    """View for deleting existing findings. This view defaults to the
    finding_confirm_delete.html template.
    """
    model = Finding
    template_name = 'confirm_delete.html'
    def get_success_url(self):
        """Flash a warning message and return to the findings list."""
        messages.warning(self.request, 'Master record for %s was successfully '
                         'deleted.' % self.get_object().title,
                         extra_tags='alert-warning')
        return reverse_lazy('reporting:findings')
    def get_context_data(self, **kwargs):
        """Override the `get_context_data()` function to provide additional
        information.
        """
        ctx = super(FindingDelete, self).get_context_data(**kwargs)
        # The shared confirm_delete.html template reads these two keys
        queryset = kwargs['object']
        ctx['object_type'] = 'finding master record'
        ctx['object_to_be_deleted'] = queryset.title
        return ctx
class ReportDetailView(LoginRequiredMixin, generic.DetailView):
    """View showing the details for the specified report. This view defaults to the
    report_detail.html template.
    """
    # DetailView exposes the record as ``object``/``report`` in the context
    model = Report
class ReportCreate(LoginRequiredMixin, CreateView):
    """View for creating new reports. This view defaults to the
    report_form.html template.
    """
    model = Report
    form_class = ReportCreateForm
    def form_valid(self, form):
        """Attach the project and creator, and make this the session's
        active report before saving.
        """
        # Local import -- presumably to avoid a circular import with rolodex
        from ghostwriter.rolodex.models import Project
        project = get_object_or_404(Project, pk=self.kwargs.get('pk'))
        form.instance.project = project
        form.instance.created_by = self.request.user
        self.request.session['active_report'] = {}
        self.request.session['active_report']['title'] = form.instance.title
        return super().form_valid(form)
    def get_initial(self):
        """Pre-fill the title from the project's client, type, and start date."""
        from ghostwriter.rolodex.models import Project
        project = get_object_or_404(Project, pk=self.kwargs.get('pk'))
        title = '{} {} ({}) Report'.format(
            project.client,
            project.project_type,
            project.start_date)
        return {
            'title': title,
        }
    def get_success_url(self):
        """Record the new report's id in the session and land on its detail page."""
        self.request.session['active_report']['id'] = self.object.pk
        self.request.session.modified = True
        messages.success(self.request, 'New report was successfully created '
                         'and is now your active report.',
                         extra_tags='alert-success')
        return reverse('reporting:report_detail', kwargs={'pk': self.object.pk})
class ReportUpdate(LoginRequiredMixin, UpdateView):
    """View for updating existing reports. This view defaults to the
    report_form.html template.
    """
    model = Report
    fields = ('title', 'complete')
    def form_valid(self, form):
        """Refresh the session's active report to the edited record."""
        self.request.session['active_report'] = {}
        self.request.session['active_report']['id'] = form.instance.id
        self.request.session['active_report']['title'] = form.instance.title
        self.request.session.modified = True
        return super().form_valid(form)
    def get_success_url(self):
        """Flash a success message and return to the updated report."""
        messages.success(self.request, 'Report was updated successfully.',
                         extra_tags='alert-success')
        return reverse('reporting:report_detail', kwargs={'pk': self.object.pk})
class ReportDelete(LoginRequiredMixin, DeleteView):
    """View for deleting existing reports. This view defaults to the
    report_confirm_delete.html
    template.
    """
    model = Report
    template_name = 'confirm_delete.html'
    def get_success_url(self):
        """Clear the session's active report and return to the reports list."""
        self.request.session['active_report'] = {}
        self.request.session['active_report']['id'] = ''
        self.request.session['active_report']['title'] = ''
        self.request.session.modified = True
        messages.warning(self.request, 'Report and associated evidence files '
                         'were deleted successfully.',
                         extra_tags='alert-warning')
        return reverse_lazy('reporting:reports')
    def get_context_data(self, **kwargs):
        """Override the `get_context_data()` function to provide additional
        information.
        """
        ctx = super(ReportDelete, self).get_context_data(**kwargs)
        # The shared confirm_delete.html template reads these two keys
        queryset = kwargs['object']
        ctx['object_type'] = 'entire report, evidence and all'
        ctx['object_to_be_deleted'] = queryset.title
        return ctx
class ReportFindingLinkUpdate(LoginRequiredMixin, UpdateView):
    """View for updating the local copies of a finding linked to a report.
    This view defaults to the local_edit.html template."""
    model = ReportFindingLink
    form_class = ReportFindingLinkUpdateForm
    template_name = 'reporting/local_edit.html'
    success_url = reverse_lazy('reporting:reports')
    def get_form(self, form_class=None):
        """Limit the ``assigned_to`` choices to operators assigned to
        this report's project.
        """
        # Local import -- presumably to avoid a circular import with rolodex
        from ghostwriter.rolodex.models import ProjectAssignment
        form = super(ReportFindingLinkUpdate, self).get_form(form_class)
        user_primary_keys = ProjectAssignment.objects.\
            filter(project=self.object.report.project).\
            values_list('operator', flat=True)
        form.fields['assigned_to'].queryset = User.objects.\
            filter(id__in=user_primary_keys)
        return form
    def get_success_url(self):
        """Flash a success message and return to the parent report."""
        messages.success(self.request, '%s was successfully updated.' %
                         self.get_object().title, extra_tags='alert-success')
        return reverse('reporting:report_detail', kwargs={'pk': self.object.report.id})
class ReportFindingLinkDelete(LoginRequiredMixin, DeleteView):
    """View for updating the local copies of a finding linked to a report.
    This view defaults to the local_remove.html template."""
    model = ReportFindingLink
    template_name = 'reporting/local_remove.html'
    def get_success_url(self, **kwargs):
        """Override function to return to the report."""
        # self.report_pk was stashed by delete() before the row was removed
        messages.warning(self.request, '%s was removed from this report.' %
                         self.get_object().title, extra_tags='alert-warning')
        return reverse_lazy('reporting:report_detail', args=(self.report_pk,))
    def delete(self, request, *args, **kwargs):
        """Override function to save the report ID before deleting the
        finding.
        """
        self.report_pk = self.get_object().report.pk
        return super(ReportFindingLinkDelete, self).\
            delete(request, *args, **kwargs)
class EvidenceDetailView(LoginRequiredMixin, generic.DetailView):
    """View showing the details for the specified evidence file. This view
    defaults to the evidence_detail.html template.
    """
    # DetailView exposes the record as ``object``/``evidence`` in the context
    model = Evidence
class EvidenceUpdate(LoginRequiredMixin, UpdateView):
    """View for updating existing evidence. This view defaults to the
    evidence_form.html template.
    """
    model = Evidence
    form_class = EvidenceForm
    def get_success_url(self):
        """Flash a success message and return to the parent report."""
        messages.success(self.request, '%s was successfully updated.' %
                         self.get_object().friendly_name,
                         extra_tags='alert-success')
        return reverse(
            'reporting:report_detail',
            kwargs={'pk': self.object.finding.report.pk})
class EvidenceDelete(LoginRequiredMixin, DeleteView):
    """View for deleting existing evidence. This view defaults to the
    evidence_confirm_delete.html template.
    """
    model = Evidence
    template_name = 'confirm_delete.html'
    def get_success_url(self):
        """Flash a warning message and return to the parent report."""
        messages.warning(self.request, '%s was removed from this report and '
                         'the associated file has been deleted.' %
                         self.get_object().friendly_name,
                         extra_tags='alert-warning')
        return reverse(
            'reporting:report_detail',
            kwargs={'pk': self.object.finding.report.pk})
    def delete(self, request, *args, **kwargs):
        """Remove the evidence file from disk, then delete the model row."""
        full_path = os.path.join(
            settings.MEDIA_ROOT,
            self.get_object().document.name)
        directory = os.path.dirname(full_path)
        # NOTE(review): the file is removed before the DB delete; if the DB
        # delete then fails, the file is already gone -- confirm acceptable.
        os.remove(full_path)
        # Try to delete the directory tree if this was the last/only file
        try:
            os.removedirs(directory)
        except Exception:
            pass
        return super(EvidenceDelete, self).delete(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        """Override the `get_context_data()` function to provide additional
        information.
        """
        ctx = super(EvidenceDelete, self).get_context_data(**kwargs)
        # The shared confirm_delete.html template reads these two keys
        queryset = kwargs['object']
        ctx['object_type'] = 'evidence file'
        ctx['object_to_be_deleted'] = queryset.friendly_name
        return ctx
class FindingNoteCreate(LoginRequiredMixin, CreateView):
    """View for creating new note entries. This view defaults to the
    note_form.html template.
    """
    model = FindingNote
    form_class = FindingNoteCreateForm
    template_name = 'note_form.html'
    def get_success_url(self):
        """Flash a success message and return to the parent finding."""
        messages.success(
            self.request,
            'Note successfully added to this finding.',
            extra_tags='alert-success')
        return reverse('reporting:finding_detail', kwargs={'pk': self.object.finding.id})
    def get_initial(self):
        """Pre-bind the note to the finding in the URL and to the current user."""
        finding_instance = get_object_or_404(
            Finding, pk=self.kwargs.get('pk'))
        finding = finding_instance
        return {
            'finding': finding,
            'operator': self.request.user
        }
class FindingNoteUpdate(LoginRequiredMixin, UpdateView):
    """View for updating existing note entries. This view defaults to the
    note_form.html template.
    """
    model = FindingNote
    form_class = FindingNoteCreateForm
    template_name = 'note_form.html'
    def get_success_url(self):
        """Flash a success message and return to the parent finding."""
        messages.success(
            self.request,
            'Note successfully updated.',
            extra_tags='alert-success')
        return reverse('reporting:finding_detail', kwargs={'pk': self.object.finding.pk})
class FindingNoteDelete(LoginRequiredMixin, DeleteView):
    """View for deleting existing note entries. This view defaults to the
    confirm_delete.html template.
    """
    model = FindingNote
    template_name = 'confirm_delete.html'
    def get_success_url(self):
        """Flash a warning message and return to the parent finding."""
        messages.warning(
            self.request,
            'Note successfully deleted.',
            extra_tags='alert-warning')
        return reverse('reporting:finding_detail', kwargs={'pk': self.object.finding.pk})
    def get_context_data(self, **kwargs):
        """Override the `get_context_data()` function to provide additional
        information.
        """
        ctx = super(FindingNoteDelete, self).get_context_data(**kwargs)
        # The shared confirm_delete.html template reads these two keys
        queryset = kwargs['object']
        ctx['object_type'] = 'note'
        ctx['object_to_be_deleted'] = queryset.note
        return ctx
class LocalFindingNoteCreate(LoginRequiredMixin, CreateView):
    """View for creating new note entries. This view defaults to the
    note_form.html template.
    """
    model = LocalFindingNote
    form_class = LocalFindingNoteCreateForm
    template_name = 'note_form.html'
    def get_success_url(self):
        """Flash a success message and return to the report-local finding editor."""
        messages.success(
            self.request,
            'Note successfully added to this finding.',
            extra_tags='alert-success')
        return reverse('reporting:local_edit', kwargs={'pk': self.object.finding.id})
    def get_initial(self):
        """Pre-bind the note to the report-local finding and the current user."""
        finding_instance = get_object_or_404(
            ReportFindingLink, pk=self.kwargs.get('pk'))
        finding = finding_instance
        return {
            'finding': finding,
            'operator': self.request.user
        }
class LocalFindingNoteUpdate(LoginRequiredMixin, UpdateView):
    """View for updating existing note entries. This view defaults to the
    note_form.html template.
    """
    model = LocalFindingNote
    form_class = LocalFindingNoteCreateForm
    template_name = 'note_form.html'
    def get_success_url(self):
        """Flash a success message and return to the report-local finding editor."""
        messages.success(
            self.request,
            'Note successfully updated.',
            extra_tags='alert-success')
        return reverse('reporting:local_edit', kwargs={'pk': self.object.finding.pk})
class LocalFindingNoteDelete(LoginRequiredMixin, DeleteView):
    """View for deleting existing note entries. This view defaults to the
    confirm_delete.html template.
    """
    model = LocalFindingNote
    template_name = 'confirm_delete.html'
    def get_success_url(self):
        """Flash a warning message and return to the report-local finding editor."""
        messages.warning(
            self.request,
            'Note successfully deleted.',
            extra_tags='alert-warning')
        return reverse('reporting:local_edit', kwargs={'pk': self.object.finding.pk})
    def get_context_data(self, **kwargs):
        """Override the `get_context_data()` function to provide additional
        information.
        """
        ctx = super(LocalFindingNoteDelete, self).get_context_data(**kwargs)
        # The shared confirm_delete.html template reads these two keys
        queryset = kwargs['object']
        ctx['object_type'] = 'note'
        ctx['object_to_be_deleted'] = queryset.note
        return ctx
| StarcoderdataPython |
1775164 |
#!/usr/bin/env python
# coding: utf-8
import sys
from time import time
from time import sleep
import xarray as xr
import boto3
import os
import rioxarray
import rasterio
import argparse
def _split_full_path(bucket_full_path):
if 's3://' in bucket_full_path:
bucket_full_path=bucket_full_path.replace('s3://','')
print(bucket_full_path)
bucket = bucket_full_path.split('/')[0]
bucket_filepath = '/'.join(bucket_full_path.split('/')[1:])
return (bucket, bucket_filepath)
def s3_push_delete_local(local_file, bucket_full_path):
    """Upload ``local_file`` to S3, then delete the local copy.

    Args:
        local_file: path of the file to upload (removed after upload).
        bucket_full_path: destination as ``s3://bucket/key`` or ``bucket/key``.
    """
    s3 = boto3.client('s3')
    with open(local_file, "rb") as f:
        (bucket, bucket_filepath) = _split_full_path(bucket_full_path)
        s3.upload_fileobj(f, bucket, bucket_filepath)
    os.remove(local_file)
def write_GeoTif_like(templet_tif_file, output_ndarry, output_tif_file):
    """Write a single-band GeoTIFF copying CRS/transform from a template.

    Args:
        templet_tif_file: existing GeoTIFF used as the spatial-reference
            template (its CRS and affine transform are copied).
        output_ndarry: 2-D array written as band 1; nodata is -9999.
        output_tif_file: destination path for the new GeoTIFF.

    Fixes: the template dataset handle was never closed (resource leak),
    and the module-level ``import rasterio`` was redundantly re-imported
    inside the function.
    """
    # Context managers guarantee both datasets are closed, even on error
    with rasterio.open(templet_tif_file) as orig:
        print('write file ', output_tif_file)
        with rasterio.open(output_tif_file, 'w', driver='GTiff',
                           height=output_ndarry.shape[0],
                           width=output_ndarry.shape[1], count=1,
                           dtype=output_ndarry.dtype,
                           crs=orig.crs, transform=orig.transform,
                           nodata=-9999) as dst:
            dst.write(output_ndarry, 1)
def _get_year_month(product, tif):
fn = tif.split('/')[-1]
fn = fn.replace(product,'')
fn = fn.replace('.tif','')
print(fn)
fn=fn[-3:]
return fn
def _xr_open_rasterio_retry(s3_file_name, retries=10, delay=4):
    """Open a raster with xarray, retrying transient RasterioIOError.

    Args:
        s3_file_name: path/URL to open.
        retries: attempts before giving up (default 10, as before).
        delay: seconds to sleep between attempts (default 4, as before).

    Returns:
        The opened xarray DataArray.

    Raises:
        rasterio.errors.RasterioIOError: when every attempt fails. The
        original fell off the loop and returned None here, which only
        surfaced later as a confusing downstream error.
    """
    for attempt in range(retries):
        try:
            return xr.open_rasterio(s3_file_name)
        except rasterio.errors.RasterioIOError:
            print("Unexpected error:", sys.exc_info()[0])
            print('oops', retries - attempt)
            print('oops', s3_file_name)
            sleep(delay)
    raise rasterio.errors.RasterioIOError(
        'Failed to open %s after %d attempts' % (s3_file_name, retries))
def xr_build_cube_concat_ds(tif_list, product):
    """Stack a list of daily GeoTIFFs into one xarray Dataset.

    Each tif is opened as a DataArray, the arrays are concatenated along a
    new ``day`` dimension labelled with the day-of-year parsed from each
    filename, and the result is wrapped in a Dataset under the variable
    name ``product``.
    """
    start = time()
    my_da_list =[]
    year_month_list = []
    for tif in tif_list:
        tiffile = tif
        #print(tiffile)
        da = _xr_open_rasterio_retry(tiffile)
        my_da_list.append(da)
        tnow = time()
        elapsed = tnow - start
        #print(tif, elapsed)
        # One dot per file as a lightweight progress indicator
        print('.',flush=True)
        year_month_list.append(_get_year_month(product, tif))
    # Concatenate along the raster 'band' axis, then relabel it as 'day'
    da = xr.concat(my_da_list, dim='band')
    da = da.rename({'band':'day'})
    da = da.assign_coords(day=year_month_list)
    DS = da.to_dataset(name=product)
    return(DS)
def create_s3_list_of_days(main_prefix, year, temperatureType):
    """Build the list of daily GeoTIFF S3 object names for one year.

    Produces ``{prefix}{type}/{year}/{type}_{year}{DDD}.tif`` for days
    001-365. (Day 366 of leap years is not generated -- presumably
    intentional; verify against the data set.)
    """
    base = '%s%s/%s/%s_%s' % (main_prefix, temperatureType, year,
                              temperatureType, year)
    return ['%s%03d.tif' % (base, day_of_year) for day_of_year in range(1, 366)]
def main_runner(year, temperatureType):
    """Convert one year of daily temperature rasters from Kelvin to Celsius.

    Builds a day-stacked Dataset from the DelawareRiverBasin/Temp prefix,
    subtracts 273.15 from every value, and writes the Celsius tifs back
    under the TempCelsius prefix (staging locally in ./tmp).
    """
    main_bucket_prefix='s3://dev-et-data/in/DelawareRiverBasin/Temp/'
    #year='1950'
    #temperatureType = 'Tasavg'
    tif_list = create_s3_list_of_days(main_bucket_prefix, year, temperatureType)
    ds = xr_build_cube_concat_ds(tif_list, temperatureType)
    # Echo the day labels so progress/ordering is visible in the log
    for i in range(0,ds.dims['day']):
        print(ds[temperatureType][i]['day'])
    ds = ds - 273.15 # convert data array xarray.DataSet from Kelvin to Celsius
    output_main_prefix='s3://dev-et-data/in/DelawareRiverBasin/TempCelsius/'
    path ='./tmp'
    os.makedirs(path, exist_ok=True)
    write_out_celsius_tifs(output_main_prefix, ds, year, output_name=temperatureType)
def write_out_celsius_tifs(main_prefix, ds, year, output_name):
    """Write each day of ``ds`` to a local GeoTIFF and push it to S3.

    Day numbering restarts at 001 and is assumed to match the order the
    day cube was built in.
    """
    num_days=ds.dims['day']
    for i in range(0,num_days ):
        dayi = i+1
        day="{:03d}".format(dayi)
        s3_file_object = main_prefix + output_name + '/' + str(year) + '/' + output_name + '_' + str(year) + day + '.tif'
        print(s3_file_object)
        file_object = './tmp/' + output_name + '_' + str(year) + day + '.tif'
        print(file_object)
        np_array = ds[output_name].isel(day=i).values
        # print(type(np_array))
        # NOTE(review): the georeferencing template is hard-coded to a single
        # 1950 Tasavg tile -- confirm all years/products share its grid.
        my_template = 's3://dev-et-data/in/DelawareRiverBasin/Temp/Tasavg/1950/Tasavg_1950017.tif'
        write_GeoTif_like(my_template, np_array, file_object)
        s3_push_delete_local(file_object, s3_file_object)
def get_parser():
    """Build the command-line argument parser for the kelvin converter.

    Options:
        -y/--year: year to process (default ``'Annual'``).
        -t/--type: temperature product, e.g. Tasavg/Tasmax/Tasmin
            (default ``'Tasmax'``).
    """
    parser = argparse.ArgumentParser(description='Run the kelvin code')
    parser.add_argument(
        '-y', '--year', type=str, default='Annual',
        help='specify year or Annual or all example: -y 1999 ')
    parser.add_argument(
        '-t', '--type', type=str, default='Tasmax',
        help='temp type ex: Tasavg , Tasmax, Tasmin')
    return parser
def command_line_runner():
    """Parse CLI arguments and kick off the conversion.

    Both options have non-empty defaults, so the truthiness guards below
    always fire; they effectively only gate the echo prints.
    """
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['year']:
        year = args['year']
        print("year", args['year'])
    if args['type']:
        tempType = args['type']
        print("type", args['type'])
    main_runner(year, tempType)
if __name__ == '__main__':
    command_line_runner()
| StarcoderdataPython |
1750351 | <reponame>sattwik21/Hacktoberfest2021-1
import statistics
import numpy as np
# Sort the numbers in place and print them as a comma-separated list.
def sort(num):
    """Sort ``num`` in place and print it comma-separated on one line.

    Bug fixed: the original compared each element's VALUE against
    ``len(num)`` to decide whether to print a trailing comma, so the
    separators were wrong whenever a value was >= the list length.
    """
    num.sort()
    print("Urutkan angka: ", end="")
    # print() with sep inserts ", " between items and a final newline
    print(*num, sep=", ")
# Print the arithmetic mean of the numbers, rounded to 2 decimals.
def average(num):
    """Print the mean of ``num`` rounded to two decimal places."""
    mean = sum(num) / len(num)
    print("Rata-rata: ", round(mean, 2))
# Print the median of the numbers.
def median(num):
    """Print the statistical median of ``num``."""
    middle = statistics.median(num)
    print("Median: ", middle)
# Print the product of all the numbers.
def multiply(num):
    """Print the product of every element of ``num``."""
    product = np.prod(num)
    print("Hasil kali semua bilangan: ", product)
# Interactive driver: read N integers from the user, then report sorted
# order, mean, median, and product using the helpers above.
num = []
i = 0;
print("============= PROGRAM INPUT BILANGAN ===============")
# Ask how many numbers will be entered
amount_num = int(input("Berapa banyak bilangan yang ingin dihitung?: "))
while i < amount_num :
    num_input = int(input("Bilangan ke - %d: " %(i+1)))
    num.append(num_input)
    i += 1
print("====================================================")
sort(num)
average(num)
median(num)
multiply(num)
| StarcoderdataPython |
# Package identifier for the EMBL2checklists distribution.
name = 'EMBL2checklists'
# Submodules exported by ``from EMBL2checklists import *``.
__all__ = ['ChecklistOps', 'globalVariables', 'EMBL2checklistsMain', 'PrerequisiteOps']
| StarcoderdataPython |
3306737 | <filename>libs/utils/auth.py
import json
import logging
import os
from datetime import datetime, timedelta
from jose import jwt
from passlib.context import CryptContext
logger = logging.getLogger(__name__)
# Location of the single API user's credential file (JSON with a bcrypt
# password hash). Note the trailing '/' on the folder plus the '/' in the
# f-string yields a double slash in the path -- harmless on POSIX.
API_USER_CREDENTIALS_FOLDER = "/repo/data/auth/"
API_USER_PATH = f"{API_USER_CREDENTIALS_FOLDER}/api_user.txt"
# Passlib context used for hashing and verifying the API password
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
# In-process cache of the stored username (populated lazily, never expired)
user_cache = dict()
def create_api_user(user: str, password: str):
    """Create the single API user's credential file.

    The password is stored bcrypt-hashed via passlib. Raises a plain
    Exception if a non-empty credential file already exists, so the
    account can only be provisioned once.
    """
    if os.path.isfile(API_USER_PATH) and os.path.getsize(API_USER_PATH) != 0:
        # API credential already created
        logger.error("API credentials already created.")
        raise Exception("Error creating user. API credentials already created.")
    os.makedirs(API_USER_CREDENTIALS_FOLDER, exist_ok=True)
    with open(API_USER_PATH, "w+") as api_user_file:
        api_user_credentials = {
            "user": user,
            "password": pwd_context.hash(password)
        }
        json.dump(api_user_credentials, api_user_file)
def validate_user_credentials(user: str, password: str) -> bool:
    """Check ``user``/``password`` against the stored credential file.

    Returns False (rather than raising) when the credential file is
    missing or empty, or when either the username or password mismatches.
    """
    if not os.path.isfile(API_USER_PATH) or os.path.getsize(API_USER_PATH) == 0:
        logger.error("API credentials doesn't exit.")
        return False
    with open(API_USER_PATH, "r") as api_user_file:
        stored_credentials = json.load(api_user_file)
    # passlib compares the plaintext against the stored bcrypt hash
    if stored_credentials["user"] != user or not pwd_context.verify(password, stored_credentials["password"]):
        return False
    return True
def create_access_token(user: str):
    """Create an HS256-signed JWT for ``user``, expiring in one week.

    The signing key is read from the SECRET_ACCESS_KEY environment variable.
    """
    # Set a week (60*24*7=10080) as expiration date
    # NOTE(review): utcnow() returns a naive datetime and is deprecated in
    # newer Pythons; datetime.now(timezone.utc) is the modern form -- confirm
    # jose handles both identically before changing.
    expire_date = datetime.utcnow() + timedelta(minutes=10080)
    data = {"sub": user}
    data.update({"exp": expire_date})
    return jwt.encode(data, os.environ.get("SECRET_ACCESS_KEY"), algorithm="HS256")
def validate_jwt_token(token):
    """Decode ``token`` and verify its subject matches the stored API user.

    jose raises on an invalid/expired signature; a plain Exception is
    raised when the subject does not match the provisioned username.
    """
    playload = jwt.decode(token, os.environ.get("SECRET_ACCESS_KEY"), algorithms=["HS256"])
    # Read the stored username once and cache it for the process lifetime
    # (the cache is never invalidated if the credential file changes).
    if not user_cache.get("user"):
        with open(API_USER_PATH, "r") as api_user_file:
            stored_credentials = json.load(api_user_file)
            user_cache["user"] = stored_credentials["user"]
    if playload["sub"] != user_cache["user"]:
        raise Exception("JWT doesn't belong to user.")
| StarcoderdataPython |
3255287 | <reponame>Pittsy24/Python-Vectors<filename>vectors.py
#!/usr/bin/python3
# By <NAME>
import math
class vector2D(object):
    def __init__(self, x_coord, y_coord):
        """Create a 2D vector from its x and y components."""
        super().__init__()
        self.x = x_coord
        self.y = y_coord
def set(self, x_coord, y_coord):
"""Sets the x, y component of the vector.
>>> v = vector2D(10, 20)
>>> v.set(20, 24)
>>> print(v.x)
20
>>> print(v.y)
24
"""
self.x = x_coord
self.y = y_coord
def copy(self):
"""Return a copy of the vector.
>>> v = vector2D(10, 20)
>>> c = v.copy()
>>> print(c.x)
10
>>> print(c.y)
20
"""
return vector2D(self.x ,self.y)
def add(self, x_coord, y_coord):
"""Adds x and y components to the vector.
>>> v = vector2D(10, 20)
>>> v.add(10, 6)
>>> print(v.x)
20
>>> print(v.y)
26
"""
self.x += x_coord
self.y += y_coord
def addVector2D(self, vec):
"""Adds one vector to the other.
>>> v = vector2D(10, 20)
>>> v2 = vector2D(10, 6)
>>> v.addVector2D(v2)
>>> print(v.x)
20
>>> print(v.y)
26
"""
if repr(vec) == "Vector2D":
self.add(vec.x, vec.y)
else:
raise AttributeError("vector2D.addVector2D requires a 2DVector!")
def sub(self, x_coord, y_coord):
"""Subtacts x and y components.
>>> v = vector2D(10, 20)
>>> v.sub(10, 6)
>>> print(v.x)
0
>>> print(v.y)
14
"""
self.x -= x_coord
self.y -= y_coord
def subVector2D(self, vec):
"""Subtacts one vector from the other.
>>> v = vector2D(10, 20)
>>> v2 = vector2D(10, 6)
>>> v.subVector2D(v2)
>>> print(v.x)
0
>>> print(v.y)
14
"""
if repr(vec) == "Vector2D":
self.sub(vec.x, vec.y)
else:
raise AttributeError("vector2D.subVector2D requires a 2DVector!")
def mult(self, x_coord, y_coord):
""" Multiples the x and y components by the supplied coords.
>>> v = vector2D(10, 20)
>>> v.mult(10, 6)
>>> print(v.x)
100
>>> print(v.y)
120
"""
self.x *= x_coord
self.y *= y_coord
def multVector2D(self, vec):
""" Multiples two vectors together.
>>> v = vector2D(10, 20)
>>> v2 = vector2D(10, 6)
>>> v.multVector2D(v2)
>>> print(v.x)
100
>>> print(v.y)
120
"""
if repr(vec) == "Vector2D":
self.mult(vec.x, vec.y)
else:
raise AttributeError("vector2D.multVector2D requires a 2DVector!")
def div(self, x_coord, y_coord):
""" Divides the x and y components by the supplied coords.
>>> v = vector2D(10, 20)
>>> v.div(10, 2)
>>> print(v.x)
1.0
>>> print(v.y)
10.0
"""
self.x /= x_coord
self.y /= y_coord
def divVector2D(self, vec):
""" Divides two vectors x and y components.
>>> v = vector2D(10, 20)
>>> v2 = vector2D(10, 2)
>>> v.divVector2D(v2)
>>> print(v.x)
1.0
>>> print(v.y)
10.0
"""
if repr(vec) == "Vector2D":
self.div(vec.x, vec.y)
else:
raise AttributeError("vector2D.divVector2D requires a 2DVector!")
def mag(self):
""" Calculates the magnitude (length) of the vector.
>>> v = vector2D(3, 4)
>>> v.mag()
5.0
"""
return round(math.sqrt(
(self.x * self.x) +
(self.y * self.y)
), 10)
def magSq(self):
""" Calculates the squared magnitude (length) of the vector (Faster)
>>> v = vector2D(3, 4)
>>> v.magSq()
25
"""
return (
(self.x * self.x) +
(self.y * self.y)
)
def dot(self, x_coord, y_coord):
""" Calculates the dot product between the vector and an x and y coordinate.
>>> v = vector2D(3, 4)
>>> v.dot(3, 79)
325
"""
return (self.x * x_coord) + (self.y * y_coord)
def dotVector2D(self, vec):
""" Calculates the dot product between two vectors.
>>> v = vector2D(3, 4)
>>> v2 = vector2D(3, 79)
>>> v.dotVector2D(v2)
325
"""
if repr(vec) == "Vector2D":
return self.dot(vec.x, vec.y)
else:
raise AttributeError("vector2D.dotVector2D requires a 2DVector!")
def dist(self, x_coord, y_coord):
""" Calculates the distance between the Vector and a point.
>>> v = vector2D(3, 4)
>>> v.dist(10, 6)
7.2801098893
"""
a = x_coord - self.x
b = y_coord - self.y
return round(math.sqrt((a*a)+(b*b)), 10)
def distVector2D(self, vec):
""" Calculates the distance between two Vectors.
>>> v = vector2D(3, 4)
>>> v2 = vector2D(10, 6)
>>> v.distVector2D(v2)
7.2801098893
"""
if repr(vec) == "Vector2D":
return self.dist(vec.x, vec.y)
else:
raise AttributeError("vector2D.distVector2D requires a 2DVector!")
def normalise(self, mag_limit = 1):
""" Normalise the vector to a magnitude of mag_limit.
>>> v = vector2D(4, 7)
>>> v.normalise()
>>> print(v)
(0.49613893835674455, 0.868243142124303)
>>> v = vector2D(4, 7)
>>> v.normalise(5)
>>> print(v)
(2.480694691783723, 4.341215710621515)
"""
m = self.mag()
sf = (m / mag_limit)
self.div(sf, sf)
def heading(self):
""" Calculates the heading of the Vector
>>> v = vector2D(3, 4)
>>> v.heading()
0.927295218
"""
return round(math.atan(self.y / self.x), 10)
def rotate(self, angle):
""" Rotates the Vector by an angle given in radians.
>>> v = vector2D(3, 4)
>>> v.rotate(1.57)
>>> print(v)
(-3.9976097516, 3.0031843556)
"""
theta = angle
cs = math.cos(theta)
sn = math.sin(theta)
px = self.x * cs - self.y * sn
py = self.x * sn + self.y * cs
self.x = round(px, 10)
self.y = round(py, 10)
    def angleBetween(self, x_coord, y_coord):
        """ Return the cosine of the angle between the Vector and a point.

        NOTE(review): despite the method name and the original wording
        ("angle in radians"), no acos() is applied, so the value returned
        is cos(angle) -- the doctest below pins that behaviour.

        >>> v = vector2D(3, 4)
        >>> v.angleBetween(24, 4)
        0.7233555441
        """
        dp = self.dot(x_coord, y_coord)
        m1 = self.mag()
        m2 = vector2D(x_coord, y_coord).mag()
        # dot(a, b) = |a||b|cos(theta)  ->  cos(theta) = dot / (|a||b|)
        return round(dp / (m1*m2),10)
    def angleBetweenVector2D(self, vec):
        """ Return the cosine of the angle between this vector and another.

        NOTE(review): like angleBetween, this returns cos(angle), not the
        angle in radians (see the doctest).

        >>> v = vector2D(3, 4)
        >>> v2 = vector2D(24, 4)
        >>> v.angleBetweenVector2D(v2)
        0.7233555441
        """
        if repr(vec) == "Vector2D":
            return self.angleBetween(vec.x, vec.y)
        else:
            raise AttributeError("vector2D.angleBetweenVector2D requires a 2DVector!")
def lerp(self, x_coord, y_coord, amnt):
""" Linear interpolate the vector to a coord pair
>>> v = vector2D(3, 4)
>>> v.lerp(6, 3, 0.5)
>>> print(v)
(4.5, 3.5)
"""
if amnt < 0 or amnt > 1:
raise AttributeError("Lerp ammount must be between 0 and 1!")
else:
self.x = self.x * amnt + x_coord * (1-amnt)
self.y = self.y * amnt + y_coord * (1-amnt)
def lerpVector2D(self, vec, amnt):
""" Linear interpolate the vector to another vector
>>> v = vector2D(3, 4)
>>> v2 = vector2D(6, 3)
>>> v.lerpVector2D(v2, 0.5)
>>> print(v)
(4.5, 3.5)
"""
if repr(vec) == "Vector2D":
return self.lerp(vec.x, vec.y, amnt)
else:
raise AttributeError("vector2D.lerpVector2D requires a 2DVector!")
def array(self):
""" Return an array
>>> v = vector2D(3, 4)
>>> v.array()
[3, 4]
"""
return [self.x, self.y]
def intArray(self):
""" Returns an array of the rounded x, y values
>>> v = vector2D(3.6, 2.2)
>>> v.intArray()
[4, 2]
"""
return [round(self.x), round(self.y)]
def __eq__(self, value):
try:
return self.x == value.x and self.y == value.y
except:
return False
def __ne__(self, value):
try:
return self.x != value.x or self.y != value.y
except:
return True
    def __repr__(self):
        # The literal "Vector2D" doubles as the type tag checked by the
        # *Vector2D helper methods (repr(vec) == "Vector2D"); do not change.
        return "Vector2D"
def __str__(self):
return "({0}, {1})".format(self.x, self.y)
class vector3D(object):
    """A simple mutable 3D vector with x, y and z components.

    ``repr()`` returns the tag "Vector3D"; the *Vector3D helper methods use
    that tag as a lightweight type check, so it must not change. Results of
    irrational operations (mag, dist, normalise, ...) are rounded to 10
    decimal places, matching the 2D class in this module.
    """
    def __init__(self, x_coord, y_coord, z_coord):
        super().__init__()
        self.x = x_coord
        self.y = y_coord
        self.z = z_coord
    def set(self, x_coord, y_coord, z_coord):
        """Sets the x, y and z components of the vector.
        >>> v = vector3D(10, 20, 30)
        >>> v.set(20, 24, 26)
        >>> print(v.x)
        20
        >>> print(v.y)
        24
        >>> print(v.z)
        26
        """
        self.x = x_coord
        self.y = y_coord
        self.z = z_coord
    def copy(self):
        """Return a copy of the vector.
        >>> v = vector3D(10, 20, 30)
        >>> c = v.copy()
        >>> print(c.x)
        10
        >>> print(c.y)
        20
        >>> print(c.z)
        30
        """
        return vector3D(self.x, self.y, self.z)
    def add(self, x_coord, y_coord, z_coord):
        """Adds x, y and z components to the vector.
        >>> v = vector3D(10, 20, 30)
        >>> v.add(10, 6, 4)
        >>> print(v.x)
        20
        >>> print(v.y)
        26
        >>> print(v.z)
        34
        """
        self.x += x_coord
        self.y += y_coord
        self.z += z_coord
    def addVector3D(self, vec):
        """Adds one vector to the other.
        >>> v = vector3D(10, 20, 30)
        >>> v2 = vector3D(10, 6, 4)
        >>> v.addVector3D(v2)
        >>> print(v.x)
        20
        >>> print(v.y)
        26
        >>> print(v.z)
        34
        """
        if repr(vec) == "Vector3D":
            self.add(vec.x, vec.y, vec.z)
        else:
            raise AttributeError("vector3D.addVector3D requires a 3DVector!")
    def sub(self, x_coord, y_coord, z_coord):
        """Subtracts x, y and z components.
        >>> v = vector3D(10, 20, 30)
        >>> v.sub(10, 6, 4)
        >>> print(v.x)
        0
        >>> print(v.y)
        14
        >>> print(v.z)
        26
        """
        self.x -= x_coord
        self.y -= y_coord
        self.z -= z_coord
    def subVector3D(self, vec):
        """Subtracts one vector from the other.
        >>> v = vector3D(10, 20, 30)
        >>> v2 = vector3D(10, 6, 4)
        >>> v.subVector3D(v2)
        >>> print(v.x)
        0
        >>> print(v.y)
        14
        >>> print(v.z)
        26
        """
        if repr(vec) == "Vector3D":
            self.sub(vec.x, vec.y, vec.z)
        else:
            raise AttributeError("vector3D.subVector3D requires a 3DVector!")
    def mult(self, x_coord, y_coord, z_coord):
        """ Multiplies the x, y and z components by the supplied coords.
        >>> v = vector3D(10, 20, 30)
        >>> v.mult(10, 6, 4)
        >>> print(v.x)
        100
        >>> print(v.y)
        120
        >>> print(v.z)
        120
        """
        self.x *= x_coord
        self.y *= y_coord
        self.z *= z_coord
    def multVector3D(self, vec):
        """ Multiplies two vectors together.
        >>> v = vector3D(10, 20, 30)
        >>> v2 = vector3D(10, 6, 4)
        >>> v.multVector3D(v2)
        >>> print(v.x)
        100
        >>> print(v.y)
        120
        >>> print(v.z)
        120
        """
        if repr(vec) == "Vector3D":
            self.mult(vec.x, vec.y, vec.z)
        else:
            raise AttributeError("vector3D.multVector3D requires a 3DVector!")
    def div(self, x_coord, y_coord, z_coord):
        """ Divides the x, y and z components by the supplied coords.
        >>> v = vector3D(10, 20, 30)
        >>> v.div(10, 2, 3)
        >>> print(v.x)
        1.0
        >>> print(v.y)
        10.0
        >>> print(v.z)
        10.0
        """
        self.x /= x_coord
        self.y /= y_coord
        self.z /= z_coord
    def divVector3D(self, vec):
        """ Divides two vectors x, y and z components.
        >>> v = vector3D(10, 20, 30)
        >>> v2 = vector3D(10, 2, 3)
        >>> v.divVector3D(v2)
        >>> print(v.x)
        1.0
        >>> print(v.y)
        10.0
        >>> print(v.z)
        10.0
        """
        if repr(vec) == "Vector3D":
            self.div(vec.x, vec.y, vec.z)
        else:
            raise AttributeError("vector3D.divVector3D requires a 3DVector!")
    def mag(self):
        """ Calculates the magnitude (length) of the vector.
        >>> v = vector3D(3, 4, 12)
        >>> v.mag()
        13.0
        """
        return round(math.sqrt(
            (self.x * self.x) +
            (self.y * self.y) +
            (self.z * self.z)
        ), 10)
    def magSq(self):
        """ Calculates the squared magnitude (length) of the vector (Faster)
        >>> v = vector3D(3, 4 ,12)
        >>> v.magSq()
        169
        """
        return (
            (self.x * self.x) +
            (self.y * self.y) +
            (self.z * self.z)
        )
    def dot(self, x_coord, y_coord, z_coord):
        """ Calculates the dot product between the vector and an x, y and z coordinate.
        >>> v = vector3D(3, 4, 5)
        >>> v.dot(3, 79, 24)
        445
        >>> v = vector3D(1, 0, 0)
        >>> v.dot(1, 0, 0)
        1
        """
        return (self.x * x_coord) + (self.y * y_coord) + (self.z * z_coord)
    def dotVector3D(self, vec):
        """ Calculates the dot product between two vectors.
        >>> v = vector3D(3, 4, 5)
        >>> v2 = vector3D(3, 79, 24)
        >>> v.dotVector3D(v2)
        445
        """
        if repr(vec) == "Vector3D":
            return self.dot(vec.x, vec.y, vec.z)
        else:
            raise AttributeError("vector3D.dotVector3D requires a 3DVector!")
    def dist(self, x_coord, y_coord, z_coord):
        """ Calculates the distance between the Vector and a point.
        >>> v = vector3D(3, 4, 5)
        >>> v.dist(10, 6, 4)
        7.3484692283
        """
        a = x_coord - self.x
        b = y_coord - self.y
        c = z_coord - self.z
        return round(math.sqrt((a*a)+(b*b)+(c*c)), 10)
    def distVector3D(self, vec):
        """ Calculates the distance between two Vectors.
        >>> v = vector3D(3, 4, 5)
        >>> v2 = vector3D(10, 6, 4)
        >>> v.distVector3D(v2)
        7.3484692283
        """
        if repr(vec) == "Vector3D":
            return self.dist(vec.x, vec.y, vec.z)
        else:
            raise AttributeError("vector3D.distVector3D requires a 3DVector!")
    def normalise(self, mag_limit = 1):
        """ Normalise the vector to a magnitude of mag_limit.
        >>> v = vector3D(4, 7, 9)
        >>> v.normalise()
        >>> print(v)
        (0.33104235544079846, 0.5793241220213973, 0.7448452997417965)
        >>> v = vector3D(4, 7, 9)
        >>> v.normalise(5)
        >>> print(v)
        (1.655211777203992, 2.8966206101069862, 3.7242264987089824)
        """
        m = self.mag()
        sf = (m / mag_limit)
        self.div(sf, sf, sf)
    def angleBetween(self, x_coord, y_coord, z_coord):
        """ Return the cosine of the angle between the Vector and a point.

        NOTE(review): despite the method name, no acos() is applied, so the
        value returned is cos(angle) -- as pinned by the doctest.

        >>> v = vector3D(2, 4 ,6)
        >>> v.angleBetween(0, 5, 10)
        0.9561828875
        """
        dp = self.dot(x_coord, y_coord, z_coord)
        m1 = self.mag()
        m2 = vector3D(x_coord, y_coord, z_coord).mag()
        return round(dp / (m1*m2),10)
    def angleBetweenVector3D(self, vec):
        """ Return the cosine of the angle between this vector and another.
        >>> v = vector3D(2, 4, 6)
        >>> v2 = vector3D(0, 5, 10)
        >>> v.angleBetweenVector3D(v2)
        0.9561828875
        """
        if repr(vec) == "Vector3D":
            return self.angleBetween(vec.x, vec.y, vec.z)
        else:
            raise AttributeError("vector3D.angleBetweenVector3D requires a 3DVector!")
    def lerp(self, x_coord, y_coord, z_coord, amnt):
        """ Linear interpolate the vector to a coord triple.

        Note the convention: amnt is the weight kept on the *current*
        position, so amnt=1 leaves the vector unchanged.

        >>> v = vector3D(3, 4, 5)
        >>> v.lerp(6, 3, 1, 0.5)
        >>> print(v)
        (4.5, 3.5, 3.0)
        """
        if amnt < 0 or amnt > 1:
            raise AttributeError("Lerp ammount must be between 0 and 1!")
        else:
            self.x = self.x * amnt + x_coord * (1-amnt)
            self.y = self.y * amnt + y_coord * (1-amnt)
            self.z = self.z * amnt + z_coord * (1-amnt)
    def lerpVector3D(self, vec, amnt):
        """ Linear interpolate the vector to another vector
        >>> v = vector3D(3, 4, 5)
        >>> v2 = vector3D(6, 3, 1)
        >>> v.lerpVector3D(v2, 0.5)
        >>> print(v)
        (4.5, 3.5, 3.0)
        """
        if repr(vec) == "Vector3D":
            return self.lerp(vec.x, vec.y, vec.z, amnt)
        else:
            raise AttributeError("vector3D.lerpVector3D requires a 3DVector!")
    def array(self):
        """ Return an array
        >>> v = vector3D(3, 4, 5)
        >>> v.array()
        [3, 4, 5]
        """
        return [self.x, self.y, self.z]
    def intArray(self):
        """ Returns an array of the rounded x, y and z values
        >>> v = vector3D(3.6, 2.2, 9.7)
        >>> v.intArray()
        [4, 2, 10]
        """
        return [round(self.x), round(self.y), round(self.z)]
    def __eq__(self, value):
        # Bug fix: the original compared self.y against value.z in the third
        # clause, so two identical vectors with y != z compared unequal.
        # Also narrowed the bare except to AttributeError.
        try:
            return (
                self.x == value.x and
                self.y == value.y and
                self.z == value.z
            )
        except AttributeError:
            return False
    def __ne__(self, value):
        # Narrowed from a bare except to AttributeError (non-vector objects
        # simply compare as "not equal").
        try:
            return self.x != value.x or self.y != value.y or self.z != value.z
        except AttributeError:
            return True
    def __repr__(self):
        # The literal "Vector3D" doubles as the type tag checked by the
        # *Vector3D helper methods; do not change it.
        return "Vector3D"
    def __str__(self):
        return "({0}, {1}, {2})".format(self.x, self.y, self.z)
if __name__ == "__main__":
import doctest
doctest.testmod() | StarcoderdataPython |
1757914 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cc_licences.py
Script to create cc_licence records. Script can be re-run without creating
duplicate records, but will update/replace existing records.
The --clear is not recommended if books already have cc_licences.
"""
import os
import sys
import traceback
from optparse import OptionParser
from gluon import *
from gluon.shell import env
from applications.zcomx.modules.cc_licences import CCLicence
from applications.zcomx.modules.logger import set_cli_logging
VERSION = 'Version 0.1'
# Bootstrap the web2py application environment (models included) so this
# script can use the app's db outside of a web request.
APP_ENV = env(__file__.split(os.sep)[-3], import_models=True)
# C0103: *Invalid name "%%s" (should match %%s)*
# pylint: disable=C0103
db = APP_ENV['db']
# line-too-long (C0301): *Line too long (%%s/%%s)*
# pylint: disable=C0301
# The order of TEMPLATES is significant. The db cc_licence.number value is
# set to the index of the codes. The licences are displayed in the ddm
# in the same order.
TEMPLATES = [
    'CC0',
    'CC BY',
    'CC BY-SA',
    'CC BY-ND',
    'CC BY-NC',
    'CC BY-NC-SA',
    'CC BY-NC-ND',
    'All Rights Reserved',
]
# One entry per code in TEMPLATES. Each entry provides the licence url plus
# two display templates: template_img (plain text burned into images) and
# template_web (HTML). Placeholders ({owner}, {title}, {year}, {place},
# {url}, {owner_url}, {title_url}) are filled in elsewhere via str.format.
TEMPLATE_DATA = {
    'CC0': {
        'url': 'http://creativecommons.org/publicdomain/zero/1.0',
        'template_img': """TO THE EXTENT POSSIBLE UNDER LAW, {owner} HAS WAIVED ALL COPYRIGHT AND RELATED OR NEIGHBORING RIGHTS TO "{title}". THIS WORK IS PUBLISHED FROM: {place}. FOR MORE INFORMATION, VISIT {url}.""",
        'template_web': """TO THE EXTENT POSSIBLE UNDER LAW, <a href="{owner_url}">{owner}</a> HAS <a href="{url}" target="_blank" rel="noopener noreferrer">WAIVED ALL COPYRIGHT AND RELATED OR NEIGHBORING RIGHTS</a> TO <a href="{title_url}">{title}</a>. THIS WORK IS PUBLISHED FROM: {place}."""
    },
    'CC BY': {
        'url': 'http://creativecommons.org/licenses/by/4.0',
        'template_img': """ "{title}" IS COPYRIGHT (C) {year} BY {owner}. THIS WORK IS LICENSED UNDER THE CREATIVE COMMONS ATTRIBUTION 4.0 INTERNATIONAL LICENSE. TO VIEW A COPY OF THIS LICENSE, VISIT {url}.""",
        'template_web': """<a href="{title_url}">{title}</a> IS COPYRIGHT (C) {year} BY <a href="{owner_url}">{owner}</a>. THIS WORK IS LICENSED UNDER THE <a href="{url}" target="_blank" rel="noopener noreferrer">CC BY 4.0 INT`L LICENSE</a>."""
    },
    'CC BY-SA': {
        'url': 'http://creativecommons.org/licenses/by-sa/4.0',
        'template_img': """ "{title}" IS COPYRIGHT (C) {year} BY {owner}. THIS WORK IS LICENSED UNDER THE CREATIVE COMMONS ATTRIBUTION-SHAREALIKE 4.0 INTERNATIONAL LICENSE. TO VIEW A COPY OF THIS LICENSE, VISIT {url}.""",
        'template_web': """<a href="{title_url}">{title}</a> IS COPYRIGHT (C) {year} BY <a href="{owner_url}">{owner}</a>. THIS WORK IS LICENSED UNDER THE <a href="{url}" target="_blank" rel="noopener noreferrer">CC BY-SA 4.0 INT`L LICENSE</a>."""
    },
    'CC BY-ND': {
        'url': 'http://creativecommons.org/licenses/by-nd/4.0',
        'template_img': """ "{title}" IS COPYRIGHT (C) {year} BY {owner}. THIS WORK IS LICENSED UNDER THE CREATIVE COMMONS ATTRIBUTION-NODERIVATIVES 4.0 INTERNATIONAL LICENSE. TO VIEW A COPY OF THIS LICENSE, VISIT {url}.""",
        'template_web': """<a href="{title_url}">{title}</a> IS COPYRIGHT (C) {year} BY <a href="{owner_url}">{owner}</a>. THIS WORK IS LICENSED UNDER THE <a href="{url}" target="_blank" rel="noopener noreferrer">CC BY-ND 4.0 INT`L LICENSE</a>."""
    },
    'CC BY-NC': {
        'url': 'http://creativecommons.org/licenses/by-nc/4.0',
        'template_img': """ "{title}" IS COPYRIGHT (C) {year} BY {owner}. THIS WORK IS LICENSED UNDER THE CREATIVE COMMONS ATTRIBUTION-NONCOMMERCIAL 4.0 INTERNATIONAL LICENSE. TO VIEW A COPY OF THIS LICENSE, VISIT {url}.""",
        'template_web': """<a href="{title_url}">{title}</a> IS COPYRIGHT (C) {year} BY <a href="{owner_url}">{owner}</a>. THIS WORK IS LICENSED UNDER THE <a href="{url}" target="_blank" rel="noopener noreferrer">CC BY-NC 4.0 INT`L LICENSE</a>."""
    },
    'CC BY-NC-SA': {
        'url': 'http://creativecommons.org/licenses/by-nc-sa/4.0',
        'template_img': """ "{title}" IS COPYRIGHT (C) {year} BY {owner}. THIS WORK IS LICENSED UNDER THE CREATIVE COMMONS ATTRIBUTION-NONCOMMERCIAL-SHAREALIKE 4.0 INTERNATIONAL LICENSE. TO VIEW A COPY OF THIS LICENSE, VISIT {url}.""",
        'template_web': """<a href="{title_url}">{title}</a> IS COPYRIGHT (C) {year} BY <a href="{owner_url}">{owner}</a>. THIS WORK IS LICENSED UNDER THE <a href="{url}" target="_blank" rel="noopener noreferrer">CC BY-NC-SA 4.0 INT`L LICENSE</a>."""
    },
    'CC BY-NC-ND': {
        'url': 'http://creativecommons.org/licenses/by-nc-nd/4.0',
        'template_img': """ "{title}" IS COPYRIGHT (C) {year} BY {owner}. THIS WORK IS LICENSED UNDER THE CREATIVE COMMONS ATTRIBUTION-NONCOMMERCIAL-NODERIVATIVES 4.0 INTERNATIONAL LICENSE. TO VIEW A COPY OF THIS LICENSE, VISIT {url}.""",
        'template_web': """<a href="{title_url}">{title}</a> IS COPYRIGHT (C) {year} BY <a href="{owner_url}">{owner}</a>. THIS WORK IS LICENSED UNDER THE <a href="{url}" target="_blank" rel="noopener noreferrer">CC BY-NC-ND 4.0 INT`L LICENSE</a>."""
    },
    'All Rights Reserved': {
        'url': '',
        'template_img': """ "{title}" IS COPYRIGHT (C) {year} BY {owner}. ALL RIGHTS RESERVED. PERMISSION TO REPRODUCE CONTENT MUST BE OBTAINED FROM THE AUTHOR.""",
        'template_web': """<a href="{title_url}">{title}</a> IS COPYRIGHT (C) {year} BY <a href="{owner_url}">{owner}</a>. ALL RIGHTS RESERVED. PERMISSION TO REPRODUCE CONTENT MUST BE OBTAINED FROM THE AUTHOR."""
    },
}
def run_checks():
    """Validate TEMPLATES against TEMPLATE_DATA and return the error count.

    Every code listed in TEMPLATES must have an entry in TEMPLATE_DATA, and
    vice versa; each TEMPLATE_DATA entry must contain exactly the required
    keys. Problems are logged via LOG.error.
    """
    errors = 0
    # Each code in TEMPLATES should have a key in TEMPLATE_DATA
    for code in TEMPLATES:
        if code not in TEMPLATE_DATA:
            errors += 1
            LOG.error('Not found in TEMPLATE_DATA: %s', code)
    # Each key in TEMPLATE_DATA should be in TEMPLATES
    # (iterating the dict directly -- the list()/keys() wrappers in the
    # original were redundant materializations)
    for code in TEMPLATE_DATA:
        if code not in TEMPLATES:
            errors += 1
            LOG.error('Not found in TEMPLATES: %s', code)
    # Each element in TEMPLATE_DATA should have exactly the required keys
    required_keys = ['url', 'template_img', 'template_web']
    for code, data in TEMPLATE_DATA.items():
        for k in data:
            if k not in required_keys:
                errors += 1
                LOG.error('Code %s invalid key: %s', code, k)
        for k in required_keys:
            if k not in data:
                errors += 1
                LOG.error('Code %s missing key: %s', code, k)
    return errors
def man_page():
    """Print manual page-like help for this script to stdout."""
    print("""
USAGE
    cc_licences.py [OPTIONS]
OPTIONS
    -c, --clear
        Truncate the cc_licence table before updating table. Warning:
        cc_licence records are referenced by other tables. Truncating may
        corrupt data.
    -d, --dry-run
        Do not create licence records, only report what would be done.
    -h, --help
        Print a brief help.
    --man
        Print extended help.
    -v, --verbose
        Print information messages to stdout.
    --vv,
        More verbose. Print debug messages to stdout.
    """)
def main():
    """Main processing.

    Parses CLI options, optionally truncates the cc_licence table, then
    creates/updates one cc_licence record per entry in TEMPLATES, with the
    record number set to the code's index (ordering matters for display).
    """
    usage = '%prog [options]'
    parser = OptionParser(usage=usage, version=VERSION)
    parser.add_option(
        '-c', '--clear',
        action='store_true', dest='clear', default=False,
        help='Truncate cc_licence table.',
    )
    parser.add_option(
        '-d', '--dry-run',
        action='store_true', dest='dry_run', default=False,
        help='Dry run. Do not create licences. Report what would be done.',
    )
    parser.add_option(
        '--man',
        action='store_true', dest='man', default=False,
        help='Display manual page-like help and exit.',
    )
    parser.add_option(
        '-v', '--verbose',
        action='store_true', dest='verbose', default=False,
        help='Print messages to stdout.',
    )
    parser.add_option(
        '--vv',
        action='store_true', dest='vv', default=False,
        help='More verbose.',
    )
    (options, unused_args) = parser.parse_args()
    if options.man:
        man_page()
        quit(0)
    # NOTE(review): LOG is not defined anywhere visible in this script;
    # presumably it comes in via the star import from gluon -- confirm.
    set_cli_logging(LOG, options.verbose, options.vv)
    if options.clear:
        LOG.debug('Truncating cc_licence table.')
        if not options.dry_run:
            db.cc_licence.truncate()
            db.commit()
        else:
            LOG.debug('Dry run. No changes made.')
    LOG.info('Started.')
    # Abort before touching the db if TEMPLATES/TEMPLATE_DATA disagree.
    errors = run_checks()
    if errors:
        LOG.error('Aborting due to errors.')
        exit(1)
    for number, code in enumerate(TEMPLATES):
        template_dict = TEMPLATE_DATA[code]
        template_dict['code'] = code
        # number is the list index -- it drives the display order in the ddm.
        template_dict['number'] = number
        template = Storage(template_dict)
        try:
            cc_licence = CCLicence.by_code(code)
        except LookupError:
            cc_licence = None
        if cc_licence:
            LOG.debug('Updating: %s', template.code)
        else:
            LOG.debug('Adding: %s', template.code)
            if not options.dry_run:
                cc_licence = CCLicence.from_add(dict(code=template.code))
        if not cc_licence:
            raise LookupError('cc_licence not found, code: {code}'.format(
                code=template.code))
        if not options.dry_run:
            data = dict(
                code=template.code,
                number=template.number,
                url=template.url,
                template_img=template.template_img,
                template_web=template.template_web
            )
            CCLicence.from_updated(cc_licence, data)
        else:
            LOG.debug('Dry run. No changes made.')
    LOG.info('Done.')
if __name__ == '__main__':
    # pylint: disable=broad-except
    # SystemExit (from quit()/exit()) passes through silently; any other
    # failure prints the traceback and exits non-zero.
    try:
        main()
    except SystemExit:
        pass
    except Exception:
        traceback.print_exc(file=sys.stderr)
        exit(1)
| StarcoderdataPython |
3322779 | <filename>ofanalysis/notion/notion_fund_pool_update.py<gh_stars>0
from datetime import datetime
from loguru import logger
import requests
from ofanalysis import const
import ofanalysis.utility as ut
from ofanalysis.notion import notion_operation
import pandas as pd
def update_profit_ratio_to_notion():
    """Refresh the profit-ratio column of a Notion fund-pool database.

    For every page (fund) in the Notion database, computes the cumulative
    return of the fund since its pool-entry date from the MongoDB NAV
    history, then writes it back to the page.
    """
    def get_profit_ratio(fund_code: str, start_date: str) -> float:
        """Return (latest_nav - start_nav) / start_nav over (start_date, today].

        Returns the sentinel -999.0 when no NAV rows exist in the range or
        the computed ratio is NaN.
        """
        # Dates are compared as 'YYYYMMDD' strings, matching how nav_date is
        # stored in the collection.
        today = datetime.today().date().strftime('%Y%m%d')
        start_date = pd.to_datetime(start_date).strftime('%Y%m%d')
        result = ut.db_get_dict_from_mongodb(
            mongo_db_name=const.MONGODB_DB_TUSHARE,
            col_name=const.MONGODB_COL_TS_FUND_NAV,
            query_dict={
                'ts_code': f'{fund_code}.OF',
                'nav_date': {'$gt': start_date, '$lte': today},
            }
        )
        if not result:  # 没取到 (no rows found -> sentinel)
            return -999.0
        # Sorted newest-first: iloc[0] is the latest accumulated NAV,
        # iloc[-1] the earliest in the window.
        nav_series = pd.DataFrame(result).sort_values(
            by='nav_date', ascending=False)['accum_nav']
        profit_ratio = (nav_series.iloc[0] -
                        nav_series.iloc[-1])/nav_series.iloc[-1]
        if pd.isna(profit_ratio):
            return -999.0
        return round(profit_ratio, 4)
    # Pull all pages (funds) from the hard-coded Notion database.
    response_dict = notion_operation.get_notion_database(
        notion_db_id='8aa76020501c4ca18c3c4dace60ee0ea'
    )
    # update profit ratio for each fund
    for item in response_dict:
        page_id = item['id']
        # Property names are in Chinese: 基金代码 = fund code,
        # 入池日期 = pool-entry date, 涨跌幅 = change ratio.
        fund_code = item['properties']['基金代码']['rich_text'][0]['plain_text']
        logger.info(f'Processing fund code ->{fund_code}...')
        start_date = item['properties']['入池日期']['date']['start']
        profit_ratio = get_profit_ratio(fund_code, start_date)
        notion_operation.update_notion_page(
            content_dict={
                'properties': {
                    '涨跌幅': {
                        'number': profit_ratio
                    }
                }
            },
            page_id=page_id
        )
| StarcoderdataPython |
1621727 | <reponame>Roibal/Geotechnical_Engineering_Python_Code<filename>Ventilation-Mining-Engineering/VentSurveyProgram.py<gh_stars>1-10
class VentilationSurvey(object):
    """
    The purpose of this program is to automate the data analysis portion of a ventilation survey performed in
    an underground mine. Accepts a .csv of input parameters from a ventilation survey data (see format) which
    is then used to calculate relevant parameters of ventilation survey.
    Main Resource is "Mine Ventilation And Air Conditioning" 3rd Edition by Mutmanskiy, Hartman, Ramani & Wang.
    Coded 5/8/2016 by <NAME>
    Copyright 2016
    All Rights Reserved
    """
    def __init__(self, name="Example Mine Vent Survey 1"):
        """
        Constants from page 13, Mine Ventilation and Air Conditioning
        All Units used are Imperial, Metric Units and Values in commented section of each variable below
        """
        self.MolecularWeight = 28.97  # (M) molecular weight of air
        self.SpecificGravity = 1  # (S)
        self.GasConstant = 53.35  # (R) ft*lb/lb mass*DegreeRankine (287.045 J/kg*K)
        self.StandardSpecificWeight = 0.0750  # (w) lb/ft**3 Standard Conditions (1.2014 kb/m**3)
        self.StandardBarometricPressure = 29.92  # (pb) in. Hg or 14.696 psi (760 mm Hg or 101.33 kPa)
        self.SpecHeatConstPress = 0.2403  # (cp) Btu/lb*F (1.006 kJ/kg*C)
        self.SpecHeatConstVolume = 0.1714  # (cv) Btu/lb*F (0.717 kJ/kg*C)
        self.RatioSpecHeats = 1.4012  # (Gamma) constant pressure and volume for any diatomic gas
        # pb = pa + pv (barometric pressure is sum of partial pressures of dry air (pa) and water vapor (pv))
        self.name = name
        # A blank list that will store one dictionary per survey data point.
        self.vent_points = []
        print("Welcome to the Mine Ventilation Survey Python Toolbox written by <NAME>\n")
    def __str__(self):
        # Bug fix: __str__ must return a string. The original printed each
        # point and implicitly returned None, so print(survey) showed "None".
        return "\n".join(str(item) for item in self.vent_points)
    def ManometerSurvey(self, position, elevation, pressure, drybulbf, wetbulbf):
        """Direct-method head-loss calculation with a manometer.

        Based on Example 6.3, page 208, Mine Ventilation and Air Conditioning.
        FIXME(review): Ws and Wh are undefined (specific weights needed for
        the elevation term) and the parameters are currently unused, so this
        method raises NameError when called -- it is an unfinished port of
        the textbook example.
        """
        # Hl12 = (Hs1-Hs2)+(Hz1-Hz2)  # Head Loss (1->2) components
        Z1 = 1
        Z2 = 2
        p2 = 13.773
        # Page 210: p3 = p2 + manometer reading (1.51 in. w.g. converted to psi).
        p3 = (13.773 + (1.51/27.69))
        Hl12 = (144/5.2)*(p3-p2)+(1/5.2)*(Z1-Z2)*(Ws-Wh)
        return Hl12
    def Mod_AddData(self, station_no, desc, time_r, rar, time_b, bar, alt_diff, change_alt, feet_air, head, abs_head):
        """Add or modify one survey data point (not yet implemented)."""
        if station_no.isalpha():
            # Feature will be added that for loops through 1st entered line and names the following dictionary
            # from the values of first column
            pass
        else:
            # Bug fix: the original `else:` branch was empty (a def followed
            # immediately), which is a SyntaxError. Numeric-station storage
            # is still to be implemented.
            pass
    def LoadData(self, loadfile="Test.csv"):
        """Read survey rows from *loadfile* (CSV). Parsing is incomplete."""
        with open(loadfile, 'r') as file1:
            for line1 in file1:
                # TODO(review): `data` is computed but never stored; the
                # loader only splits each line at present.
                data = line1.split(',')
    def List(self):
        """Print every stored survey data point, one per line."""
        for item in self.vent_points:
            print(item)
    def SpecWeightAir(self):
        """Placeholder: specific weight of air from dry/wet bulb temps."""
        # method will be added which calculates the Specific Weight of Air which is used to calculate the proper head
        # based upon Dry Bulb Temperature and Wet Bulb Temperature
        pass
    def ConvertHead(self):
        """Placeholder: convert specific weight to inches of water head."""
        pass
    def PressureChange(self):
        """Placeholder: pressure change calculation."""
        pass
    def SaveList(self):
        """Placeholder: persist the survey points."""
        pass
def main():
    # Demo driver: build a survey, load the sample CSV, list the points,
    # then enter an interactive prompt loop.
    ExampleMine = VentilationSurvey()
    ExampleMine.LoadData()
    ExampleMine.List()
    while 1:
        action = input("What would you like to do? Options: Add Data (A), Load Data (L), List Data (LD), Save (S)")
        if len(action)>1:
            pass
    # NOTE(review): unreachable -- the while-loop above has no break.
    print(ExampleMine.name)
if __name__=="__main__":
main()
| StarcoderdataPython |
def is_iterable(item):
    """Return True when *item* supports iteration, False otherwise."""
    try:
        iter(item)
    except TypeError:
        return False
    return True
def as_json(item):
    """Serialize a model instance, or a collection of instances keyed by id."""
    # Inlined iterability probe (formerly the is_iterable helper): a single
    # non-iterable instance is serialized directly.
    try:
        iter(item)
    except TypeError:
        return item.serialize()
    return {member.id: member.serialize() for member in item}
# @auth.error_handler()
# def error_handler(callback):
# return jsonify({
# 'ok': False,
# 'message': 'Missing Authorization Header'
# }), 401
#
# def is_correct_token():
# auth_header = request.headers.get('Authorization')
# if auth_header:
# try:
# auth_token = auth_header.split(" ")[1]
# except IndexError:
# responseObject = {
# 'status': 'fail',
# 'message': 'Bearer token malformed.'
# }
# return False
# else:
# auth_token = ''
# def auth_required(func):
# @wraps(func)
# def decorated(*args, **kwargs):
# if request.method in EXEMPT_METHODS:
# return func(*args, **kwargs)
# elif not current_user.is_authenticated:
# return current_app.login_manager.unauthorized()
# return func(*args, **kwargs)
#
# return decorated
| StarcoderdataPython |
61338 | <gh_stars>1-10
"""Tests the feature extraction"""
# Tests are verbose.
# pylint: disable=too-many-statements, too-many-locals
import math
import numpy as np
from pytest import approx
from infrastructure import InfrastructureNetwork
from overlay import OverlayNetwork
from embedding import PartialEmbedding, ENode
from features import features_by_name
# easiest to do everything in one function, although it isn't pretty
def test_features():
    """Tests feature extractions on some hand-verified examples.

    Builds a small infrastructure + overlay network, takes a couple of
    embedding actions, then checks node features and edge features against
    hand-computed values.
    """
    # --- infrastructure: two sources, one sink, one intermediate relay node
    infra = InfrastructureNetwork()
    nso1 = infra.add_source(
        name="nso1", pos=(0, 0), transmit_power_dbm=30, capacity=1.5
    )
    nso2 = infra.add_source(
        name="nso2", pos=(0, 1), transmit_power_dbm=30, capacity=2.8
    )
    nsi = infra.set_sink(
        name="nsi", pos=(2, 0), transmit_power_dbm=30, capacity=np.infty
    )
    ninterm = infra.add_intermediate(
        name="ninterm", pos=(0, 1), transmit_power_dbm=30, capacity=0
    )
    # --- overlay: two sources, four intermediates, one sink
    overlay = OverlayNetwork()
    bso1 = overlay.add_source(name="bso1", datarate=5, requirement=1)
    bso2 = overlay.add_source(name="bso2", datarate=8, requirement=2)
    bin1 = overlay.add_intermediate(name="bin1", datarate=5, requirement=3)
    bin2 = overlay.add_intermediate(name="bin2", datarate=5, requirement=0)
    bin3 = overlay.add_intermediate(name="bin3", datarate=42, requirement=0.7)
    bin4 = overlay.add_intermediate(name="bin4", datarate=0, requirement=0.2)
    bsi = overlay.set_sink(name="bsi", datarate=5, requirement=4)
    overlay.add_link(bso1, bin1)
    overlay.add_link(bso1, bin2)
    overlay.add_link(bin1, bsi)
    overlay.add_link(bin2, bsi)
    overlay.add_link(bso2, bsi)
    overlay.add_link(bso2, bin3)
    overlay.add_link(bin3, bin4)
    overlay.add_link(bin4, bsi)
    embedding = PartialEmbedding(
        infra, overlay, source_mapping=[(bso1, nso1), (bso2, nso2)]
    )
    eso1 = ENode(bso1, nso1)
    eso2 = ENode(bso2, nso2)
    esi = ENode(bsi, nsi)
    erelay = ENode(bso1, ninterm, bin1)
    ein = ENode(bin1, nsi)
    erelay_unchosen = ENode(bso2, ninterm, bsi)
    # Embed bso1 -> (relay) -> bin1 across timeslots 0 and 1.
    assert embedding.take_action(eso1, erelay, 0)
    assert embedding.take_action(erelay, ein, 1)
    feature_dict = features_by_name()
    def node_feature(name, node):
        """Return the tuple produced by node feature *name* for *node*."""
        assert node in embedding.nodes()
        return tuple(
            feature_dict["node_" + name].process_node(embedding, node)
        )
    # --- node features
    assert node_feature("pos", ein) == (2, 0)  # pos of nsi
    assert node_feature("relay", erelay) == (1.0,)
    assert node_feature("relay", erelay_unchosen) == (1.0,)
    assert node_feature("relay", eso1) == (0.0,)
    assert node_feature("relay", ein) == (0.0,)
    assert node_feature("sink", eso2) == (0.0,)
    assert node_feature("sink", ein) == (0.0,)
    assert node_feature("sink", esi) == (1.0,)
    num_sinks = 0
    for node in embedding.nodes():
        if node_feature("sink", node)[0] == 1.0:
            num_sinks += 1
    assert num_sinks == 1
    # this is always pretending the node isn't already chosen, so the
    # requirement of the block in question is always exempt
    assert node_feature("remaining_capacity", eso1)[0] == approx(1.5)
    assert node_feature("remaining_capacity", eso2)[0] == approx(2.8)
    assert node_feature("remaining_capacity", esi)[0] == np.infty
    assert node_feature("remaining_capacity", erelay)[0] == approx(0)
    assert node_feature("remaining_capacity", ein)[0] == np.infty
    assert node_feature("remaining_capacity", erelay_unchosen)[0] == approx(0)
    assert node_feature("weight", eso1)[0] == approx(1)
    assert node_feature("weight", esi)[0] == approx(4)
    assert node_feature("weight", erelay)[0] == approx(0)
    # the block itself should not be counted if it is already embedded
    # therefore, nso1 has a remaining capacity of 1.5 (instead of 0.5)
    assert node_feature("compute_fraction", eso1)[0] == approx(1 / 1.5)
    assert node_feature("compute_fraction", esi)[0] == 0  # /infty
    assert node_feature("compute_fraction", erelay)[0] == approx(0)
    # capacity inf can always embed everything
    assert node_feature("options_lost", esi)[0] == 0
    # has remaining capacity .5 after, can embed bin2 or bin4; before
    # also bin4
    assert node_feature("options_lost", eso1)[0] == 1
    # has remaining capacity .8 after, can embed bin2 or bin3 (just as
    # before)
    assert node_feature("options_lost", eso2)[0] == 0
    # remaining capacity of .1 after, can only embed bin2 (before also
    # bin4)
    assert node_feature("options_lost", ENode(bin3, nso2))[0] == 1
    def edge_feature(name, u, v, t):
        """Return the tuple produced by edge feature *name* for (u, v, t)."""
        assert (u, v, t) in embedding.graph.edges(keys=True)
        return tuple(
            feature_dict["edge_" + name].process_edge(embedding, u, v, t)
        )
    # --- edge features
    assert edge_feature("timeslot", eso1, erelay, 0)[0] == 0
    assert edge_feature("timeslot", eso2, esi, 2)[0] == 2
    assert edge_feature("chosen", eso1, erelay, 0)[0] == 1
    assert edge_feature("chosen", eso2, esi, 2)[0] == 0
    assert edge_feature("capacity", eso2, esi, 2)[0] == approx(17.61, abs=0.1)
    # loops are special-cased; pretend perfect match with requirement
    assert edge_feature("capacity", ein, esi, 2)[0] == 3.0
    assert edge_feature("additional_timeslot", eso1, erelay, 0)[0] == 0
    assert edge_feature("additional_timeslot", eso2, esi, 2)[0] == 1
    assert edge_feature("datarate_requirement", eso1, erelay, 0)[0] == approx(
        5
    )
    assert edge_feature("datarate_requirement", erelay, ein, 1)[0] == approx(5)
    # Capacity if nothing else is sending (which we're assuming, since actually
    # only nso1 is sending and we're ignoring the current edge)
    sinr = infra.sinr(eso1.node, erelay.node, senders=frozenset())
    capacity = 1 * math.log(1 + 10 ** (sinr / 10), 2)
    assert edge_feature("datarate_fraction", eso1, erelay, 0)[0] == approx(
        5 / capacity
    )
    # in this case there actually is nothing currently sending
    sinr = infra.sinr(eso2.node, esi.node, senders=frozenset())
    capacity = 1 * math.log(1 + 10 ** (sinr / 10), 2)
    assert edge_feature("datarate_fraction", eso2, esi, 2)[0] == approx(
        8 / capacity
    )
    # this is an edge within a node, nothing is actually sent
    assert edge_feature("datarate_fraction", ein, esi, 2)[0] == 0
    # since this is the only chosen edge from that block, no broadcast
    assert edge_feature("is_broadcast", eso1, erelay, 0)[0] == 0
    # since eso1 -> erelay1 is already chosen, broadcast
    assert edge_feature("is_broadcast", eso1, ENode(bin2, nsi), 0)[0] == 1
    print(embedding.why_infeasible(eso2, ENode(bin3, nso2), 0))
    # take a loop in nso2
    assert embedding.take_action(eso2, ENode(bin3, nso2), 2)
    # this doesn't count as broadcast, looping is not actually sending
    assert edge_feature("is_broadcast", eso2, esi, 2)[0] == 0
    # make sure broadcasting works even with relays
    # on the same node as the already taken relay that sends bso1s data
    # to ein1 in ts 1
    erelay2 = ENode(bso1, ninterm, bin2)
    assert edge_feature("is_broadcast", erelay2, ENode(bin2, nso2), 1)[0] == 1
| StarcoderdataPython |
3319775 | #!/usr/bin/env python
from flask import Flask, jsonify, render_template, request, send_file, make_response
import json
from influxdb import InfluxDBClient
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from io import BytesIO
import matplotlib.dates as mdates
from scipy.signal import savgol_filter
import functions as fn
from datetime import datetime
# Module-level configuration and service handles for the thermostat app.
numDecimals2Round = 1
# Sensor node addresses and the rooms they live in (parallel lists).
IPs = ['192.168.1.213','192.168.1.214','192.168.1.215','192.168.1.216']
rooms = ['kitchen','office','TV','bedroom']
# 'avg' is a pseudo-room selecting the average of all sensors.
options = rooms + ['avg']
influx_time_format_string_code = '%Y-%m-%dT%H:%M:%S'
# InfluxDB connection used for all temperature reads/writes.
client = InfluxDBClient(host='localhost', port=8086)
client.switch_database('temperature')
app = Flask(__name__)
path = '/home/albert/config_web/'
# Shared application state: cached temps and thermostat set-points.
flask_info = fn.flask_info(client, IPs, rooms,influx_time_format_string_code)
flask_info.retrieve_latest_influxdb_setTemps()
@app.route('/interactive')
def interactive():
    """Render the interactive thermostat page.

    Refreshes the cached room temperatures (in Fahrenheit) and the latest
    set-points from InfluxDB before handing ``flask_info`` to the template.
    """
    flask_info.retrieve_influxdb_temps(fahrenheit=True)
    flask_info.retrieve_latest_influxdb_setTemps()
    return render_template("interactive.html", flask_info = flask_info)
@app.route('/background_process',methods=['POST'])
def background_process():
    """AJAX endpoint that updates the thermostat set-points.

    Expects a form field ``data`` whose value is whitespace-separated:
    either a single token (the new ``temp2use`` source, e.g. a room name or
    'avg'), or three tokens where token[1] selects 'high'/'low' and token[2]
    is '+' or '-' to bump that set-point by one degree.
    NOTE(review): inferred from the parsing below -- confirm against the
    JavaScript in interactive.html.  Requests without ``data`` will raise.
    """
    req = request.form.get('data').split()
    print(req)  # debug: show the raw command received from the page
    if len(req) == 1:
        flask_info.thermoSetTemps['temp2use'] = req[0]
    else:
        # req[1] is 'high' or 'low'; keys are 'hightemp' / 'lowtemp'.
        if req[2] == "+":
            flask_info.thermoSetTemps[req[1] + "temp"] += 1
        if req[2] == "-":
            flask_info.thermoSetTemps[req[1] + "temp"] -= 1
    # Persist the change, then re-read so the response reflects stored state.
    flask_info.write_logic_to_influxdb()
    flask_info.retrieve_latest_influxdb_setTemps()
    return jsonify(flask_info.thermoSetTemps)
@app.route('/thermostat_plot_png/<hours>')
def thermostat_plot_png(hours):
    """Render a PNG plot of per-room temperatures, their average, and the
    high/low set-points over the last ``hours`` hours.

    The currently selected ``temp2use`` source is drawn with a thicker line.
    Returns the image via ``send_file`` with mimetype image/png.
    """
    # retrieve latest readings and set-point history for the requested window
    flask_info.retrieve_influxdb_temps(timerange=hours,fahrenheit=True)
    flask_info.retrieve_all_influxdb_setTemps(timerange=hours)
    fig, ax = plt.subplots()
    for room in rooms:
        dates = mdates.date2num(flask_info.all_temps[room]['time'])
        # Savitzky-Golay window scaled to the data size (1/5 of the samples)
        num_data_points = len(flask_info.all_temps[room]['temp'])
        window_filter = int(num_data_points/5)
        # savgol_filter requires an odd window length
        if (window_filter % 2 == 0): window_filter += 1
        yhat = savgol_filter(flask_info.all_temps[room]['temp'], window_filter, 3)
        # Thicker line for the room currently driving the thermostat.
        if flask_info.thermoSetTemps['temp2use'] == room:
            ax.plot_date(dates,yhat,'-',linewidth=3)
        else:
            ax.plot_date(dates,yhat,'-')
        # ax.plot_date(dates,flask_info.all_temps[room]['temp'],'o',ms=0.5)
    # also show the average across all rooms: merge, sort by time, smooth
    all_temps = np.concatenate([flask_info.all_temps[room]['temp'] for room in rooms])
    all_dates = mdates.date2num(np.concatenate([flask_info.all_temps[room]['time'] for room in rooms]))
    # print(all_dates)
    # ax.plot_date(all_dates,all_temps,'o',ms=0.5)
    _sorted = np.argsort(all_dates)
    all_dates = all_dates[_sorted]
    all_temps = all_temps[_sorted]
    # Heavier smoothing for the average (window = half the samples).
    num_data_points = len(all_temps)
    window_filter = int(num_data_points/2)
    if (window_filter % 2 == 0): window_filter += 1
    yhat = savgol_filter(all_temps, window_filter, 3)
    # ax.plot_date(all_dates,all_temps,'o',ms=0.7)
    if flask_info.thermoSetTemps['temp2use'] == 'avg':
        ax.plot_date(all_dates,yhat,'--',linewidth=3)
    else:
        ax.plot_date(all_dates,yhat,'--')
    # --------- PLOT SET TEMPS ----------------
    dates = mdates.date2num(flask_info.all_setTemps['time'])
    # append the current time so the set-point steps extend to "now"
    dates = np.append(dates,mdates.date2num(datetime.utcnow()))
    _hightemp = flask_info.all_setTemps['hightemp']
    _lowtemp = flask_info.all_setTemps['lowtemp']
    # repeat the last value so the appended "now" point has a y value
    _hightemp = np.append(_hightemp,_hightemp[-1])
    _lowtemp = np.append(_lowtemp,_lowtemp[-1])
    ax.plot_date(dates,_hightemp,'-',c='blue')
    ax.plot_date(dates,_lowtemp,'-',c='red')
    ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    plt.tight_layout()
    canvas = FigureCanvas(fig)
    img = BytesIO()
    fig.savefig(img)
    img.seek(0)
    # all figs are kept in memory, close so that we dont keep all of them (every 5 seconds!)
    plt.close(fig)
    return send_file(img, mimetype='image/png')
if __name__ == "__main__":
    # Bind to all interfaces so the thermostat UI is reachable over the LAN.
    app.run(host='0.0.0.0')
| StarcoderdataPython |
1788148 | import typing
import asyncpg
from databases.core import Connection
from tvsched.adapters.repos.actor.models import ActorRecord
from tvsched.adapters.repos.actor.utils import (
map_actor_record_to_model,
)
from tvsched.application.exceptions.actor import (
ActorAlreadyInShowCastError,
ActorNotFoundError,
ActorOrShowNotFoundError,
)
from tvsched.application.models.actor import ActorAdd, ActorInShowCast, ActorUpdate
from tvsched.entities.actor import Actor
class ActorRepo:
    """Data-access layer for actors and their membership in show casts."""

    def __init__(self, db: Connection) -> None:
        self._db = db

    async def get(self, actor_id: int) -> Actor:
        """Returns actor from repo by `actor_id`.

        Args:
            actor_id (int)

        Raises:
            ActorNotFoundError: will be raised if actor with id `actor_id` not in repo

        Returns:
            Actor
        """
        query = """
            SELECT * FROM actors
            WHERE id = :id;
        """
        values = dict(id=actor_id)

        record = await self._db.fetch_one(query, values=values)
        if record is None:
            raise ActorNotFoundError(actor_id=actor_id)

        # fetch_one returns a generic mapping; narrow it for the mapper/typing.
        actor_record = typing.cast(ActorRecord, record)
        actor = map_actor_record_to_model(actor_record)

        return actor

    async def add(self, actor: ActorAdd) -> None:
        """Adds new actor to repo.

        Args:
            actor (ActorAdd): data for adding actor to repo.
        """
        query = """
            INSERT INTO actors (name, image_url)
            VALUES (:name, :image_url);
        """
        values = dict(
            name=actor.name,
            image_url=actor.image_url,
        )

        await self._db.execute(query, values=values)

    async def update(self, actor: ActorUpdate) -> None:
        """Updates actor in repo.

        Only the fields of `actor` that are not None are written.  Calling
        with nothing to change is a no-op.

        Args:
            actor (ActorUpdate): data for updating actor in repo
        """
        columns_to_update = []
        values = {}

        name = actor.name
        if name is not None:
            columns_to_update.append("name = :name")
            values["name"] = name

        image_url = actor.image_url
        if image_url is not None:
            columns_to_update.append("image_url = :image_url")
            values["image_url"] = image_url

        if not columns_to_update:
            # Previously this produced "UPDATE actors SET  WHERE id = ..."
            # (invalid SQL) when both fields were None; treat it as a no-op.
            return

        query = f"""
            UPDATE actors
            SET {", ".join(columns_to_update)}
            WHERE id = :id;
        """
        values["id"] = actor.id

        await self._db.execute(query, values=values)

    async def delete(self, actor_id: int) -> None:
        """Deletes actor with id `actor_id` from repo.

        Deleting a non-existent id is silently a no-op.

        Args:
            actor_id (int)
        """
        query = """
            DELETE FROM actors
            WHERE id = :id;
        """
        values = dict(id=actor_id)

        await self._db.execute(query, values=values)

    async def add_actor_to_show_cast(self, actor_in_cast: ActorInShowCast) -> None:
        """Adds actor with id `actor_in_cast.actor_id` to show cast with id `actor_in_cast.show_id`.

        Args:
            actor_in_cast (ActorInShowCast): data for adding actor to show cast

        Raises:
            ActorOrShowNotFoundError: the actor or the show does not exist
            ActorAlreadyInShowCastError: the pairing already exists
        """
        query = """
            INSERT INTO actors_to_shows (show_id, actor_id)
            VALUES (:show_id, :actor_id);
        """
        values = dict(show_id=actor_in_cast.show_id, actor_id=actor_in_cast.actor_id)

        # Translate low-level constraint violations into domain exceptions.
        try:
            await self._db.execute(query, values=values)
        except asyncpg.exceptions.ForeignKeyViolationError:
            raise ActorOrShowNotFoundError(actor_in_cast)
        except asyncpg.exceptions.UniqueViolationError:
            raise ActorAlreadyInShowCastError(actor_in_cast)

    async def delete_actor_from_show_cast(self, actor_in_cast: ActorInShowCast) -> None:
        """Deletes actor with id `actor_in_cast.actor_id` from show cast with id `actor_in_cast.show_id`.

        Args:
            actor_in_cast (ActorInShowCast): data for deleting actor from show cast
        """
        query = """
            DELETE FROM actors_to_shows
            WHERE show_id = :show_id AND actor_id = :actor_id;
        """
        values = dict(show_id=actor_in_cast.show_id, actor_id=actor_in_cast.actor_id)

        await self._db.execute(query, values=values)
| StarcoderdataPython |
139658 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import linear_model
# Consistent plot theme for every figure produced by this script.
plt.style.use('fivethirtyeight')

# Load the training set: columns 0-1 are the exam scores (features),
# column 2 is the admission label (0 or 1).
datafile = 'datafile.txt'
data = np.loadtxt(datafile,delimiter=',',usecols=(0,1,2),unpack=True)
X = np.transpose(np.array(data[:-1]))
Y = np.transpose(np.array(data[-1:]))
# Split the samples by class for plotting.  `xrange` only exists on
# Python 2; `range` behaves identically here and also works on Python 3.
pos = np.array([X[i] for i in range(X.shape[0]) if Y[i] == 1])
neg = np.array([X[i] for i in range(X.shape[0]) if Y[i] == 0])
def plot_data():
    """Scatter the admitted/not-admitted samples and label the axes.

    Reads the module globals ``pos``, ``neg``, ``xx`` and ``yy``.  Because
    ``xx``/``yy`` are only created later at module level (by the meshgrid
    step), this function must not be called before that point.
    """
    #plt.scatter(X[:,0],X[:,1],c=Y,edgecolors='k',cmap=plt.cm.Paired)
    plt.plot(pos[:,0],pos[:,1],'wo',label='Admitted')
    plt.plot(neg[:,0],neg[:,1],'bo',label='Not Admitted')
    plt.xlabel('Exam 1 Score')
    plt.ylabel('Exam 2 Score')
    # Clamp the view to the decision-surface grid built at module level.
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.legend()
    plt.grid(True)
# Flatten the label row into the 1-D vector shape sklearn expects.
y_arr = data[-1:]
y_arr = y_arr.flatten()
#print y_arr.flatten()
#print y_arr
# step value for creating the meshgrid (grid resolution)
h = 0.5
# Large C => very weak regularisation.
logreg = linear_model.LogisticRegression(C=1e5)
logreg.fit(X,y_arr)
# Build a grid spanning the data (with a .5 margin) and classify every
# grid point to visualise the decision boundary.
x_min, x_max = X[:,0].min() - .5, X[:,0].max() + .5
y_min, y_max = X[:,1].min() - .5, X[:,1].max() + .5
xx,yy = np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))
Z = logreg.predict(np.c_[xx.ravel(),yy.ravel()])
Z = Z.reshape(xx.shape)
plt.figure(figsize=(10,8))
# Coloured background = predicted class region for each grid cell.
plt.pcolormesh(xx,yy,Z,cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plot_data()  # overlay the actual samples (uses xx/yy defined above)
plt.show()
| StarcoderdataPython |
3370603 | <reponame>brendanhoran/nmea_faker
# Author <NAME>
# License : BSD 3-Clause
# Description : Get a NMEA setence from the NMEA faker sever
import wifi_setup
import nmea_client
import time
# Join the wireless network before the NMEA client tries to reach the server.
wifi_setup = wifi_setup.WIFI_setup('YOUR_SSID','YOUR_PASSWORD')
wifi_setup.connect()
# format is, RX pin, TX pin, IP address and port of server
nmea_client = nmea_client.NMEA_client(25,26,'127.0.0.1:5000')
# Give the network/UART a moment to settle before the first request.
time.sleep(10)
print("Starting NMEA client, REPL will not spawn")
task_start_time = time.time()
# Poll once a minute forever.  The sleep expression subtracts the time the
# request itself took (modulo 60) so iterations stay aligned to the minute
# instead of drifting.
while True:
    nmea_client.get_sentence()
    time.sleep(60.0 - ((time.time() - task_start_time) % 60.0))
| StarcoderdataPython |
3313083 | <reponame>jianershi/algorithm<gh_stars>1-10
"""
404. Subarray Sum II
https://www.lintcode.com/problem/subarray-sum-ii/description
prefix sum definition including itself
"""
class Solution:
    """
    @param A: An integer array
    @param start: An integer
    @param end: An integer
    @return: the number of possible answer
    """
    def subarraySumII(self, A, start, end):
        """Count non-empty subarrays of A whose sum lies in [start, end].

        Uses a one-based prefix-sum array with two monotone pointers.  This
        two-pointer sweep is valid because the problem guarantees positive
        elements, so prefix sums are strictly increasing.  O(n) time after
        the O(n) prefix pass.
        """
        n = len(A)
        # prefix[i] = sum(A[0:i]).  The leading 0 removes the need to
        # special-case subarrays starting at index 0.  (The previous version
        # indexed prefix_sum[l - 1], which wrapped around to the *last*
        # element when l == 0 and under-counted, e.g. A=[1], start=end=1
        # returned 0 instead of 1.)
        prefix = [0] * (n + 1)
        for i in range(n):
            prefix[i + 1] = prefix[i] + A[i]
        l = r = 0
        count = 0
        for j in range(1, n + 1):
            # Smallest l with sum(A[l:j]) <= end.
            while l < j and prefix[j] - prefix[l] > end:
                l += 1
            # Smallest r with sum(A[r:j]) < start.
            while r < j and prefix[j] - prefix[r] >= start:
                r += 1
            # Every left endpoint in [l, r) gives a sum within [start, end].
            count += r - l
        return count
| StarcoderdataPython |
28982 | import os
import enum
import hashlib
from urllib.parse import urljoin
from flask import url_for, current_app as app
from mcarch.app import db, get_b2bucket
class StoredFile(db.Model):
    """Represents a file stored in some sort of storage medium."""
    __tablename__ = 'stored_file'
    id = db.Column(db.Integer, primary_key=True)
    # Original filename of the stored file.
    name = db.Column(db.String(80), nullable=False)
    # Hex SHA-256 digest of the file contents (see sha256_file below).
    sha256 = db.Column(db.String(130), nullable=False)
    # User credited with the upload; nullable for system/anonymous uploads.
    upload_by_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=True)
    upload_by = db.relationship('User')

    # Path to this file within the B2 bucket. Null if file is not on B2.
    b2_path = db.Column(db.String(300), nullable=True)

    def b2_download_url(self):
        """Gets the URL to download this file from the archive's B2 bucket.

        Returns None implicitly when the file is not stored on B2
        (``b2_path`` is null/empty).
        """
        if self.b2_path:
            return urljoin(app.config['B2_PUBLIC_URL'], self.b2_path)
def gen_b2_path(filename, sha):
    """Return the bucket-relative storage path for a file: ``<sha>/<filename>``.

    Keying the directory on the content hash keeps uploads of identically
    named files from colliding.
    """
    remote_path = os.path.join(sha, filename)
    return remote_path
def sha256_file(path):
    """Return the hex SHA-256 digest of the file at *path*.

    Reads in fixed-size chunks so arbitrarily large files can be hashed
    without loading them fully into memory.
    """
    CHUNK = 65536
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        # iter() with a sentinel yields chunks until read() returns b''.
        for block in iter(lambda: f.read(CHUNK), b''):
            digest.update(block)
    return digest.hexdigest()
def upload_b2_file(path, name, user=None):
    """Upload a local file to B2, record it in the DB, and return the StoredFile.

    Commits the database session as a side effect.

    :param path: path to the file on disk
    :param name: name the file should have in B2
    :param user: user to credit for the upload (may be ``None``)
    """
    bucket = get_b2bucket()
    digest = sha256_file(path)
    remote_path = gen_b2_path(name, digest)
    bucket.upload_local_file(path, remote_path)
    record = StoredFile(name=name, sha256=digest, b2_path=remote_path,
                        upload_by=user)
    db.session.add(record)
    db.session.commit()
    return record
| StarcoderdataPython |
1764501 | from copy import deepcopy
from django.urls import reverse
from django.utils import timezone
from core.utils import getEbayStrGotDateTimeObj
from .base import SetupUserItemsFoundAndUserFindersWebTest
from ..forms import ( ItemFoundForm, UserItemFoundUploadForm,
UserItemFoundForm )
from brands.models import Brand
from categories.models import Category
from models.models import Model
from ..models import UserItemFound
from pprint import pprint
# need to test form validation for new hits & updated hits
# helpful:
# https://stackoverflow.com/questions/2257958/django-unit-testing-for-form-edit
class TestAddingEditingUserHits( SetupUserItemsFoundAndUserFindersWebTest ):
    """Form-validation tests for adding and editing user hits (UserItemFound).

    The base class supplies the fixtures used here: ``user1`` together with
    related Brand / Category / Model / Search / UserItemFound rows.
    (Large blocks of exploratory, commented-out scenarios were removed from
    this test; see VCS history for the model-selection variants.)
    """

    def test_add_new_hit( self ):
        """UserItemFoundForm validates a hit for an existing item with no model.

        A UserItemFound row is stored directly first, then the same hit is
        submitted as form data (pk values rather than instances); the form
        must validate.
        """
        self.user = self.user1
        # Looking these up doubles as a fixture sanity check; they are the
        # models the disabled model-selection scenarios exercised.
        oFleetwood = Model.objects.get(
            cTitle = 'Fleetwood',
            iUser = self.user1 )
        o601b = Model.objects.get( cTitle = '601b', iUser = self.user1 )
        # Store a hit directly (not via the form) so validation below runs
        # against a database that already contains this item.
        dNewHit = dict(
            iItemNumb = self.oUserItemFound.iItemNumb,
            iHitStars = 5,
            iSearch   = self.oSearch,
            iModel    = None,
            iBrand    = self.oBrand,    # Cadillac
            iCategory = self.oCategory, # Manual
            iUser     = self.user1 )
        oNewHit = UserItemFound( **dNewHit )
        oNewHit.save()
        # The same hit expressed as form POST data (primary keys, not
        # instances, plus the tTimeEnd the form requires).
        dFormData = dict(
            iItemNumb = self.iItemNumb,
            iHitStars = 5,
            iSearch   = self.oSearch.id,
            iModel    = None,
            iBrand    = self.oBrand.id,    # Cadillac
            iCategory = self.oCategory.id, # Manual
            tTimeEnd  = timezone.now(),
            iUser     = self.user1.id )
        form = UserItemFoundForm( data = dFormData )
        # No view is involved here, so set the attributes the form's
        # validation expects the view to provide.
        form.user    = self.user1
        form.request = self.request
        if form.errors:  # aid debugging when the assertion below fails
            print()
            print( 'form error(s):' )
            for k, v in form.errors.items():
                print( k, ' -- ', v )
        self.assertTrue( form.is_valid() )
| StarcoderdataPython |
66798 | """create exchange market table
Revision ID: 76f253d77eba
Revises: 4d9ca085df42
Create Date: 2019-09-22 01:32:11.855978
"""
from antalla.settings import TABLE_PREFIX
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "76f253d77eba"        # this migration's id
down_revision = "4d9ca085df42"   # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``exchange_markets`` table.

    One row per (market, exchange) pairing: the composite primary key is
    (first_coin_id, second_coin_id, exchange_id), with a composite foreign
    key back to the ``markets`` table and single-column FKs to ``coins`` and
    ``exchanges``.  Secondary indexes support lookups by market, by full
    key, and by aggregate order count.
    """
    op.create_table(
        TABLE_PREFIX + "exchange_markets",
        sa.Column("volume_usd", sa.Float),
        sa.Column("quoted_volume", sa.Float, nullable=False),
        sa.Column("quoted_vol_timestamp", sa.DateTime),
        sa.Column("vol_usd_timestamp", sa.DateTime),
        sa.Column("agg_orders_count", sa.Integer, server_default="0", nullable=False),
        # Currency in which quoted_volume is denominated.
        sa.Column(
            "quoted_volume_id",
            sa.String,
            sa.ForeignKey(TABLE_PREFIX + "coins.symbol"),
            nullable=False,
        ),
        # Market name as reported by the exchange itself.
        sa.Column("original_name", sa.String, nullable=False),
        sa.Column(
            "first_coin_id",
            sa.String,
            sa.ForeignKey(TABLE_PREFIX + "coins.symbol"),
            nullable=False,
            index=True,
        ),
        sa.Column(
            "second_coin_id",
            sa.String,
            sa.ForeignKey(TABLE_PREFIX + "coins.symbol"),
            nullable=False,
            index=True,
        ),
        sa.Column(
            "exchange_id",
            sa.Integer,
            sa.ForeignKey(TABLE_PREFIX + "exchanges.id"),
            nullable=False,
            index=True,
        ),
        sa.PrimaryKeyConstraint("first_coin_id", "second_coin_id", "exchange_id"),
        # The coin pair must reference an existing row in `markets`.
        sa.ForeignKeyConstraint(
            ["first_coin_id", "second_coin_id"],
            [
                TABLE_PREFIX + "markets.first_coin_id",
                TABLE_PREFIX + "markets.second_coin_id",
            ],
        ),
        sa.Index("exchange-market-fk-idx", "first_coin_id", "second_coin_id"),
        sa.Index(
            "exchange-market-fk-full-idx",
            "first_coin_id",
            "second_coin_id",
            "exchange_id",
        ),
        sa.Index("exchange-market-agg_orders_count_idx", "agg_orders_count"),
    )
def downgrade():
    """Drop the ``exchange_markets`` table (reverses upgrade)."""
    op.drop_table(TABLE_PREFIX + "exchange_markets")
| StarcoderdataPython |
3346180 | <filename>Arquivos/Lendo as linhas do arquivo.py
"""
Programa 116
Área de estudos.
data 13.12.2020 (Indefinida) Hs
@Autor: <NAME>
"""
# Abrimos o arquivo para leitura.
arquivo = open('/home/abraao/Documentos/testando.txt', 'rt') # Ou 'r' modo que nos permite ler.
for linha in arquivo: # Lendo linha por linha do arquivo.
print(linha)
arquivo.close()
# Ou
arquivo = open('/home/abraao/Documentos/testando.txt', 'r') # Ou 'rt' modo que nos permite ler.
linha = arquivo.readline() # utilizando método 'readline' para ler as linhas do arquivo.
while linha:
print(linha)
linha = arquivo.readline() # Direcionando o ponteiro de execução para proxima linha.
arquivo.close()
| StarcoderdataPython |
3344670 | """
This file is part of pysofar: A client for interfacing with Sofar Oceans Spotter API
Contents: Classes for representing devices and data grabbed from the api
Copyright (C) 2019
Sofar Ocean Technologies
Authors: <NAME>
"""
from pysofar.sofar import SofarApi, WaveDataQuery
# --------------------- Devices ----------------------------------------------#
class Spotter:
    """
    Class to represent a spotter object
    """

    def __init__(self, spotter_id: str, name: str):
        """
        :param spotter_id: The spotter id as a string
        :param name: The name of the spotter
        """
        self.id = spotter_id
        self.name = name

        # cached spotter data (latest wave/tracking/frequency samples)
        self._data = None

        # spotter parameters, populated by update()
        self._mode = None
        self._latitude = None
        self._longitude = None
        self._battery_power = None
        self._battery_voltage = None
        self._solar_voltage = None
        self._humidity = None
        self._timestamp = None

        self._session = SofarApi()

    # -------------------------- Properties -------------------------------------- #
    @property
    def mode(self):
        """
        The tracking type of the spotter.

        3 Modes are possible:
            - waves_standard
            - waves_spectrum (Includes spectrum data)
            - tracking

        :return: The current mode of the spotter
        """
        return self._mode

    @mode.setter
    def mode(self, value):
        """
        Sets the mode of the spotter

        :param value: Either 'full', 'waves' or 'track'; any other value
                      raises ValueError (previously a bare Exception).
        """
        if value == 'full':
            self._mode = 'waves_spectrum'
        elif value == 'waves':
            self._mode = 'waves_standard'
        elif value == 'track':
            self._mode = 'tracking'
        else:
            raise ValueError('Invalid Mode')

    @property
    def lat(self):
        """
        :return: The most recent latitude value (since updating)
        """
        return self._latitude

    @lat.setter
    def lat(self, value): self._latitude = value

    @property
    def lon(self):
        """
        :return: The most recent longitude value (since updating)
        """
        return self._longitude

    @lon.setter
    def lon(self, value): self._longitude = value

    @property
    def battery_voltage(self):
        """
        :return: Battery voltage of the spotter
        """
        return self._battery_voltage

    @battery_voltage.setter
    def battery_voltage(self, value): self._battery_voltage = value

    @property
    def battery_power(self):
        """
        :return: The most recent battery_power value (since updating)
        """
        return self._battery_power

    @battery_power.setter
    def battery_power(self, value): self._battery_power = value

    @property
    def solar_voltage(self):
        """
        :return: The most recent solar voltage level (since updating)
        """
        return self._solar_voltage

    @solar_voltage.setter
    def solar_voltage(self, value): self._solar_voltage = value

    @property
    def humidity(self):
        """
        :return: The most recent humidity value (since updating)
        """
        return self._humidity

    @humidity.setter
    def humidity(self, value): self._humidity = value

    @property
    def timestamp(self):
        """
        The time value at which the current spotter last recorded data

        :return: ISO8601 formatted string
        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, value): self._timestamp = value

    @property
    def data(self):
        """
        :return: Cached data from the latest update
        """
        return self._data

    @data.setter
    def data(self, value): self._data = value

    # -------------------------- API METHODS -------------------------------------- #
    @staticmethod
    def _extract_latest(_data):
        """Build the {'wave', 'tracking', 'frequency'} result dict from an
        API payload.  Each entry is the most recent sample of its kind, or
        None when the payload has no samples of that kind.

        Shared by update() and latest_data(), which previously duplicated
        this logic.
        """
        wave_data = _data['waves']
        track_data = _data['track']
        freq_data = _data['frequencyData']

        return {
            'wave': wave_data[-1] if len(wave_data) > 0 else None,
            'tracking': track_data[-1] if len(track_data) > 0 else None,
            'frequency': freq_data[-1] if len(freq_data) > 0 else None
        }

    def change_name(self, new_name: str):
        """
        Updates the spotters name in the Sofar Database

        :param new_name: The new desired spotter name
        """
        self.name = self._session.update_spotter_name(self.id, new_name)

    def download_datafile(self, start_date, end_date):
        """
        Download a datafile containing this spotters data from start_date to end_date

        :param start_date: Start date string
        :param end_date: End date String
        """
        # Local import kept from the original -- presumably to avoid an
        # import cycle with pysofar.tools; TODO confirm.
        from pysofar.tools import parse_date
        self._session.grab_datafile(self.id, parse_date(start_date), parse_date(end_date))

    def update(self):
        """
        Updates this spotter's attribute values from the API and caches the
        latest wave/tracking/frequency samples in self.data.
        """
        _data = self._session.get_latest_data(self.id)

        self.name = _data['spotterName']
        self._mode = _data['payloadType']
        # assumes 'track' is non-empty for a reporting spotter -- TODO confirm
        self._latitude = _data['track'][-1]['latitude']
        self._longitude = _data['track'][-1]['longitude']
        self._timestamp = _data['track'][-1]['timestamp']
        self._battery_power = _data['batteryPower']
        self._battery_voltage = _data['batteryVoltage']
        self._solar_voltage = _data['solarVoltage']
        self._humidity = _data['humidity']

        self._data = self._extract_latest(_data)

    def latest_data(self, include_wind: bool = False, include_directional_moments: bool = False):
        """
        Query the API for this spotter's most recent samples without
        mutating the cached attributes.

        :param include_wind: True to include wind data in the query
        :param include_directional_moments: True to include directional moments
        :return: dict with the latest 'wave', 'tracking' and 'frequency' samples
        """
        _data = self._session.get_latest_data(
            self.id,
            include_wind_data=include_wind,
            include_directional_moments=include_directional_moments)

        return self._extract_latest(_data)

    def grab_data(self, limit: int = 20,
                  start_date: str = None, end_date: str = None,
                  include_waves: bool = True, include_wind: bool = False,
                  include_track: bool = False, include_frequency_data: bool = False,
                  include_directional_moments: bool = False,
                  smooth_wave_data: bool = False,
                  smooth_sg_window: int = 135,
                  smooth_sg_order: int = 4,
                  interpolate_utc: bool = False,
                  interpolate_period_seconds: int = 3600):
        """
        Grabs the requested data for this spotter based on the given keyword arguments

        :param limit: The limit for data to grab. Defaults to 20, For frequency data max of 100 samples at a time,
                      else, 500 samples. If you send values over the limit, it will automatically limit for you
        :param start_date: ISO 8601 formatted date string. If not included defaults to beginning of spotters history
        :param end_date: ISO 8601 formatted date string. If not included defaults to end of spotter history
        :param include_waves: Defaults to True. Set to False if you do not want the wave data in the returned response
        :param include_wind: Defaults to False. Set to True if you want wind data in the returned response
        :param include_track: Defaults to False. Set to True if you want tracking data in the returned response
        :param include_frequency_data: Defaults to False. Only applies if the spotter is in 'Full Waves mode' Set to
                                       True if you want frequency data in the returned response
        :param include_directional_moments: Defaults to False. Only applies if the spotter is in 'Full Waves mode' and
                                            'include_frequency_data' is True. Set True if you want the frequency data
                                            returned to also include directional moments

        :return: Data as a json based on the given query paramters
        """
        _query = WaveDataQuery(self.id, limit, start_date, end_date)
        _query.waves(include_waves)
        _query.wind(include_wind)
        _query.track(include_track)
        _query.frequency(include_frequency_data)
        _query.directional_moments(include_directional_moments)
        _query.smooth_wave_data(smooth_wave_data)
        _query.smooth_sg_window(smooth_sg_window)
        _query.smooth_sg_order(smooth_sg_order)
        _query.interpolate_utc(interpolate_utc)
        _query.interpolate_period_seconds(interpolate_period_seconds)

        _data = _query.execute()
        return _data
| StarcoderdataPython |
3220443 | from eICU_preprocessing.split_train_test import create_folder
from torch.optim import Adam
from models.tpc_model import TempPointConv
from models.experiment_template import ExperimentTemplate
from models.initialise_arguments import initialise_tpc_arguments
from models.final_experiment_scripts.best_hyperparameters import best_tpc
class TPC(ExperimentTemplate):
    """Length-of-stay experiment wrapping the Temporal Pointwise Convolution model."""
    def setup(self):
        """Build the TPC model and its optimiser on top of the template setup."""
        self.setup_template()
        # F=41: number of time-series features -- presumably fixed by the
        # eICU preprocessing; TODO confirm against the data reader.
        self.model = TempPointConv(config=self.config,
                                   F=41,
                                   D=self.train_datareader.D,
                                   no_flat_features=self.train_datareader.no_flat_features).to(device=self.device)
        self.elog.print(self.model)
        self.optimiser = Adam(self.model.parameters(), lr=self.config.learning_rate, weight_decay=self.config.L2_regularisation)
        return
if __name__=='__main__':
    # Configure the "TPC without lab features" run on eICU using the best
    # published hyperparameters, then train.
    c = initialise_tpc_arguments()
    c['exp_name'] = 'TPCNoLabs'
    c['dataset'] = 'eICU'
    c['no_labs'] = True
    c = best_tpc(c)

    log_folder_path = create_folder('models/experiments/final/eICU/LoS', c.exp_name)
    tpc = TPC(config=c,
              n_epochs=c.n_epochs,
              name=c.exp_name,
              base_dir=log_folder_path,
              explogger_kwargs={'folder_format': '%Y-%m-%d_%H%M%S{run_number}'})
    tpc.run()
1784102 | # Generated by Django 3.0.6 on 2020-06-22 13:14
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Introduce ComponentInterface-based algorithm I/O.

    Adds inputs/outputs M2M fields on Algorithm and Job, a public flag and
    comment on Job, default orderings, and makes Job.image nullable with
    SET_NULL on delete.  (Auto-generated by Django; do not hand-edit the
    operations beyond comments.)
    """

    dependencies = [
        ("components", "0001_initial"),
        ("cases", "0024_auto_20200525_0634"),
        ("algorithms", "0025_algorithmimage_queue_override"),
    ]

    operations = [
        # Deterministic default orderings by creation time.
        migrations.AlterModelOptions(
            name="job", options={"ordering": ("created",)},
        ),
        migrations.AlterModelOptions(
            name="result", options={"ordering": ("created",)},
        ),
        # Declared interfaces an algorithm consumes/produces.
        migrations.AddField(
            model_name="algorithm",
            name="inputs",
            field=models.ManyToManyField(
                related_name="algorithm_inputs",
                to="components.ComponentInterface",
            ),
        ),
        migrations.AddField(
            model_name="algorithm",
            name="outputs",
            field=models.ManyToManyField(
                related_name="algorithm_outputs",
                to="components.ComponentInterface",
            ),
        ),
        migrations.AddField(
            model_name="job",
            name="comment",
            field=models.TextField(blank=True, default=""),
        ),
        # Concrete interface values attached to a particular job run.
        migrations.AddField(
            model_name="job",
            name="inputs",
            field=models.ManyToManyField(
                related_name="algorithms_jobs_as_input",
                to="components.ComponentInterfaceValue",
            ),
        ),
        migrations.AddField(
            model_name="job",
            name="outputs",
            field=models.ManyToManyField(
                related_name="algorithms_jobs_as_output",
                to="components.ComponentInterfaceValue",
            ),
        ),
        migrations.AddField(
            model_name="job",
            name="public",
            field=models.BooleanField(
                default=False,
                help_text="If True, allow anyone to download this result along with the input image. Otherwise, only the job creator and algorithm editor(s) will have permission to download and view this result.",
            ),
        ),
        # Keep the job row if its input image is deleted.
        migrations.AlterField(
            model_name="job",
            name="image",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="cases.Image",
            ),
        ),
    ]
| StarcoderdataPython |
3299033 | <filename>python/python-008/lambda_function.py
import json
def lambda_handler(event, context):
    """AWS Lambda entry point: log the incoming event and return a fixed OK.

    The response follows the API Gateway proxy-integration shape
    (statusCode / headers / body).  NOTE(review): the original returned the
    key 'status'; API Gateway requires 'statusCode' and treats anything else
    as a malformed Lambda proxy response -- confirm this handler sits behind
    a proxy integration.

    :param event: the triggering event, logged verbatim for debugging
    :param context: Lambda runtime context (unused)
    :return: dict with 'statusCode', 'headers' and a JSON-encoded 'body'
    """
    print(json.dumps(event))
    return {
        'statusCode': 200,
        'headers': {
            'Content-Type': 'application/json'
        },
        'body': json.dumps({'message': 'OK'})
    }
| StarcoderdataPython |
27349 | #!/usr/bin/env python
# -------- BEGIN LICENSE BLOCK --------
# Copyright 2022 FZI Forschungszentrum Informatik
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the {copyright_holder} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------- END LICENSE BLOCK --------
from threading import Lock
import unittest
import rospy
from std_msgs.msg import Int32
from ros_bt_py_msgs.msg import Node as NodeMsg
from ros_bt_py.node_config import NodeConfig
from ros_bt_py.nodes.topic import TopicPublisher
PKG = 'ros_bt_py'
class TestTopicPublisherLeaf(unittest.TestCase):
    """Tests for the TopicPublisher leaf against a live reflector node.

    This expects a test_topics_node.py instance running alongside.
    That node will "reflect" anything we publish to /numbers_in - it's a
    separate node to avoid threading shenanigans in here.
    """

    def setUp(self):
        # Leaf under test publishes Int32 messages to /numbers_in.
        self.publisher_leaf = TopicPublisher(options={
            'topic_name': '/numbers_in',
            'topic_type': Int32
        })
        self.publisher_leaf.setup()
        self._lock = Lock()
        self.msg = None
        self.subscriber = rospy.Subscriber('/numbers_out', Int32, self.cb)
        # Block until the reflector node signals that it is up.
        rospy.wait_for_message('/ready', Int32)

    def tearDown(self):
        self.publisher_leaf.shutdown()

    def cb(self, msg):
        # Subscriber callback runs on another thread; store the latest
        # reflected message under the lock.
        with self._lock:
            self.msg = msg

    def _assert_publishes(self, value):
        """Tick the leaf with *value* and check it is reflected back.

        Ticking should basically never fail - anything that can go wrong
        should go wrong in the setup() method.
        """
        self.publisher_leaf.inputs['message'] = Int32(data=value)
        self.publisher_leaf.tick()
        self.assertEqual(self.publisher_leaf.state, NodeMsg.SUCCEEDED)
        # Give the reflector node time to bounce the message back.
        rospy.sleep(0.1)
        # Read self.msg under the same lock the callback writes it under.
        with self._lock:
            self.assertEqual(self.msg.data, value)

    def testSendsNumber(self):
        self.assertIsNone(self.msg)
        self._assert_publishes(1)
        self._assert_publishes(42)
        # untick/reset must leave the leaf usable for further ticks.
        self.assertEqual(self.publisher_leaf.untick(), NodeMsg.IDLE)
        self.publisher_leaf.reset()
        self._assert_publishes(23)
if __name__ == '__main__':
    # Register this process as a ROS node before any topic traffic happens
    # in the test fixtures.
    rospy.init_node('test_topic_publish_leaf')
    import rostest
    import sys
    import os
    # Write coverage data to a per-test file so concurrent rostest runs
    # do not clobber each other's results.
    os.environ['COVERAGE_FILE'] = '%s.%s.coverage' % (PKG, 'test_topic_publish_leaf')
    rostest.rosrun(PKG, 'test_topic_publish_leaf', TestTopicPublisherLeaf,
                   sysargs=sys.argv + ['--cov'])
| StarcoderdataPython |
133806 | <gh_stars>0
"""
Functions and classes related to working with Python's native asyncio support
To avoid issues with the ``async`` keyword, this file is named ``asyncx`` instead of ``async``
**Copyright**::
+===================================================+
| © 2019 Privex Inc. |
| https://www.privex.io |
+===================================================+
| |
| Originally Developed by Privex Inc. |
| |
| Core Developer(s): |
| |
| (+) Chris (@someguy123) [Privex] |
| (+) Kale (@kryogenic) [Privex] |
| |
+===================================================+
Copyright 2019 Privex Inc. ( https://www.privex.io )
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import asyncio
def run_sync(func, *args, **kwargs):
    """
    Run an async function synchronously (useful for REPL testing async functions)

    Usage:

        >>> async def my_async_func(a, b, x=None, y=None):
        ...     return a, b, x, y
        >>>
        >>> run_sync(my_async_func, 1, 2, x=3, y=4)
        (1, 2, 3, 4,)

    :param callable func: An asynchronous (or plain) function to run
    :param args: Positional arguments to pass to ``func``
    :param kwargs: Keyword arguments to pass to ``func``
    :return: Whatever ``func`` returns once it has completed
    """
    # Calling an ``async def`` function returns a coroutine object without
    # running it; a plain function simply returns its result.
    result = func(*args, **kwargs)
    if asyncio.iscoroutine(result):
        # BUG FIX: the previous implementation used ``asyncio.coroutine``,
        # which was removed in Python 3.11. ``asyncio.run`` drives the
        # coroutine to completion on a fresh event loop.
        return asyncio.run(result)
    return result
def async_sync(f):
    """
    Async Synchronous Decorator, borrowed from https://stackoverflow.com/a/23036785/2648583 - added this PyDoc comment
    and support for returning data from a synchronous function

    Allows a non-async function to run async functions using ``yield from`` - and can also return data

    Useful for unit testing, since unittest.TestCase functions are synchronous.

    Example async function:

        >>> async def my_async_func(a, b, x=None, y=None):
        ...     return a, b, x, y
        ...

    Using the above async function with a non-async function:

        >>> @async_sync
        ... def sync_function():
        ...     result = yield from my_async_func(1, 2, x=3, y=4)
        ...     return result
        ...
        >>> r = sync_function()
        >>> print(r)
        (1, 2, 3, 4,)
        >>> print(r[1])
        2

    :param callable f: A generator function that drives coroutines via ``yield from``
    :return: A plain synchronous function returning ``f``'s final result
    """
    # Local import keeps this module's public dependency surface unchanged.
    import types

    def wrapper(*args, **kwargs):
        # BUG FIX: ``asyncio.coroutine`` was removed in Python 3.11.
        # ``types.coroutine`` sets the CO_ITERABLE_COROUTINE flag, which
        # lets the generator ``yield from`` native coroutines and makes it
        # awaitable from async code.
        gen_func = types.coroutine(f)

        async def runner():
            return await gen_func(*args, **kwargs)

        # Drive everything to completion on a fresh event loop.
        return asyncio.run(runner())

    return wrapper
| StarcoderdataPython |
1686217 | <filename>exe1103_teste.py
import unittest
from exe1103_employee import Employee
class TestEmployee(unittest.TestCase):
    """Tests for the Employee class."""

    def setUp(self):
        """Create an Employee instance shared by every test."""
        self.junior = Employee('junior', 'pedroso', 10000)

    def test_give_default_raise(self):
        """A raise with no argument should leave the salary at 15000."""
        self.junior.give_raise()
        self.assertEqual(self.junior.salario, 15000)

    def test_give_custom_raise(self):
        """An explicit raise amount should be added onto the salary."""
        self.junior.give_raise(8000)
        self.assertEqual(self.junior.salario, 18000)
if __name__ == '__main__':
    # Guard the test run so importing this module (e.g. from a test
    # runner or another script) does not execute the tests as a side
    # effect of the import.
    unittest.main()
| StarcoderdataPython |
1636583 | #!/usr/bin/env python
from ecmwfapi import ECMWFDataServer
import datetime
import dateutil.parser
import sys
erai_info = {}
erai_info["class"] = "ei"
erai_info["dataset"] = "interim"
erai_info["expver"] = "1"
erai_info["grid"] = "0.75/0.75"
erai_info["levtype"] = "sfc"
erai_info["param"] = "39.128/40.128/41.128/42.128/134.128/139.128/146.128/147.128/159.128/165.128/166.128/167.128/168.128/169.128/170.128/175.128/176.128/177.128/183.128/228.128/236.128"
erai_info["step"] = "3/6/9/12"
erai_info["stream"] = "oper"
erai_info["time"] = "00/12"
erai_info["type"] = "fc"
erai_info["format"] = "netcdf"
if len(sys.argv)==1:
print "Command line syntax is:"
print " python get_erai.py <country>"
print "where <country> can be;"
print " Australia"
print " USA"
sys.exit
if sys.argv[1].lower()=="australia":
erai_info["area"] = "-10/110/-45/155"
target_directory = "/run/media/cilli/3637-3538/Data/OzFlux/"
start_date = "2017-01-01"
end_date = "2017-12-31"
elif sys.argv[1].lower()=="nz":
erai_info["area"] = "-30/165/-50/180"
target_directory = "/run/media/cilli/cillidata/cilli/1_Work/1_OzFlux/Sites/ERAI/NZ/"
start_date = "2017-01-01"
end_date = "2017-12-31"
elif sys.argv[1].lower()=="usa":
erai_info["area"] = "70/229.5/30/300"
target_directory = "/home/peter/AmeriFlux/ERAI/"
start_date = "2017-01-01"
end_date = "2017-10-31"
elif sys.argv[1].lower()=="china":
erai_info["area"] = "60/80/20/140"
target_directory = "/home/peter/ChinaFlux/ERAI/"
start_date = "2014-01-01"
end_date = "2014-12-31"
else:
print "Unrecognised country option entered on command line"
print "Valid country options are:"
print " australia"
print " usa"
sys.exit()
server = ECMWFDataServer()
sd = dateutil.parser.parse(start_date)
ed = dateutil.parser.parse(end_date)
start_year = sd.year
end_year = ed.year
year_list = range(start_year,end_year+1)
for year in year_list:
print " Processing year: ",str(year)
sds = str(year)+"-01-01"
edc = datetime.datetime(year+1,1,1,0,0,0)
eds = edc.strftime("%Y-%m-%d")
if ed < edc:
eds = ed.strftime("%Y-%m-%d")
erai_info["date"] = sds+"/to/"+eds
#print sds+"/to/"+eds
erai_info["target"] = target_directory+"ERAI_"+str(year)+".nc"
server.retrieve(erai_info)
| StarcoderdataPython |
117570 | from suds.client import Client
from suds.plugin import MessagePlugin
from suds.cache import FileCache
from .http import HttpTransport
from . import settings
import logging
logger = logging.getLogger(__name__)
#: Cache of :class:`suds.client.Client <suds.client.Client>` objects,
#: keyed by WSDL URL (see :func:`get_client`).
#: When unit-testing SOAP APIs it's probably wise to reset this to an empty
#: dictionary in-between tests.
clients = {}

# Set cache deletion preference: whether suds' default on-disk cache
# directory is removed at interpreter exit, driven by app settings.
FileCache.remove_default_location_on_exit = settings.REMOVE_CACHE_ON_EXIT
class LogPlugin(MessagePlugin):
    """Suds plugin used in DEBUG mode. Logs all incoming and outgoing XML data at the DEBUG level."""

    def __init__(self, prefix):
        # String prepended to every log line to identify the client.
        self.prefix = prefix

    def sending(self, context):
        """Called when sending a SOAP request"""
        # Pass lazy %-style arguments so the message is only formatted when
        # DEBUG logging is actually enabled (same output as before).
        logger.debug("%s Request: %s", self.prefix, context.envelope)

    def received(self, context):
        """Called when receiving a SOAP response"""
        logger.debug("%s Response: %s", self.prefix, context.reply)
def get_transport():
    """
    Build a fresh :class:`soap.http.HttpTransport <soap.http.HttpTransport>` instance.

    Unit tests can patch this function to return a custom transport object
    for the client to use - handy when mocking an API instead of actually
    calling it during a test.

    :return: :class:`soap.http.HttpTransport <soap.http.HttpTransport>` object
    :rtype: soap.http.HttpTransport
    """
    transport = HttpTransport()
    return transport
def get_client(wsdl, log_prefix, plugins=None, **kwargs):
    """
    Get a SOAP Client object for the given WSDL. Client objects are cached in
    :attr:`soap.clients <soap.clients>` and keyed by the WSDL URL.

    :param wsdl: String URL of a SOAP WSDL
    :param log_prefix: String prefix to prepend to log lines (when logging XML traffic in DEBUG mode)
    :param plugins: Optional list of additional plugins :class:`suds.plugin.Plugin <suds.plugin.Plugin>`
        to pass on to the :class:`suds.client.Client <suds.client.Client>` object.
    :param kwargs: Optional keyword arguments to pass on to the :class:`suds.client.Client <suds.client.Client>` object
    :return: :class:`suds.client.Client <suds.client.Client>` object
    :rtype: suds.client.Client
    """
    # BUG FIX: the signature previously used ``plugins=[]``; in DEBUG mode a
    # LogPlugin was appended to that *shared* default list on every call, so
    # it grew without bound. Use None and copy the caller's list so their
    # list is never mutated either.
    plugins = list(plugins) if plugins is not None else []
    if wsdl in settings.WSDL_INTERCEPTS:
        wsdl = settings.WSDL_INTERCEPTS[wsdl]
    if wsdl not in clients:
        if settings.DEBUG:
            plugins.append(LogPlugin(log_prefix))
        try:
            clients[wsdl] = Client(
                wsdl, plugins=plugins, transport=get_transport(), **kwargs
            )
        except Exception:
            logger.fatal("Failed to create SOAP client with WSDL at %s" % wsdl)
            # Bare re-raise preserves the original traceback.
            raise
    return clients[wsdl]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.