from folio import Folio
proj = Folio(__name__, source_path='viewer', jinja_extensions=['content_object_tag.ContentObjectTagExtension'])
proj.build()
# ---------------------------------------------------------------------------
import FWCore.ParameterSet.Config as cms
siPixelRawData = cms.EDProducer("SiPixelDigiToRaw",
Timing = cms.untracked.bool(False),
InputLabel = cms.InputTag("simSiPixelDigis")
)
# ---------------------------------------------------------------------------
DEBUG = 0
TRACE = 0
def set_debug_level(level):
global DEBUG
global TRACE
    if level in (1, "debug", "Debug", "DEBUG", "d", "D"):
DEBUG = 1
TRACE = 0
print("DEBUG switched on.")
elif level == 2:
DEBUG = 1
TRACE = 1
print("DEBUG and TRACE switched on.")
    elif level in ("trace", "Trace", "TRACE", "t", "T"):
DEBUG = 0
TRACE = 1
print("TRACE switched on.")
else:
DEBUG = 0
TRACE = 0
def get_debug_level():
    return DEBUG, TRACE
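A quick usage sketch of this module; the module name `debug_flags` is made up for illustration:
import debug_flags  # hypothetical module name for the file above

debug_flags.set_debug_level("trace")            # prints "TRACE switched on."
assert debug_flags.get_debug_level() == (0, 1)  # DEBUG off, TRACE on
# ---------------------------------------------------------------------------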
from __future__ import absolute_import, division, print_function
import pytest
from drms.utils import (
_pd_to_datetime_coerce, _pd_to_numeric_coerce,
_split_arg, _extract_series_name)
# test_pd_to_datetime
# test_pd_to_numeric
@pytest.mark.parametrize('in_obj, expected', [
('', []),
('asd', ['asd']),
('aa,bb,cc', ['aa', 'bb', 'cc']),
('aa, bb, cc', ['aa', 'bb', 'cc']),
(' aa,bb, cc, dd', ['aa', 'bb', 'cc', 'dd']),
('aa,\tbb,cc, dd ', ['aa', 'bb', 'cc', 'dd']),
(u'aa,\tbb,cc, dd ', [u'aa', u'bb', u'cc', u'dd']),
([], []),
(['a', 'b', 'c'], ['a', 'b', 'c']),
(('a', 'b', 'c'), ['a', 'b', 'c']),
])
def test_split_arg(in_obj, expected):
res = _split_arg(in_obj)
    assert list(res) == list(expected)
@pytest.mark.parametrize('ds_string, expected', [
('hmi.v_45s', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI]', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI/365d@1d]', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI/365d@1d][?QUALITY>=0?]', 'hmi.v_45s'),
('hmi.v_45s[2010.05.01_TAI/1d@6h]{Dopplergram}', 'hmi.v_45s'),
])
def test_extract_series(ds_string, expected):
assert _extract_series_name(ds_string) == expected
# ---------------------------------------------------------------------------
import tkinter as tk
import tkinter.ttk as ttk
from gui.parameters.filter_parameters import FilterParameters
from model.interface import get_interfaces
class InterfacePage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
self.__create_widgets()
def select(self, event):
FilterParameters.interface = self.interface_list[int(self.interface_listbox.focus())].name
print(f"select: {FilterParameters.interface}")
self.controller.get_frame("PacketPage").update_interface()
self.controller.show_frame("PacketPage")
def __create_widgets(self):
label = tk.Label(self, text="Choose interface", font=self.controller.title_font)
label.pack(side="top", fill="x", pady=10)
self.interface_list = get_interfaces()
self.interface_listbox = ttk.Treeview(self)
self.scrollbar = ttk.Scrollbar(self.interface_listbox, orient="vertical", command=self.interface_listbox.yview)
self.interface_listbox.configure(yscrollcommand=self.scrollbar.set)
self.interface_listbox["columns"] = ("ipv4", "ipv6", "mac", "desc")
self.interface_listbox.heading("#0", text="Name", anchor=tk.W)
self.interface_listbox.heading("ipv4", text="IPv4", anchor=tk.W)
self.interface_listbox.heading("ipv6", text="IPv6", anchor=tk.W)
self.interface_listbox.heading("mac", text="MAC", anchor=tk.W)
self.interface_listbox.heading("desc", text="Description", anchor=tk.W)
        for i, interface in enumerate(self.interface_list):
            self.interface_listbox.insert("", tk.END, i, text=interface.name, values=(interface.ipv4,
                                                                                      interface.ipv6,
                                                                                      interface.mac,
                                                                                      interface.desc))
self.interface_listbox.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.interface_listbox.bind("<Double-1>", lambda event: self.select(event=event))
        # self.select_button = tk.Button(self, text="Select", command=lambda: self.select(event=None))
        # self.select_button.pack(pady=5)
# ---------------------------------------------------------------------------
import numpy as np
def test_op_maxpool(ugraph):
with ugraph.begin_construction():
tensor_x, = ugraph.add_op(
np.random.rand(10, 256, 256, 5),
op_type='Const',
name='x'
)
tensor_out, = ugraph.add_op(
tensor_x,
op_type='MaxPool',
name='pool',
ksize_height=32,
ksize_width=32,
stride_height=2,
stride_width=2,
padding='SAME',
is_output=True
)
assert tensor_out.shape == [10, 128, 128, 5]
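With 'SAME' padding the spatial output size depends only on the input size and stride, output = ceil(input / stride), which is what the shape assertion above relies on. A minimal sanity check:
import math

def same_padding_output_dim(input_dim, stride):
    # TensorFlow-style SAME padding: output size is independent of the kernel size
    return math.ceil(input_dim / stride)

assert same_padding_output_dim(256, 2) == 128  # matches the asserted [10, 128, 128, 5]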
# ---------------------------------------------------------------------------
import roslib; roslib.load_manifest('george')
import rospy
###############################################################################################
###############################################################################################
class RobotDescription():
###############################################################################################
###############################################################################################
###########################################################
def __init__(self):
###########################################################
self.name = ""
self.NAppendages = 0
self.max_nservos = 0
self.total_servos = 0
self.appendages = []
###########################################################
def Print(self):
###########################################################
print "name: %s" % self.name
print "NAppendages: %d" % self.NAppendages
print "max_nservos: %d" % self.max_nservos
print "total_servos: %d" % self.total_servos
for i in range(self.NAppendages):
self.appendages[i].Print()
###########################################################
def str(self):
###########################################################
str = "{\'name\' :%s " % self.name
str = "%s, \'NAppendages\' : %d" % (str, self.NAppendages)
str = "%s, \'max_nservos\' : %d" % (str, self.max_nservos)
str = "%s, \'total_servos\' : %d" % (str, self.total_servos)
str = "%s, \'Appendages\' : ["
for i in range(self.NAppendages):
str = "%s, {%s}" % (str, self.appendages[i].str())
str = "%s], \'max_nservos\' : %d, \'total_servos\' : %d" % (str, self.max_nservos, self.total_servos)
return str
###########################################################
def ReadParameters(self):
###########################################################
self.name = rospy.get_param('robot_name')
self.NAppendages = rospy.get_param('NAppendages')
for i in range(self.NAppendages):
rospy.logdebug('Getting appendage #' + str(i))
appendage_name = rospy.get_param('appendage_name' + str(i))
appendage_nservos = rospy.get_param('nservos' + str(i))
appendage_firstservo = rospy.get_param('firstservo' + str(i))
jointnames_str = rospy.get_param('joints' + str(i))
jointnames = jointnames_str.split(',')
self.AddAppendage(appendage_name, appendage_nservos, appendage_firstservo, jointnames)
rospy.logdebug("robot description:")
rospy.logdebug(self.str())
###########################################################
    def AddAppendage(self, appendage_name="", nservos=0, firstservo=0, jointnames=None):
###########################################################
self.appendages.append(Appendage(appendage_name, nservos, firstservo, jointnames))
self.total_servos += nservos
if nservos > self.max_nservos:
self.max_nservos = nservos
###############################################################################################
###############################################################################################
class Appendage():
###############################################################################################
###############################################################################################
###########################################################
    def __init__(self, name="", nservos=0, firstservo=0, jointnames=None):
    ###########################################################
        self.name = name
        self.nservos = nservos
        self.firstservo = firstservo
        self.jointnames = jointnames if jointnames is not None else []
###########################################################
def Print(self):
###########################################################
print "appenage name: %s" % self.name
print " nservos: %d" % self.nservos
print " firstservo: %d" % self.firstservo
print " jointnames = %s" % self.jointnames
###########################################################
def str(self):
###########################################################
str = "appendage_name:%s" % self.name
str = "%s, nservos:%d" % (str, self.nservos)
str = "%s, firstservo:%d" % (str, self.firstservo)
str = "%s, jointnames:%s" % (str, self.jointnames)
return str |
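A hedged usage sketch of the servo bookkeeping in AddAppendage; the appendage names and counts below are invented:
# Illustrative only: no ROS parameters are read here.
robot = RobotDescription()
robot.AddAppendage("left_arm", nservos=5, firstservo=0, jointnames=["shoulder", "elbow"])
robot.AddAppendage("head", nservos=2, firstservo=5, jointnames=["pan", "tilt"])
assert robot.total_servos == 7   # 5 + 2
assert robot.max_nservos == 5    # largest single appendage
# ---------------------------------------------------------------------------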
# coding=utf-8
from setuptools import setup, find_packages
import zsh_history_to_fish
def get_readme():
with open('README.md') as readme_file:
return readme_file.read()
setup(
name='zsh-history-to-fish',
version=zsh_history_to_fish.__version__,
description=zsh_history_to_fish.__description__,
long_description=get_readme(),
long_description_content_type='text/markdown',
url='https://github.com/rsalmei/zsh-history-to-fish',
author=zsh_history_to_fish.__author__,
author_email=zsh_history_to_fish.__email__,
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Environment :: Console',
'Natural Language :: English',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
# 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
keywords='zsh fish shell history script'.split(),
packages=find_packages(),
python_requires='>=3.5, <4',
install_requires=['click'],
extras_require={},
entry_points={
'console_scripts': [
'zsh-history-to-fish=zsh_history_to_fish.command:exporter',
],
},
)
# ---------------------------------------------------------------------------
from locators import Catalog, Alert
from .BasePage import BasePage
class AdminProducts(BasePage):
def open(self):
"""Открытие панели администратора"""
self._open("/admin")
return self
def open_product_page(self):
self._click(Catalog.navigation.catalog_outs)
self._click(Catalog.edit_products.products)
def add_product(self):
self._click(Catalog.edit_products.add_new_product)
def product_filter(self, text):
self._clear_element_(Catalog.filter_product.input_product_name)
self._input(Catalog.filter_product.input_product_name, value=text)
self._click(Catalog.filter_product.accept_filter)
def click_firstline_checkbox(self):
self._click(Catalog.productList.product_check_box)
def delete_product(self):
self._click(Catalog.edit_products.delete_product)
def check_alert(self):
self._wait_for_visible(Alert.success.it)
def alert_present(self):
self._wait_for_alert()
def edit_product(self):
self._click(Catalog.productList.edit_product)
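A sketch of how this page object might be driven from a test; the constructor argument and the filter text are assumptions, since BasePage is not shown here:
# Hypothetical test flow; `driver` is assumed to be a webdriver fixture.
admin = AdminProducts(driver).open()
admin.open_product_page()
admin.product_filter("iPhone")        # filter text is made up
admin.click_firstline_checkbox()
admin.delete_product()
admin.check_alert()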
# ---------------------------------------------------------------------------
import bcrypt
from psql import query, query_no_fetch, make_safe
# salt used when hashing passwords; bcrypt embeds it in the hash it returns
salt = bcrypt.gensalt()
def register_user(uname, psword, name, street, postal, city, country):
"""
Input: Strings (Username, Password, Name, Street, Postal, City, Country)
Checks whether a username already exists and if the user filled out all fields
and that the postcode is a number. Then it hashes the password and stores all
the data in the psql table 'users'
Output: String that indicates the success of this function
"""
#counts the given username, returns false if it counted >0
res = query("SELECT COUNT(*) FROM users WHERE username='{}'".format(make_safe(uname)))
if res[0][0] > 0:
return "already_exists"
try:
postal = int(postal)
    except (TypeError, ValueError):
pass
#checks if all boxes were filled out and postcode is an integer
    if not all((uname, psword, name, street, city, country)) or not isinstance(postal, int):
return "fill_out_all"
#hashes the password so it is not clear in our database
hashed_password = bcrypt.hashpw(psword.encode('utf-8'), salt).decode('utf-8')
#inserts the new user into the table
query_no_fetch("INSERT INTO users VALUES ('{}', '{}', '{}', '{}', '{}', '{}', '{}')".format(make_safe(uname),
make_safe(hashed_password),
make_safe(name),
make_safe(street),
postal,
make_safe(city),
make_safe(country)))
return "user_registered"
def is_login_successful(uname, psword):
"""
Input: Strings(Username, Password)
Checks if the given password matches the password in the table.
Output: String that indicates the success of this function
"""
#query to get the data corresponding to the given username
res = query("SELECT * FROM users WHERE username='{}'".format(make_safe(uname)))
#if the username doesn't exist
if len(res) == 0:
return "username_not_found", uname
#if the correct password was entered
if bcrypt.checkpw(psword.encode('utf-8'), res[0][1].encode('utf-8')):
return "correct_password", uname
#if none of the above worked
return "wrong_password", uname
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# hydrology_processing.py
#
# Description: Pulls three feature classes of data from downloaded Geodatabases on
# hydrology: NHDArea, NHDFlowline, and NHD Waterbody. This data will be
# pulled from all the SubRegions covering California. The data is then
# projected into a GDB as Nad83 CAALB. A select runs on each layer based
# on FCode or FType. Fields are added and updated with relevant FRA information.
# All of the feature classes from each SubRegion are merged based on type. Note that the
# Feature classes for Area and Waterbody and merged together as well.
# A Buffer analysis runs on each merged feature class of Area, Flowline, and Waterbody.
# The buffered feature class is then intersected with the Land Ownership feature
# class to obtain the UnitID. The feature classes are then dissolved to just the
# relevant FRA fields added earlier.
#
# Arcpy Usage: FeatureClassToShapefile_conversion, Rename_management, Project_management,
# FeatureClassToGeodatabase_conversion, MakeFeatureLayer_management, SelectLayerByAttribute_management,
# CopyFeatures_management, GetCount_management, AddField_management, UpdateCursor, Merge_management,
# Buffer_analysis, RepairGeometry_management, PairwiseIntersect_analysis, PairwiseDissolve_analysis
#
# Runtime Estimates: Total time = 4 hr 21 min 7 sec
# Export and projection of original data = 55 min
#
# Created by: Josh Klaus 08/24/2017 jklaus@fs.fed.us
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
import sys
import os
import datetime
# Set workspace or obtain from user input
# in_workspace = "C:\\Users\\jklaus\\Documents\\Python_Testing\\fra_new\\"
in_workspace = sys.argv[1]
arcpy.env.workspace = in_workspace
arcpy.env.overwriteOutput = True
# using the now variable to assign year every time there is a hardcoded 2017
now = datetime.datetime.today()
curMonth = str(now.month)
curYear = str(now.year)
arcpy.AddMessage("Year is " + curYear)
# hydroWorkspace = in_workspace + "\\" + "NHD" + curYear + "\\" + "Subregions" + "\\"
hydroWorkspace = in_workspace + "\\" + "Downloads" + "\\" + "Hydro" + "\\"
outputDir = in_workspace + "\\" + "Output"
outputHydroDir = "Hydro" + curYear
outputWorkspace = outputDir + "\\" + outputHydroDir + "\\"
hydroFeatureDataset = "\\" + "Hydrography" + "\\"
# Need to rename when done with testing
projectedGDB = "Hydro_" + curYear + "_CAALB83.gdb"
outputProjGDB = outputWorkspace + projectedGDB
sr = arcpy.SpatialReference(3310)
subRegionList = ["1503", "1604", "1605", "1606", "1710", "1712",
"1801", "1802", "1803", "1804", "1805", "1806",
"1807", "1808", "1809", "1810"]
# subRegionList = ["1503", "1801"]
nhdAreaFC = "NHDArea"
nhdFlowlineFC = "NHDFlowline"
nhdWaterbodyFC = "NHDWaterbody"
waterFeatureList = [nhdAreaFC, nhdFlowlineFC, nhdWaterbodyFC]
nhdAreaList = []
nhdFlowlineList = []
nhdWaterbodyList = []
nhdAreaMerge = "NHDArea_Merge"
nhdFlowlineMerge = "NHDFlowline_Merge"
nhdWaterbodyMerge = "NHDWaterBody_Merge"
nhdArea_WaterbodyMerge = "NHDWaterbody_Area_Merge"
mergeList = [nhdAreaMerge, nhdFlowlineMerge, nhdWaterbodyMerge, nhdArea_WaterbodyMerge]
nhdAreaBuffer = "NHDArea_Merge_Buff"
nhdFlowlineBuffer = "NHDFlowline_Merge_Buff"
nhdWaterbodyBuffer = "NHDWaterBody_Merge_Buff"
nhdArea_WaterbodyBuffer = "NHDWaterbody_Area_Merge_Buff"
bufferList = [nhdAreaBuffer, nhdFlowlineBuffer, nhdWaterbodyBuffer, nhdArea_WaterbodyBuffer]
intersectList = []
bufferField = "BUFFM_FIRE"
usfsOwnershipFeatureClass = in_workspace + \
"\\USFS_Ownership_LSRS\\" + curYear + \
"_USFS_Ownership_CAALB83.gdb\\USFS_OwnershipLSRS_" + curYear
try:
if not os.path.exists(outputDir):
arcpy.AddMessage("Creating directory for Output")
os.makedirs(outputDir)
if not os.path.exists(outputDir + "\\" + outputHydroDir):
arcpy.AddMessage("Creating output directory for " + outputHydroDir)
os.makedirs(outputDir + "\\" + outputHydroDir)
if arcpy.Exists(outputWorkspace + "\\" + projectedGDB):
newHydroWorkSpace = outputWorkspace + "\\" + projectedGDB + "\\"
else:
arcpy.CreateFileGDB_management(outputWorkspace, projectedGDB)
newHydroWorkSpace = outputWorkspace + "\\" + projectedGDB + "\\"
arcpy.AddMessage("Ouput Workspace: " + newHydroWorkSpace)
for region in subRegionList:
hydroGDB = "NHD_H_" + region + "_HU4_GDB.gdb"
if arcpy.Exists(hydroWorkspace + hydroGDB):
arcpy.AddMessage("______________________________________")
arcpy.AddMessage("Processing " + hydroGDB)
for waterFeature in waterFeatureList:
arcpy.AddMessage("--------------------------------------")
arcpy.AddMessage("processing " + waterFeature)
flowlineShapefile = waterFeature + ".shp"
inHydroFD = hydroWorkspace + hydroGDB + hydroFeatureDataset
inHydroFC = inHydroFD + waterFeature
arcpy.AddMessage("Origin of Data: " + inHydroFC)
arcpy.AddMessage("Exporting " + waterFeature + " to shapefile for projecting")
arcpy.FeatureClassToShapefile_conversion(inHydroFC, outputWorkspace)
newShapefile = waterFeature + "_" + region
# Rename files to add Subregion to name to distinguish different feature classes as loop runs
arcpy.Rename_management(outputWorkspace + waterFeature + ".shp", newShapefile)
inProjShapefile = outputWorkspace + newShapefile + ".shp"
outProjShapefile = outputWorkspace + newShapefile + "_proj.shp"
spatial_ref = arcpy.Describe(inProjShapefile).spatialReference
arcpy.AddMessage("Current Spatial Reference is : " + spatial_ref.name)
if spatial_ref.name != "NAD_1983_California_Teale_Albers":
arcpy.AddMessage("Reprojecting shapefile to NAD 1983 California Teale Albers")
arcpy.Project_management(inProjShapefile, outProjShapefile, sr)
arcpy.AddMessage("reprojection complete")
arcpy.AddMessage("Converting shapefile to GDB")
arcpy.FeatureClassToGeodatabase_conversion(outProjShapefile, newHydroWorkSpace)
arcpy.AddMessage("Finished converting shapefile to GDB")
inSelectFC = newHydroWorkSpace + newShapefile + "_proj"
selectFC = newHydroWorkSpace + newShapefile + "_select"
selectQuery = ""
if waterFeature == nhdFlowlineFC:
selectQuery = "( FCode = 46000 OR FCode = 46003 OR FCode = 46006 )"
elif waterFeature == nhdWaterbodyFC:
selectQuery = "( FType = 436 OR FType = 466 OR FType = 493 " \
"OR FCode = 39004 OR FCode = 39009 OR FCode = 39010 OR FCode = 39011)"
elif waterFeature == nhdAreaFC:
selectQuery = "( FCode = 46000 OR FCode = 46003 OR FCode = 46006 )"
arcpy.AddMessage("Selecting features based on following Select Query: " + selectQuery)
            arcpy.MakeFeatureLayer_management(inSelectFC, "lyr")
            arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION", selectQuery)
arcpy.AddMessage("Copying selected records to new feature ......")
arcpy.CopyFeatures_management("lyr", selectFC)
result = arcpy.GetCount_management(selectFC)
count = int(result.getOutput(0))
arcpy.AddMessage("Total Number of Records: " + str(count))
arcpy.AddMessage("Adding fields")
arcpy.AddField_management(selectFC, "UnitID", "TEXT", "", "", "5", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "GRANK_FIRE", "TEXT", "", "", "50", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "SOURCEFIRE", "TEXT", "", "", "50", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "SNAME_FIRE", "TEXT", "", "", "60", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "CNAME_FIRE", "TEXT", "", "", "60", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "BUFFT_FIRE", "SHORT", "", "", "", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "BUFFM_FIRE", "SHORT", "", "", "", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "CMNT_FIRE", "TEXT", "", "", "150", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddField_management(selectFC, "INST_FIRE", "TEXT", "", "", "150", "", "NULLABLE",
"NON_REQUIRED", "")
arcpy.AddMessage("Updating fields")
cur = arcpy.UpdateCursor(selectFC)
for row in cur:
fCodefield = row.getValue("FCode")
fTypefield = row.getValue("FType")
row.SOURCEFIRE = "NHD Subbasins " + curMonth + " " + curYear
row.SNAME_FIRE = "Hydro"
row.CNAME_FIRE = "Hydro"
                row.BUFFT_FIRE = 300
row.BUFFM_FIRE = 91.44
if waterFeature == nhdAreaFC:
row.GRANK_FIRE = "NHDArea Stream/River"
elif waterFeature == nhdWaterbodyFC:
row.GRANK_FIRE = "NHD Waterbody"
elif waterFeature == nhdFlowlineFC:
row.GRANK_FIRE = "NHDFlowline Stream/River"
if fCodefield == 46000:
row.CMNT_FIRE = "FCode 46000 - Stream/River"
row.INST_FIRE = "Stream/River"
elif fCodefield == 46003:
row.CMNT_FIRE = "FCode 46003 - Stream/River Intermittent"
                    row.INST_FIRE = "Stream/River Intermittent"
elif fCodefield == 46006:
row.CMNT_FIRE = "FCode 46006 - Stream/River Perennial"
row.INST_FIRE = "Stream/River Perennial"
elif fCodefield == 39004:
row.CMNT_FIRE = "FCode 39004 - LakePond Perennial"
row.INST_FIRE = "LakePond Perennial"
elif fCodefield == 39009:
row.CMNT_FIRE = "FCode 39009 - LakePond Perennial Average Stage"
row.INST_FIRE = "LakePond Perennial Average Stage"
elif fCodefield == 39010:
row.CMNT_FIRE = "FCode 39010 - LakePond Perennial Normal Pool"
row.INST_FIRE = "LakePond Perennial Normal Pool"
elif fCodefield == 39011:
row.CMNT_FIRE = "FCode 39011 - LakePond Perennial Date of Photography"
row.INST_FIRE = "LakePond Perennial Date of Photography"
elif fTypefield == 436:
row.CMNT_FIRE = "FType 436 - Reservoir"
row.INST_FIRE = "Reservoir"
elif fTypefield == 466:
row.CMNT_FIRE = "FType 466 - Swamp Marsh"
row.INST_FIRE = "Swamp Marsh"
elif fTypefield == 493:
row.CMNT_FIRE = "FType 493 - Estuary"
row.INST_FIRE = "Estuary"
cur.updateRow(row)
del cur
if waterFeature == nhdAreaFC:
nhdAreaList.append(selectFC)
elif waterFeature == nhdFlowlineFC:
nhdFlowlineList.append(selectFC)
elif waterFeature == nhdWaterbodyFC:
nhdWaterbodyList.append(selectFC)
else:
arcpy.AddMessage(region + " GDB does not exist may need to download and unzip")
arcpy.AddMessage("________________________________________________")
arcpy.AddMessage("------------------------------------------------")
arcpy.AddMessage("________________________________________________")
arcpy.AddMessage("Merging Flowlines")
arcpy.Merge_management(nhdFlowlineList, outputProjGDB + "\\" + nhdFlowlineMerge)
arcpy.AddMessage("Merging Areas")
arcpy.Merge_management(nhdAreaList, outputProjGDB + "\\" + nhdAreaMerge)
arcpy.AddMessage("Merging Waterbodies")
arcpy.Merge_management(nhdWaterbodyList, outputProjGDB + "\\" + nhdWaterbodyMerge)
arcpy.AddMessage("Merging Areas and Waterbodies")
arcpy.Merge_management([outputProjGDB + "\\" + nhdAreaMerge, outputProjGDB + "\\" + nhdWaterbodyMerge],
outputProjGDB + "\\" + nhdArea_WaterbodyMerge)
arcpy.AddMessage("__________________________________________________")
arcpy.AddMessage("--------------------------------------------------")
arcpy.AddMessage("__________________________________________________")
for item in mergeList:
arcpy.AddMessage("|------------------------------------------------|")
arcpy.AddMessage("|------------------------------------------------|")
arcpy.AddMessage("__________________________________________________")
arcpy.AddMessage("Buffering " + item + " features ....")
bufferInput = outputProjGDB + "\\" + item
bufferOutput = outputProjGDB + "\\" + item + "_Buff"
arcpy.Buffer_analysis(bufferInput, bufferOutput, bufferField)
arcpy.AddMessage("Repairing Geometry of Buffered " + item)
arcpy.RepairGeometry_management(bufferOutput)
# usfsOwnershipFeatureClass = in_workspace + \
# "\\USFS_Ownership_LSRS\\2017_USFS_Ownership_CAALB83.gdb\\USFS_OwnershipLSRS_2017"
intersectFeatureClass = bufferOutput + "_intersect"
arcpy.AddMessage("Intersecting with USFS Ownership feature class .....")
arcpy.AddMessage("Please be patient while this runs .....")
if sys.version_info[0] < 3:
arcpy.Intersect_analysis([bufferOutput, usfsOwnershipFeatureClass], intersectFeatureClass)
else:
arcpy.PairwiseIntersect_analysis([bufferOutput, usfsOwnershipFeatureClass], intersectFeatureClass)
arcpy.AddMessage("Completed Intersection")
arcpy.AddMessage(" ____________________________________________________________________")
arcpy.AddMessage("Updating UnitID field from intersection")
cur = arcpy.UpdateCursor(intersectFeatureClass)
field = "UnitID_FS"
# populating UnitID field with UnitID_FS field
for row in cur:
row.UnitID = str(row.getValue(field))
cur.updateRow(row)
del cur
arcpy.AddMessage("Repairing Geometry ......")
arcpy.RepairGeometry_management(intersectFeatureClass)
# make a copy of intersectFeatureClass for NOAA processing
arcpy.AddMessage("Selecting out Intermittents")
perennialFeatureClass = outputProjGDB + "\\" + item + "_perennial"
arcpy.MakeFeatureLayer_management(intersectFeatureClass, "lyr")
if item == "NHDFlowline_Merge":
arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION", "(FCode <> 46000) AND (FCode <> 46003)")
arcpy.AddMessage("Selecting out 46000 and 46003 for Flowlines")
else:
arcpy.SelectLayerByAttribute_management("lyr", "NEW_SELECTION", "(FCode <> 46003)")
arcpy.AddMessage("Selecting out 46003 for Waterbodies and Areas")
result = arcpy.GetCount_management("lyr")
count = int(result.getOutput(0))
arcpy.AddMessage("Total Number of Records: " + str(count))
if count > 0:
arcpy.AddMessage("Copying selected records to Geodatabase without intermittent data.")
arcpy.CopyFeatures_management("lyr", perennialFeatureClass)
arcpy.AddMessage("Dissolving Features")
dissolveFeatureClass = perennialFeatureClass + "_dissolved"
if sys.version_info[0] < 3:
arcpy.Dissolve_management(perennialFeatureClass, dissolveFeatureClass,
["UnitID", "GRANK_FIRE", "SNAME_FIRE", "CNAME_FIRE", "SOURCEFIRE",
"BUFFT_FIRE", "BUFFM_FIRE", "CMNT_FIRE", "INST_FIRE", "BUFF_DIST"], "", "SINGLE_PART")
else:
arcpy.PairwiseDissolve_analysis(perennialFeatureClass, dissolveFeatureClass,
["UnitID", "GRANK_FIRE", "SNAME_FIRE", "CNAME_FIRE", "SOURCEFIRE",
"BUFFT_FIRE", "BUFFM_FIRE", "CMNT_FIRE", "INST_FIRE", "BUFF_DIST"])
arcpy.AddMessage("Repairing Dissolved Geometry ......")
arcpy.RepairGeometry_management(dissolveFeatureClass)
arcpy.AddMessage("Dissolve and Repair complete")
arcpy.AddMessage(" ____________________________________________________________________")
interimfc = outputProjGDB + "\\" + item + "_geocomplete"
arcpy.CopyFeatures_management(dissolveFeatureClass, interimfc)
except arcpy.ExecuteError:
arcpy.AddError(arcpy.GetMessages(2))
except Exception as e:
arcpy.AddMessage(e)
# ---------------------------------------------------------------------------
#-------------------------------------------------------------
if __name__ == "__main__":
import tempfile
import os
from anuga.shallow_water import Domain, Reflective_boundary, \
Dirichlet_boundary
from anuga.pmesh.mesh_interface import create_mesh_from_regions
# Create a scenario outline.
polygon = [[0,0],[100,0],[100,100],[0,100]]
boundary_tags = {'wall':[0,1,3],'wave':[2]}
inner1_polygon = [[10,10],[20,10],[20,20],[10,20]]
inner2_polygon = [[30,30],[40,30],[40,40],[30,40]]
max_area = 1
interior_regions = [(inner1_polygon, 5),(inner2_polygon, 10)]
m = create_mesh_from_regions(polygon,
boundary_tags,
max_area,
interior_regions=interior_regions)
# Create an ungenerate file
    fd, fileName = tempfile.mkstemp(suffix=".txt")
    os.close(fd)  # mkstemp returns an open fd; close it before re-opening
    file = open(fileName, "w")
file.write(" 1 ?? ??\n\
90.0 90.0\n\
81.0 90.0\n\
81.0 81.0\n\
90.0 81.0\n\
90.0 90.0\n\
END\n\
2 ?? ??\n\
10.0 80.0\n\
10.0 90.0\n\
20.0 90.0\n\
10.0 80.0\n\
END\n\
END\n")
file.close()
# import the ungenerate file
m.import_ungenerate_file(fileName)
os.remove(fileName)
m.generate_mesh(maximum_triangle_area=max_area,verbose=False)
mesh_filename = "mesh.tsh"
m.export_mesh_file(mesh_filename)
# Run a simulation on the mesh
    domain = Domain(mesh_filename, use_cache=False)
    Br = Reflective_boundary(domain)
    Bd = Dirichlet_boundary([3,0,0])
    domain.set_boundary({'wall': Br, 'wave': Bd})
yieldstep = 0.1
finaltime = 20
for t in domain.evolve(yieldstep, finaltime):
domain.write_time()
# ---------------------------------------------------------------------------
# a tuple containing an inner tuple, a list, and another tuple
a = ((10, 20, 30), [40, 50, 60], (70, 80, 90))
print(a)
for p in a:
    print(p, type(p))
# the tuple itself is immutable, but its list element can be mutated in place
a[1][1] = 100
print(a)
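Rebinding a slot of the outer tuple, by contrast, fails:
try:
    a[0] = (1, 2, 3)  # tuples do not support item assignment
except TypeError as err:
    print("as expected:", err)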
# ---------------------------------------------------------------------------
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env('DJANGO_SECRET_KEY', default='NphyKBHddMHwgpJcH5BbAvDCk2Ut4uVG')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [
"localhost",
"0.0.0.0",
"127.0.0.1",
]
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ['debug_toolbar'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware'] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ['django_extensions'] # noqa F405
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin-console/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("Prabakaran Kumaresshan", "k_prabakaran@hotmail.com"),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
if DEBUG:
INSTALLED_APPS += ['drf_yasg']
# REST FRAMEWORK
REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] = ( # noqa F405
'rest_framework.authentication.TokenAuthentication',
)
REST_FRAMEWORK['DEFAULT_PERMISSION_CLASSES'] = ( # noqa F405
'rest_framework.permissions.IsAuthenticated',
)
# ---------------------------------------------------------------------------
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('load_ext', 'google.cloud.bigquery')
# In[6]:
get_ipython().run_cell_magic('bigquery', 'top_five_states ', 'SELECT\n state AS state,\n COUNT(1) AS birth_count\nFROM `bigquery-public-data.samples.natality`\nGROUP BY state\nORDER BY birth_count DESC\nLIMIT 6')
# In[3]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[14]:
top_five_states.plot(kind='bar', x='state', y='birth_count');
# In[15]:
top_five_states.plot(x='state');
# In[9]:
get_ipython().run_cell_magic('bigquery', 'top_five_states_with_Ohio', "SELECT\n state AS state, birth_count from (SELECT state,\n COUNT(1) AS birth_count\nFROM `bigquery-public-data.samples.natality`\nGROUP BY state\nORDER BY birth_count DESC\nLIMIT 7)\n \nUNION ALL\n\nSELECT\n state,\n COUNT(1) AS birth_count\nFROM `bigquery-public-data.samples.natality`\nWHERE state='OH'\nGROUP BY state\nORDER BY birth_count DESC\nLIMIT 7")
# In[10]:
top_five_states_with_Ohio.plot(kind='bar', x='state', y='birth_count');
# In[11]:
from google.cloud import bigquery
client = bigquery.Client()
sql = """
SELECT
mother_age,
COUNT(1) AS count,
year
FROM
`bigquery-public-data.samples.natality`
WHERE
year>1997 AND year < 2009
GROUP BY
mother_age, year
ORDER BY
count DESC
"""
df = client.query(sql).to_dataframe()
df.head()
# In[12]:
pivot_table = df.pivot(index='year', columns='mother_age', values='count')
pivot_table.plot(kind='bar', stacked=True, figsize=(13, 7));
# In[13]:
get_ipython().run_cell_magic('bigquery', 'father_age', 'SELECT\n year,\n AVG(father_age) AS AVG_father_age\n\nFROM\n `bigquery-public-data.samples.natality`\nWHERE\n year>1997 AND year < 2009\nGROUP BY year\nORDER BY\n year DESC\nLIMIT 11')
# In[16]:
father_age.plot(kind='bar', x='year', y='AVG_father_age');
# In[17]:
father_age.plot(x='year', y='AVG_father_age');
# ---------------------------------------------------------------------------
#!/usr/bin/env python3
import os
import pandas as pd
import sklearn
from asm_parser import AsmParser
from utils import search_path, search_seq  # search_seq is assumed to live in utils alongside search_path
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn
from sklearn.pipeline import Pipeline
import keras
import word2vec
from keras.models import Sequential
ALL_VULS = ['Underflow','Overflow','Multisig','CallDepth','TOD','TimeDep','Reentrancy','AssertFail',
'TxOrigin','CheckEffects','InlineAssembly','BlockTimestamp','LowlevelCalls','BlockHash','SelfDestruct']
def stem(op):
return op.rstrip('0123456789')
# TODO integrate ../feature/all-sol-to-opcodes.sh,
# because it should use THE SAME sol-to-opcode converter.
def sol_to_ops(solfile):
parser = AsmParser(solfile)
return [ op['name'] for op in parser.opcodes ]
def sol_to_data(sol):
ops = [ stem(op) for op in sol_to_ops(sol) ]
return pd.DataFrame({'Opcodes': [ ' '.join(ops) ]})
def f1(y_true, y_pred):
from keras import backend as K
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
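As a sketch, this metric can be passed to Keras at compile time; the architecture below is an invented placeholder:
def build_demo_model():
    # placeholder architecture; input dimension and layer sizes are invented
    from keras.layers import Dense
    model = Sequential([
        Dense(64, activation='relu', input_shape=(100,)),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[f1])
    return model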
class ModelRepo:
def __init__(self, repo_dir):
self.__repo_dir = repo_dir
        self.__models = {}  # cache loaded models so they are not re-read every time
def load(self, *keys):
rel_path = os.path.join(*keys)
model = self.__models.get(rel_path) #relative path as key
if model is None:
model = self.__load(rel_path)
self.__models[rel_path] = model
return model
def __load(self, rel_path):
path_prefix = os.path.join(self.__repo_dir, rel_path)
path, suffix = search_path(path_prefix)
if path is None:
return None
if suffix == '.pkl.z' or suffix == '.pkl':
return joblib.load(path)
elif suffix == '.h5':
return keras.models.load_model(path, custom_objects={'f1': f1})
elif suffix == '.w2v.bin':
return word2vec.load(path)
else:
raise Exception("unknown model type '%s' for '%s'" % (suffix, path))
def save(self, model, *keys):
rel_path = os.path.join(*keys)
self.__models[rel_path] = model
self.__save(model, rel_path)
def __save(self, model, rel_path):
if type(model) is sklearn.pipeline.Pipeline:
suffix = '.pkl.z'
func = joblib.dump
elif type(model) is keras.models.Sequential:
suffix = '.h5'
func = lambda m, p: m.save(p)
elif type(model) is word2vec.WordVectors:
suffix = '.w2v.bin'
func = None
else:
suffix = '.bin'
func = joblib.dump
path = os.path.join(self.__repo_dir, rel_path + suffix)
os.makedirs(os.path.dirname(path), exist_ok=True)
if func is not None:
func(model, path)
def _get_vul_op_data(vulfile, ftfile):
y_mapping = {'r': 1, 'o': 1, 'x': 0}
data = pd.merge(
left_index=True,
left=pd.read_csv(vulfile, compression='xz', index_col=0).replace(y_mapping),
right_index=True,
right=pd.read_csv(ftfile, compression='xz', index_col=0),
how='inner')
print('data len: %d' % len(data))
data.drop(columns='MD5', inplace=True)
data.drop_duplicates(inplace=True)
data.dropna(axis=0, how='any', inplace=True)
print('data len: %d' % len(data))
return data
# if cachefile is not None, using it to save/load data
def get_vul_op_data(vulfile, opfile, cachefile=None):
has_cache = cachefile is not None and os.path.exists(cachefile)
save_cache = cachefile is not None and not os.path.exists(cachefile)
data = pd.read_csv(cachefile, index_col=0) if has_cache else \
_get_vul_op_data(vulfile, opfile)
if save_cache:
data.to_csv(cachefile)
return data
def print_prediction(vuls, preds, pred_fts=None):
    if preds.ndim == 1:
        preds = preds.reshape(1, -1)
vul_width = max([len(v) for v in vuls])
for pred in preds:
for vul, result in zip(vuls, pred):
print('{0: <{1}} {2}'.format(vul, vul_width, result))
            # print decision features, if provided
            fts = pred_fts.get(vul) if pred_fts is not None else None
            if fts is not None:
                print('\n'.join([ " %.2f%%, %s" % (100*ratio, ft) for ratio, ft in fts ]))
__keepops = {
'ADD',
'SUB',
'SHA3',
'ADDRESS',
'BALANCE',
'ORIGIN',
'CALLER',
'CALLVALUE',
'CALLDATALOAD',
'CALLDATASIZE',
'CALLDATACOPY',
'CODESIZE',
'CODECOPY',
'GASPRICE',
'EXTCODESIZE',
'EXTCODECOPY',
'BLOCKHASH',
'COINBASE',
'TIMESTAMP',
'NUMBER',
'DIFFICULTY',
'GASLIMIT',
'GAS',
'CREATE',
'CALL',
'CALLCODE',
'RETURN',
'DELEGATECALL',
'SELFDESTRUCT',
'REVERT'
}
def is_stopop(w):
return w not in __keepops
def main():
def test_model_repo():
repo = ModelRepo('.model')
model = repo.load('knn', 'TOD')
repo.save(model, 'my', 'test')
model2 = repo.load('my', 'test')
def test_model_repo_keras():
repo = ModelRepo('.model')
model = repo.load('w2v-cnn')
repo.save(model, 'w2v-cnn-test')
def test_search_op_seq():
ops = sol_to_ops('../sc-src/0x0000000000b3F879cb30FE243b4Dfee438691c04.sol')
p, q = search_seq(ops, 'SUB ADD ADD SHA3 SUB'.split(), is_stopop)
print(p, q)
if 0 <= p < q:
print(ops[p:q])
if __name__ == '__main__':
main()
# ---------------------------------------------------------------------------
import numpy as np
from datetime import datetime
from pandas import DataFrame, concat, read_csv, Series
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.preprocessing import MinMaxScaler
from statsmodels.stats.diagnostic import acorr_ljungbox
from statsmodels.tsa.stattools import adfuller as ADF
import xgboost as xgb
from sklearn.model_selection import cross_val_score,KFold
from sklearn.model_selection import GridSearchCV
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn
from sklearn.metrics import mean_squared_error, r2_score
# convert date
def parser(x):
return datetime.strptime(x,"%Y-%m-%d")
#supervised
def timeseries_to_supervised(data, lag=1):
df = DataFrame(data)
columns = [df.shift(i) for i in range(1, lag+1)]
columns.append(df)
df = concat(columns, axis=1)
df.fillna(0, inplace=True)
return df
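# Example (illustrative): timeseries_to_supervised([1, 2, 3], lag=1) returns
#      0  0
# 0  0.0  1
# 1  1.0  2
# 2  2.0  3
# i.e. the left column is the lag-1 value (NaN filled with 0), the right the target.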
# diff series
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return Series(diff)
# invert diff value
def inverse_difference(history, yhat, interval=1):
return yhat + history[-interval]
# scale train and test data to [-1, 1] (min-max scaling)
def scale(train, test):
# fit scaler
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
# transform train
train = train.reshape(train.shape[0], train.shape[1])
train_scaled = scaler.transform(train)
    # transform test
test = test.reshape(test.shape[0], test.shape[1])
test_scaled = scaler.transform(test)
return scaler, train_scaled, test_scaled
# invert scale transform
def invert_scale(scaler, X, value):
new_row = [x for x in X] + [value]
array = np.array(new_row)
array = array.reshape(1, len(array))
inverted = scaler.inverse_transform(array)
return inverted[0, -1]
# model train
def fit_xgb(train):
xgb_model = xgb.XGBRegressor(seed=1)
X, y = train[:,0:-1], train[:,-1]
xgb_model.fit(X, y)
return xgb_model
# load data
series = read_csv('./data/DAU_tz0.csv')["value"]
print(series.head())
series.plot()
plt.show()
# make the series stationary by differencing
raw_values = series.values
diff_values = difference(raw_values, 1)
print(diff_values.head())
plt.plot(raw_values, label="raw")
plt.plot(diff_values, label="diff")
plt.legend()
plt.show()
print('ADF test of the differenced series')
print(ADF(diff_values)[1])
print('Ljung-Box white-noise test of the differenced series')
# a p-value > 0.05 would indicate a white-noise series
# ADF of the differenced series:
# (-5.626262330867963, 1.1155939417390791e-06, 13, 512, {'1%': -3.443186695642769, '5%': -2.86720156693697, '10%': -2.569785402984619}, 11125.656632822487)
# Ljung-Box white-noise test result:
# (array([13.95689179]), array([0.00018705]))
print(acorr_ljungbox(diff_values, lags=1)[1][0])
# convert the series to supervised learning data
supervised = timeseries_to_supervised(diff_values, 1)
print(supervised.head())
supervised_values = supervised.values
# train/test split
split_num = int(len(supervised_values)/3) or 1
train, test = supervised_values[0:-split_num], supervised_values[-split_num:]
# scaling
scaler, train_scaled, test_scaled = scale(train, test)
# model training
xgb_model = fit_xgb(train_scaled)
# save the model
joblib.dump(xgb_model, "./data/xgb_model")
# # load model
xgb_model = joblib.load("./data/xgb_model")
# validation
# test_scaled_x = test_scaled[:,0:-1]
#
# predictions_scaled = xgb_model.predict(test_scaled_x)
## pandas
# invert scaling
# predictions_scaled = predictions_scaled.reshape(len(predictions_scaled), 1)
# test_scaled_0 = [DataFrame(test_scaled[:,0])]
# test_scaled_0.append(DataFrame(predictions_scaled))
#
# con_predictions_scaled = concat(test_scaled_0,axis=1)
#
# inverted = scaler.inverse_transform(con_predictions_scaled)
# yhat = inverted[:,-1]
# validation
predictions = list()
for i in range(len(test_scaled)):
# make one-step forecast
X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = xgb_model.predict([X])[0]  # predict returns an array; take the scalar
# invert scaling
yhat = invert_scale(scaler, X, yhat)
# invert differencing
yhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)
# store forecast
predictions.append(yhat)
expected = raw_values[len(train) + i + 1]
mae = abs(yhat-expected)
print('data=%d, Predicted=%f, Expected=%f, mae=%.3f' % (i+1, yhat, expected, mae))
mae = np.average(np.abs(np.array(predictions) - raw_values[-split_num:]))
print("Test MAE: %.3f" % mae)
#report performance
rmse = sqrt(mean_squared_error(raw_values[-split_num:], predictions))
print('Test RMSE: %.3f' % rmse)
# line plot of observed vs predicted
plt.plot(raw_values[-split_num:], label="raw")
plt.plot(predictions, label="predict")
plt.title('XGBoost Test RMSE: %.3f' % rmse)
plt.legend()
plt.show()
# ---------------------------------------------------------------------------
from django.db.models.signals import pre_save
from django.dispatch import receiver
from group.models import Group
@receiver(pre_save, sender=Group)
def group_capitalize(sender, instance, **kwargs):
instance.first_name = instance.first_name.capitalize()
instance.last_name = instance.last_name.capitalize()
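A receiver like this only fires if the module is imported at startup; the usual pattern (assuming this file lives at group/signals.py, which is an assumption) is to import it from the app config:
# group/apps.py -- sketch; the "group.signals" module path is assumed
from django.apps import AppConfig

class GroupConfig(AppConfig):
    name = 'group'

    def ready(self):
        import group.signals  # noqa: F401  registers the pre_save receiver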
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
import json
import requests
import pandas as pd
import xmltodict
import re
"""
Client ID:
dj00aiZpPUlMRXp5U0loQmRXWiZzPWNvbnN1bWVyc2VjcmV0Jng9NTM-
Secret:
qU7QwCYwqbQShh7Z6166pLeW2uAmyIOIJ7XxvDLJ
"""
CLIENT_ID = "dj00aiZpPUlMRXp5U0loQmRXWiZzPWNvbnN1bWVyc2VjcmV0Jng9NTM-"
SECRET = "qU7QwCYwqbQShh7Z6166pLeW2uAmyIOIJ7XxvDLJ"
class Yahoo:
"""
{
'AuctionID': 'n357518070',
'Title': '即決 美品 保証 ライカ Leica ライカTL2 シルバー [ボディ] 18188',
'CategoryId': '2084305451',
'Seller': {'Id': 'h54410', 'ItemListUrl': 'https://auctions.yahooapis.jp/AuctionWebService/V2/sellingList?sellerID=h54410', 'RatingUrl': 'https://auctions.yahooapis.jp/AuctionWebService/V1/ShowRating?id=h54410'},
'ItemUrl': 'https://auctions.yahooapis.jp/AuctionWebService/V2/auctionItem?auctionID=n357518070',
'AuctionItemUrl': 'https://page.auctions.yahoo.co.jp/jp/auction/n357518070',
'Image': {'@width': '115', '@height': '100', '#text': 'https://wing-auctions.c.yimg.jp/sim?furl=auctions.c.yimg.jp/images.auctions.yahoo.co.jp/image/dr000/auc0507/users/0f2907125
761fd4ca5cfe7cc6bfe89d0bd91c1e4/i-img1200x1043-1563442277eo6exd993495.jpg&dc=1&sr.fs=20000'},
'OriginalImageNum': '7',
'CurrentPrice': '178000.00',
'Bids': '0',
'EndTime': '2019-09-09T19:43:41+09:00',
'BidOrBuy': '178000.00',
'IsReserved': 'false',
'CharityOption': {'Proportion': '0'},
'Option': {'FeaturedIcon': 'https://s.yimg.jp/images/auct/front/images/featured.gif', 'BuynowIcon': 'https://s.yimg.jp/images/auct/front/images/buynow.gif', 'EasyPaymentIcon': 'https://s.yimg.jp/images/pay/icon_s16.gif', 'IsBold': 'fal
se', 'IsBackGroundColor': 'false', 'IsOffer': 'false', 'IsCharity': 'false'},
'IsAdult': 'false'
}
"""
column_permutations = ("Title", "CurrentPrice", "BidOrBuy", "shopName", "ItemUrl", "CategoryId",)
property_permutations = ("Title", "CurrentPrice", "BidOrBuy", "shopName", "ItemUrl", "CategoryId",)
def __init__(self):
pass
    def search(self, query="", add_options=None):
url = "https://auctions.yahooapis.jp/AuctionWebService/V2/search"
item_parameters = {
'appid': CLIENT_ID,
'query': query,
"page": 1,
# "type": "all", # all(全文一致) or any(部分一致)
# "category": {id},
# 'output': 'json',
}
        if isinstance(add_options, dict) and add_options:
item_parameters.update(add_options)
response = requests.get(url, params=item_parameters)
self._assert_response(response)
return response
def make_dataframe(self, items):
df = pd.DataFrame(columns=list(self.column_permutations))
for i, item in enumerate(items):
df.loc[i] = self.get_values(item)
return df
def get_values(self, item):
values = []
def _get_value(key, item):
if key == "ItemUrl":
ptn = r".+\?auctionID=(.+)"
id_ = re.match(ptn, item.get(key, ""))
base_url = "https://page.auctions.yahoo.co.jp/jp/auction/"
url = base_url + id_.groups()[0] if id_ is not None else base_url
return url
if isinstance(key, str):
return item.get(key, None)
if isinstance(key, list):
return " ".join([item.get(k) for k in key])
if isinstance(key, dict):
for k, v in key.items():
return _get_value(v, getattr(item, k))
for key in self.property_permutations:
values.append(_get_value(key, item))
return values
def _assert_response(self, response):
        # verify that the request succeeded
        assert response.status_code == 200, "Response Error!! status=%s" % response.status_code
        # validate the returned value (item)
        # assert type(response.reply.searchResult.item) == list, print("Invalid Item!! ", response.reply)
    def _parse(self, response):
        # XML -> plain dict (the json round-trip normalizes xmltodict's OrderedDicts)
        return json.loads(json.dumps(xmltodict.parse(response.text)))
    def get_items(self, response):
        return self._parse(response)["ResultSet"]["Result"]["Item"]
    def get_results(self, response):
        return self._parse(response)["ResultSet"]["Result"]
    def get_total_pages(self, response):
        return self._parse(response)["ResultSet"]["@totalResultsReturned"]
def get_categories(self, category_id):
url = "https://auctions.yahooapis.jp/AuctionWebService/V2/categoryTree"
item_parameters = {
'appid': CLIENT_ID,
"category": category_id,
}
# if any(add_options) and isinstance(add_options, dict):
# item_parameters.update(add_options)
response = requests.get(url, params=item_parameters)
self._assert_response(response)
return response
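An end-to-end usage sketch of the client above; the query term is made up:
yahoo = Yahoo()
response = yahoo.search(query="Leica TL2", add_options={"page": 1})
items = yahoo.get_items(response)
df = yahoo.make_dataframe(items)
print(df[["Title", "CurrentPrice"]].head())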
# ---------------------------------------------------------------------------
#!/usr/bin/env python
import sys
from collections import defaultdict
from tabulate import tabulate
count = defaultdict(int)
with open(sys.argv[1]) as infile:
    for line in infile:
        words = line.split('/')
        model = words[1].split('-')
        # key: first letter of the fuel type + first letter of the spectrum
        cat = model[0][0] + model[2][0]
        count[cat] += 1
headers = ["Fuel", "therm", "inter", "fast", "mixed"]
table = [['HEU', count['ht'], count['hi'], count['hf'], count['hm']],
['LEU', count['lt'], count['li'], count['lf'], count['lm']],
['IEU', count['it'], count['ii'], count['if'], count['im']],
['Pu', count['pt'], count['pi'], count['pf'], count['pm']],
['U233', count['ut'], count['ui'], count['uf'], count['um']],
['Mix', count['mt'], count['mi'], count['mf'], count['mm']]]
print(tabulate(table, headers=headers, tablefmt="grid"))
# ---------------------------------------------------------------------------
#-*- coding:utf-8 -*-
import json
import time
import datetime
from sensitive_user_portrait.extensions import user_datastore
from sensitive_user_portrait.time_utils import ts2datetime, ts2date
from flask import Blueprint, url_for, render_template, request, abort, flash, session, redirect, make_response
from flask_security import login_required
mod = Blueprint('sensitive', __name__, url_prefix='/index')
# @mod.route('/')
# def loading():
# return render_template('index.html')
@mod.route('/overview/')
@login_required
def overview():
return render_template('index/overview.html')
@mod.route('/recommend_in/')
@login_required
def recommend_in():
return render_template('index/recommend_in.html')
"""
@mod.route('/tag/')
def tag():
return render_template('index/tag.html')
"""
@mod.route('/tag_search/')
@login_required
def tag_search():
return render_template('index/search/tag_search.html')
@mod.route('/tag_manage/')
@login_required
def tag_manage():
return render_template('index/tag_manage.html')
"""
@mod.route('/sensitive_words/')
def sensitive_words():
return render_template('index/sensitive.html')
"""
@mod.route('/sensiwords_manage/')
@login_required
def sensiwords_manage():
return render_template('index/sensiwords_manage.html')
@mod.route('/words_recommend/')
@login_required
def word_recommend():
return render_template('index/words_recommend.html')
@mod.route('/group_identify/')
@login_required
def group():
return render_template('index/group_identify.html')
'''
@mod.route('/group_results/')
def group_results():
name = request.args.get('name', '')
return render_template('index/group_results.html')
'''
@mod.route('/group_analysis/')
@login_required
def group_results():
task_name = request.args.get('task_name', '')
user_name = request.args.get('user', '')
return render_template('index/group_analysis.html', task_name=task_name, user_name=user_name)
@mod.route('/group_task/')
@login_required
def group_task():
return render_template('index/group_task.html')
@mod.route('/group_search/')
@login_required
def group_search():
return render_template('index/search/group_search.html')
@mod.route('/search_portrait/')
@login_required
def search_portrait():
return render_template('index/search/search_portrait.html')
@mod.route('/search_all/')
@login_required
def search_all():
return render_template('index/search/search_all.html')
@mod.route('/search_context/')
@login_required
def search_context():
return render_template('index/search/search_context.html')
@mod.route('/search_results/')
@login_required
def search_results():
stype = request.args.get('stype','')
uid = request.args.get('uid', '')
uname = request.args.get('uname', '')
location = request.args.get('location', '')
activity_geo = request.args.get('activity_geo', '')
adkeyword = request.args.get('adkeyword', '')
hashtag = request.args.get('hashtag', '')
psycho_status = request.args.get('psycho_status', '')
psycho_feature = request.args.get('psycho_feature', '')
domain = request.args.get('domain', '')
topic = request.args.get('topic', '')
tag = request.args.get('tag', '')
if (stype == '1'):
return render_template('index/search/search_results.html', uid=uid, uname=uname,\
location=location, activity_geo=activity_geo, adkeyword=adkeyword, hashtag=hashtag, psycho_status=psycho_status,\
psycho_feature=psycho_feature, domain=domain, topic=topic, tag=tag)
elif (stype == '2'):
return render_template('index/search/group_search_results.html', uid=uid, uname=uname,\
location=location, activity_geo=activity_geo, adkeyword=adkeyword, hashtag=hashtag, psycho_status=psycho_status,\
psycho_feature=psycho_feature, domain=domain, topic=topic, tag=tag)
elif (stype == '3'):
return render_template('index/search/tag_search_results.html', uid=uid, uname=uname,\
location=location, activity_geo=activity_geo, adkeyword=adkeyword, hashtag=hashtag, psycho_status=psycho_status,\
psycho_feature=psycho_feature, domain=domain, topic=topic, tag=tag)
else:
return render_template('index/search/search_results.html', uid=uid, uname=uname,\
location=location, activity_geo=activity_geo, adkeyword=adkeyword, hashtag=hashtag, psycho_status=psycho_status,\
psycho_feature=psycho_feature, domain=domain, topic=topic, tag=tag)
@mod.route('/text_search/')
@login_required
def text_search():
words_list = request.args.get('words_list', '')
return render_template('index/search/search_text_result.html', words_list=words_list)
@mod.route('/influence/')
@login_required
def influence():
return render_template('index/influence.html')
@mod.route('/monitor/')
@login_required
def monitor():
return render_template('index/monitor.html')
@mod.route('/connect/')
@login_required
def connect():
return render_template('index/search_connect.html')
@mod.route('/portrait/')
@login_required
def portrait():
return render_template('index/search_portrait.html')
@mod.route('/group_list/')
@login_required
def group_list():
return render_template('index/group_list.html')
@mod.route('/personal/')
@login_required
def personal():
uid = request.args.get('uid', '1215031834')
uid = str(uid)
return render_template('index/personal.html', uid=uid)
@mod.route('/personal_contect/')
@login_required
def personal_contect():
return render_template('index/contact.html')
@mod.route('/sensitive_person/')
@login_required
def sensitive_person():
uid = request.args.get('uid', '2697649164')
uid = str(uid)
return render_template('index/sensitive_person.html', uid=uid)
@mod.route('/contact/')
@login_required
def contact():
uid = request.args.get('uid', '1022866242')
return render_template('index/contact.html', uid=uid)
@mod.route('/sensing_weibo/')
@login_required
def sensing_weibo():
return render_template('index/sensing_weibo.html')
@mod.route('/sensing_analysis/')
@login_required
def sensing_analysis():
task_name = request.args.get('task_name','监督维权律师' )
user = request.args.get('user', 'admin')
ts = request.args.get('ts', '1378567800' )
return render_template('index/sensing_analysis.html', task_name=task_name,user=user,ts=ts)
# ---------------------------------------------------------------------------
import abc
from typing import Iterator, Tuple, Optional, Type, NamedTuple, Union, Callable
"""
We define `__all__` variable in order to set which names will be
imported when writing (from another file):
>>> from framework.graph_search.graph_problem_interface import *
"""
__all__ = ['GraphProblemState', 'GraphProblem', 'GraphProblemStatesPath', 'SearchNode',
'SearchResult', 'GraphProblemSolver',
'HeuristicFunction', 'HeuristicFunctionType', 'NullHeuristic']
class GraphProblemState(abc.ABC):
"""
This class defines an *interface* used to represent a state of a states-space, as learnt in class.
Notice that this is an *abstract* class. It does not represent a concrete state.
The inheritor class must implement the abstract methods defined by this class.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
This is an abstract method that must be implemented by the inheritor class.
This method is used to determine whether two given state objects represents the same state.
Notice: Never compare floats using `==` operator!
"""
...
@abc.abstractmethod
def __hash__(self):
"""
This is an abstract method that must be implemented by the inheritor class.
This method is used to create a hash of a state.
It is critical that two objects representing the same state would have the same hash!
A common implementation might be something in the format of:
>>> hash((self.some_field1, self.some_field2, self.some_field3))
        Notice: Do NOT give float fields to `hash()`. Otherwise, the above requirement would not be met.
"""
...
@abc.abstractmethod
def __str__(self):
"""
This is an abstract method that must be implemented by the inheritor class.
This method is used by the printing mechanism of `SearchResult`.
"""
class GraphProblem(abc.ABC):
"""
This class defines an *interface* used to represent a states-space, as learnt in class.
Notice that this is an *abstract* class. It does not represent a concrete states-space.
The inheritor class must implement the abstract methods defined by this class.
By defining these abstract methods, the inheritor class represents a well-defined states-space.
"""
"""Each problem might have a name as a string. This name is used in the solution printings."""
name: str = ''
def __init__(self, initial_state: GraphProblemState):
self.initial_state = initial_state
@abc.abstractmethod
def expand_state_with_costs(self, state_to_expand: GraphProblemState) -> Iterator[Tuple[GraphProblemState, float]]:
"""
This is an abstract method that must be implemented by the inheritor class.
This method represents the `Succ: S -> P(S)` function learnt in class.
It receives a state and iterates over the successor states.
Notice that this is an *Iterator*. Hence it should be implemented using the `yield` keyword.
For each successor, a pair of the successor state and the operator cost is yielded.
"""
...
@abc.abstractmethod
def is_goal(self, state: GraphProblemState) -> bool:
"""
This is an abstract method that must be implemented by the inheritor class.
It receives a state and returns whether this state is a goal.
"""
...
def solution_additional_str(self, result: 'SearchResult') -> str:
"""
This method may be overridden by the inheritor class.
It is used to enhance the printing method of a found solution.
We implemented it wherever needed - you do not have to care about it.
"""
return ''
class GraphProblemStatesPath(Tuple[GraphProblemState, ...]):
"""
This class represents a path of states.
It is just a tuple of GraphProblemState objects.
We define a dedicated class in order to implement the string formatting method.
"""
def __eq__(self, other):
assert isinstance(other, GraphProblemStatesPath)
if len(other) != len(self):
return False
return all(s1 == s2 for s1, s2 in zip(self, other))
def __str__(self):
return '[' + (', '.join(str(state) for state in self)) + ']'
class SearchNode:
"""
    An object of type `SearchNode` represents a node created by a search algorithm.
A node basically has a state that it represents, and potentially a parent node.
A node may also have its cost, the cost of the operator performed to reach this node,
and the f-score of this node (expanding_priority) when needed.
"""
def __init__(self, state: GraphProblemState,
parent_search_node: Optional['SearchNode'] = None,
operator_cost: float = 0,
expanding_priority: Optional[float] = None):
self.state: GraphProblemState = state
        self.parent_search_node: Optional['SearchNode'] = parent_search_node
self.operator_cost: float = operator_cost
        self.expanding_priority: Optional[float] = expanding_priority
        # The node's cost accumulates the parent's cost (added just below).
        self.cost: float = operator_cost
if self.parent_search_node is not None:
self.cost += self.parent_search_node.cost
def traverse_back_to_root(self) -> Iterator['SearchNode']:
"""
This is an iterator. It iterates over the nodes in the path
starting from this node and ending in the root node.
"""
node = self
while node is not None:
            assert isinstance(node, SearchNode)
yield node
node = node.parent_search_node
def make_states_path(self) -> GraphProblemStatesPath:
"""
:return: A path of *states* represented by the nodes
in the path from the root to this node.
"""
path = [node.state for node in self.traverse_back_to_root()]
path.reverse()
return GraphProblemStatesPath(path)
class SearchResult(NamedTuple):
"""
It is the type of the object that is returned by `solver.solve_problem()`.
It stores the results of the search.
"""
"""The solver that generated this result."""
solver: 'GraphProblemSolver'
"""The problem that the solver has attempted to solve."""
problem: GraphProblem
"""The node that represents the goal found. Set to `None` if no result had been found."""
final_search_node: Optional[SearchNode]
"""The number of expanded states during the search."""
nr_expanded_states: int
"""The time (in seconds) took to solve."""
solving_time: float
def __str__(self):
"""
Enhanced string formatting for the search result.
"""
res_str = '{problem_name: <35}' \
' {solver_name: <27}' \
' time: {solving_time:6.2f}' \
' #dev: {nr_expanded_states: <5}'.format(
problem_name=self.problem.name,
solver_name=self.solver.solver_name,
solving_time=self.solving_time,
nr_expanded_states=self.nr_expanded_states
)
# no solution found by solver
if self.final_search_node is None:
return res_str + ' NO SOLUTION FOUND !!!'
path = self.make_path()
res_str += ' total_cost: {cost:11.5f}' \
' |path|: {path_len: <3}' \
' path: {path}'.format(
cost=self.final_search_node.cost,
path_len=len(path),
path=str(path)
)
additional_str = self.problem.solution_additional_str(self)
if additional_str:
res_str += ' ' + additional_str
return res_str
def make_path(self):
return self.final_search_node.make_states_path()
class GraphProblemSolver(abc.ABC):
"""
This class is simply just an interface for graph search algorithms.
Each search algorithm that we are going to implement will inherit
from this class and implement the `solve_problem()` method.
"""
"""The solver name is used when printing the search results.
It may be overridden by the inheritor algorithm."""
solver_name: str = 'GraphProblemSolver'
@abc.abstractmethod
def solve_problem(self, problem: GraphProblem) -> SearchResult:
...
class HeuristicFunction(abc.ABC):
"""
This is an interface for a heuristic function.
Each implementation of a concrete heuristic function inherits from this class.
"""
"""Used by the solution printings.
Might be overridden by the inheritor heuristic."""
heuristic_name = ''
def __init__(self, problem: GraphProblem):
self.problem = problem
@abc.abstractmethod
def estimate(self, state: GraphProblemState) -> float:
"""
Calculates and returns the heuristic value for a given state.
This is an abstract method that must be implemented by the inheritor.
"""
...
"""Search algorithm which uses a heuristic may receive in their
constructor the type of the heuristic to use, rather than an
already-created instance of the heuristic."""
HeuristicFunctionType = Union[Type[HeuristicFunction], Callable[[GraphProblem], HeuristicFunction]]
class NullHeuristic(HeuristicFunction):
"""
This is a simple implementation of the null heuristic.
It might be used with A* for a sanity-check (A* should
behave exactly like UniformCost in that case).
"""
heuristic_name = '0'
def estimate(self, state: GraphProblemState) -> float:
return 0
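# A minimal sketch of a concrete problem implementing this interface (added for
# illustration; `CounterState`/`CounterProblem` are hypothetical names, not part
# of the framework): count from 0 up to a target, one +1 step of cost 1 each.
class CounterState(GraphProblemState):
    def __init__(self, value: int):
        self.value = value

    def __eq__(self, other):
        return isinstance(other, CounterState) and other.value == self.value

    def __hash__(self):
        return hash(self.value)

    def __str__(self):
        return str(self.value)


class CounterProblem(GraphProblem):
    name = 'Counter'

    def __init__(self, target: int):
        super().__init__(initial_state=CounterState(0))
        self.target = target

    def expand_state_with_costs(self, state_to_expand: GraphProblemState) -> Iterator[Tuple[GraphProblemState, float]]:
        # Single successor: increment the counter at an operator cost of 1.
        yield CounterState(state_to_expand.value + 1), 1.0

    def is_goal(self, state: GraphProblemState) -> bool:
        return state.value == self.target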
|
def test_palindrome(cara):
cara_original = cara
    # Normalize: strip surrounding whitespace, lowercase, and remove spaces.
    cara = "".join(cara.strip().lower().split(" "))
tmp = ""
for i in range(len(cara),0,-1):
tmp += cara[i-1]
    if tmp == cara:
        print("\"{}\" is a palindrome".format(cara_original))
    else:
        print("\"{}\" is not a palindrome".format(cara_original))
test_palindrome("ressasser")
test_palindrome("engage le jeu que je le gagne")
test_palindrome("radar")
test_palindrome("never odd or even")
test_palindrome("karine alla en Iran")
test_palindrome("un roc si biscornu")
|
"""
Napisz program obliczający średnią wartość temperatury w danym
tygodniu na podstawie temperatur wprowadzonych przez
użytkownika.
"""
suma_temperatur = 0
numer_dnia = 1
LICZBA_DNI_TYGODNIA = 7
while numer_dnia <= LICZBA_DNI_TYGODNIA:
    suma_temperatur += int(input(f"Enter the temperature for day {numer_dnia}: "))
numer_dnia += 1
srednia_temperatur = suma_temperatur / LICZBA_DNI_TYGODNIA
print(f"Srednia temperatura w tym tygodniu to {srednia_temperatur}")
|
"""Tests for respite.views."""
from datetime import datetime
from nose.tools import *
from django.conf import settings
from django.test.client import Client
from . import monkeys
from .project.app.models import Article, Author
import json
client = Client()
def setup():
Article.objects.create(
title = 'Title',
content = 'Content',
author = Author.objects.create(
name = 'John Doe'
),
created_at = datetime(1970, 1, 1)
)
def teardown():
Article.objects.all().delete()
@with_setup(setup, teardown)
def test_disregards_multiple_formats_in_accept_header():
response = client.get('/news/articles/', HTTP_ACCEPT='application/json, application/xml')
assert_equal(response['Content-Type'], 'text/html; charset=utf-8')
@with_setup(setup, teardown)
def test_index():
response = client.get('/news/articles/')
assert response.status_code == 200
@with_setup(setup, teardown)
def test_show():
response = client.get('/news/articles/1.json')
assert 'article' in json.loads(response.content)
assert response.status_code == 200
response = client.get('/news/articles/2', HTTP_ACCEPT='application/json')
assert response['Content-Type'] == 'application/json; charset=%s' % settings.DEFAULT_CHARSET
assert response.status_code == 404
response = client.get('/news/articles/2', HTTP_ACCEPT='text/html')
assert response['Content-Type'] == 'text/html; charset=%s' % settings.DEFAULT_CHARSET
assert response.status_code == 404
@with_setup(setup, teardown)
def test_new():
response = client.get('/news/articles/new.json')
assert 'form' in json.loads(response.content)
assert response.status_code == 200
@with_setup(setup, teardown)
def test_create():
response = client.post('/news/articles/', HTTP_ACCEPT='application/json')
assert 'form' in json.loads(response.content)
assert response.status_code == 400
response = client.post('/news/articles/', {
'title': 'Title',
'content': 'Content',
'author': '1',
'created_at': '1970-01-01 00:00:00'
}, HTTP_ACCEPT='application/json')
assert 'article' in json.loads(response.content)
assert response.status_code == 201
@with_setup(setup, teardown)
def test_edit():
response = client.get('/news/articles/1/edit')
assert response.status_code == 200
@with_setup(setup, teardown)
def test_replace():
from urllib import urlencode
response = client.put('/news/articles/1')
assert response.status_code == 400
response = client.put(
path = '/news/articles/1.json',
data = urlencode({
'title': 'Title',
'content': 'Content',
'author': '1',
'created_at': '1970-01-01 00:00:00'
}),
content_type='application/x-www-form-urlencoded'
)
assert response.status_code == 200
@with_setup(setup, teardown)
def test_update():
    from urllib import urlencode
    response = client.patch(
        path = '/news/articles/1',
        data = urlencode({
            'title': 'New title',
            'is_published': 'true'
        }),
        content_type='application/x-www-form-urlencoded'
    )
article = Article.objects.get(id=1)
assert article.title == 'New title'
assert article.is_published == True
@with_setup(setup, teardown)
def test_destroy():
response = client.delete('/news/articles/1')
assert response.status_code == 200
@with_setup(setup, teardown)
def test_custom_action():
response = client.get('/news/articles/1/preview')
assert response.status_code == 200
response = client.get('/news/articles/2/preview')
assert response.status_code == 404
@with_setup(setup, teardown)
def test_custom_action_with_error():
response = client.get('/news/articles/1337/preview.json')
assert json.loads(response.content) == {
'error': {
'message': 'The article could not be found.'
}
}
assert response.status_code == 404
@with_setup(setup, teardown)
def test_options():
response = client.options('/news/articles/', HTTP_ACCEPT='application/json')
assert response.status_code == 200
assert set(response['Allow'].split(', ')) == set(['GET', 'POST'])
@with_setup(setup, teardown)
def test_options_with_unsupported_format():
response = client.options('/news/articles/', HTTP_ACCEPT='text/html')
assert response.status_code == 406
@with_setup(setup, teardown)
def test_head():
response = client.head('/news/articles/1', HTTP_ACCEPT='application/json')
assert response.status_code == 200
assert response.content == ''
@with_setup(setup, teardown)
def test_unsupported_method():
response = client.post('/news/articles/1')
assert response.status_code == 405
@with_setup(setup, teardown)
def test_reverse():
from django.core.urlresolvers import reverse
assert reverse('articles')
assert reverse('article', args=[1])
assert reverse('edit_article', args=[1])
assert reverse('new_article')
assert reverse('preview_article', args=[1])
@with_setup(setup, teardown)
def test_content_types():
from django.conf import settings
from respite import formats
from .project.app.views import ArticleViews
response = client.get('/news/articles/1', HTTP_ACCEPT='*/*,application/json')
assert response['Content-Type'] == '%s; charset=%s' % (
formats.find(ArticleViews.supported_formats[0]).content_type,
settings.DEFAULT_CHARSET
)
response = client.get('/news/articles/1', HTTP_ACCEPT='unsupported/format, */*')
assert response['Content-Type'] == '%s; charset=%s' % (
formats.find(ArticleViews.supported_formats[0]).content_type,
settings.DEFAULT_CHARSET
)
|
from SPARQLWrapper import SPARQLWrapper, JSON
import json
from config import settings
import urllib2
class ExperimentalCondition:
def __init__(self,host,uri):
self.host = host
self.uri = uri
try:
#raise urllib2.URLError('Testing')
sparql = SPARQLWrapper(host)
sparql.setQuery("""
select distinct ?subject ?label ?num where {{
?subject ?predicate <{}> .
?subject <http://purl.org/dc/terms/title> ?label .
FILTER(STRENDS(STR(?label), "_measure")) .
?subject <http://www.ontology-of-units-of-measure.org/resource/om-2#hasNumericalValue> ?num .
}}
""".format(uri))
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
self.conditions = {}
for result in results["results"]["bindings"]:
self.conditions[result["label"]["value"]] = result["num"]["value"]
sparql.setQuery("""
select distinct ?plasmid where {{
<{}> <http://sd2e.org#plasmid> ?plasmid.
}}
""".format(uri))
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
self.conditions['plasmids'] = []
for result in results["results"]["bindings"]:
self.conditions["plasmids"].append(result["plasmid"]["value"])
except urllib2.URLError as e:
            print 'Warning, could not connect to {}'.format(host)
self.conditions = {}
self.conditions['uri'] = uri
    def to_string(self, keys, separator=","):
        return separator.join(map(lambda k: self.conditions[k], keys))
def __str__(self):
return json.dumps(self.conditions)
# testing method
if __name__ == '__main__':
e = ExperimentalCondition("https://hub-api.sd2e.org/sparql","http://hub.sd2e.org/user/nicholasroehner/rule_30/pAN1717_system_5_0p002_1/1")
print e
|
# FUNCTIONS
def hello():
print("Hello!")
hello()
def add(x, y):
result = x + y
print(result)
add(58, 83)
def names(name):
print(f"Hello {name}")
names("Bob")
def divide(dividend, divisor):
if divisor != 0:
print(dividend / divisor)
else:
print("Can't divide by 0")
divide(5, 10)
# DEFAULT VALUES
def add(x, y=8):
return x + y
result = add(5)
print(result)
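# A short extension (added for illustration): arguments can also be passed by
# keyword, which keeps calls readable when a function has several defaults.
def greet(name, greeting="Hello"):
    print(f"{greeting}, {name}!")

greet("Bob")                      # -> Hello, Bob!
greet("Bob", greeting="Welcome")  # -> Welcome, Bob!
 |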
'''
Usage:
"Usage: python getUserRepeatDist.py <path-to-tweets-pickle-dir>"
Behavior:
plots the number of tweets per user in our data set.
'''
import cPickle
import os
import sys
import time
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
directory_name = "partitioned-tweets/tweets-pickled" #default
field = 'name' #default
def setUpPlot(field):
fig = plt.figure()
p = fig.add_subplot(2,1,1)
p.set_yscale('log')
p.set_xscale('log')
p.set_xlabel("count tweets by user")
p.set_ylabel("num users with this tweet count")
return p
def addTweetFieldValueToCounter(counter, tweet, field):
if field in tweet:
value = tweet[field]
counter[value] = counter[value] + 1
else:
global num_absent
num_absent += 1
def processTweets(tweets, counter, field):
print(len(tweets))
for t in tweets:
addTweetFieldValueToCounter(counter, t, field)
nameToCount = Counter()
num_absent = 0
t0 = time.clock() #reports how long it takes to run
if len(sys.argv) != 2:
print "Usage: python getUserRepeatDist.py <path-to-tweets-pickle-dir>"
else:
directory_name = sys.argv[1]
print("dir: " + directory_name)
print("field: " + field)
try:
for filename in os.listdir(directory_name):
tweets_pickle_file = open(directory_name + "/" + filename, "rb")
tweets = cPickle.load(tweets_pickle_file)
tweets_pickle_file.close()
processTweets(tweets, nameToCount, field)
#testTweets(tweets)
except OSError:
print "File reading problem using directory '" + directory_name + "'."
#reporting
print ("num unique values: " + str(len(nameToCount.items())))
print("num tweets without value: " + str(num_absent))
names, counts = zip(*(nameToCount.items()))
maxCount = max(counts)
hist, counts = np.histogram(counts, maxCount + 1, (-.5, maxCount + .5))
#plot histogram
p = setUpPlot(field)
p.scatter(counts[:-1], hist)
plt.show()
print ("run time: " + str( time.clock() - t0))
|
import os
import json
from struct import unpack
import Tkinter, tkFileDialog
root = Tkinter.Tk()
root.withdraw()
file_path = tkFileDialog.askopenfilename()
hoff = 0
buff = ''
units = {}
def rao(offset, size):
return buff[offset:offset+size]
def read_instance(offset):
position = unpack("<fff", rao(offset, 12))
rotation = unpack("<ffff", rao(offset+12, 16))
return position, rotation
f = open(file_path, 'rb')
buff = f.read()
f.close()
unit_count = unpack("<i", rao(0, 4))[0]
headers_offset = unpack("<i", rao(8, 4))[0]
for header in xrange(unit_count):
header_offset = headers_offset + (32 * header)
idstring = unpack("<q", rao(header_offset, 8))[0]
instance_count = unpack("<i", rao(header_offset+12, 4))[0]
instance_offset = unpack("<i", rao(header_offset+20, 4))[0]
instances = []
for instance in xrange(instance_count):
instances.append(read_instance(instance_offset + (instance*28)))
units[idstring] = instances
print units
|
from selenium import webdriver
import time
import unittest
import HtmlTestRunner
class suite(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome('chromedriver.exe')
def test_busqueda(self):
self.driver.get("https://www.google.com/")
self.busqueda = self.driver.find_element_by_name('q')
self.busqueda.send_keys("selenium")
self.busqueda.submit()
time.sleep(3)
def test_scroll_down(self):
self.driver.get("https://www.amazon.com/")
time.sleep(2)
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
time.sleep(5)
def test_link_text(self):
self.driver.get("http://www.w3schools.com/")
time.sleep(3)
encontrar_link = self.driver.find_element_by_link_text("Learn PHP")
encontrar_link.click()
def tearDown(self):
self.driver.close()
self.driver.quit()
if __name__ == '__main__':
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='Resultados Test')) |
# MIT License
#
# Copyright (c) 2022 Quandela
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# As a special exception, the copyright holders of exqalibur library give you
# permission to combine exqalibur with code included in the standard release of
# Perceval under the MIT license (or modified versions of such code). You may
# copy and distribute such a combined system following the terms of the MIT
# license for both exqalibur and Perceval. This exception for the usage of
# exqalibur is limited to the python bindings used by Perceval.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import perceval as pcvl
from perceval.components.unitary_components import BS, PS
from perceval.utils.algorithms.optimize import optimize
from perceval.utils.algorithms.norm import fidelity, frobenius
import time
import random
pattern1 = pcvl.Circuit(2, name="pattern1") // (0, PS(pcvl.P("phi1"))) // (0, BS.H()) //\
(0, PS(pcvl.P("phi2"))) // (0, BS.H())
rewrite1 = pcvl.Circuit(2, name="rewrite1") // (0, PS(pcvl.P("phi1"))) // (0, BS.H(theta=BS.r_to_theta(0.42))) //\
(0, PS(pcvl.P("phi2"))) // (0, BS.H(theta=BS.r_to_theta(0.42))) // (0, PS(pcvl.P("phi3"))) //\
(1, PS(pcvl.P("phi4")))
pattern2 = pcvl.Circuit(1, name="pattern2") // PS(pcvl.P("phi1")) // PS(pcvl.P("phi2"))
rewrite2 = pcvl.Circuit(1, name="rewrite2") // PS(pcvl.P("phi"))
pattern3 = pcvl.Circuit(2, name="pattern3") // (1, PS(pcvl.P("phip"))) // (0, BS.H(theta=BS.r_to_theta(0.42)))
rewrite3 = pcvl.Circuit(2, name="rewrite3") // (0, PS(pcvl.P("phi1"))) // (0, BS.H(theta=BS.r_to_theta(0.42))) //\
(0, PS(pcvl.P("phi2"))) // (1, PS(pcvl.P("phi3")))
a = pcvl.Circuit.generic_interferometer(8,
lambda idx: pcvl.Circuit(2) // PS(phi=random.random()) // BS.H()
// PS(phi=random.random()) // BS.H(),
shape="rectangle")
u = a.compute_unitary(use_symbolic=False)
current = time.time()
def tick(description):
global current
dt = time.time()-current
print("%f\t%s" % (dt, description))
current = time.time()
rules = [(pattern1, rewrite1, "lightgreen"), (pattern2, rewrite2, "pink"), (pattern3, rewrite3, "lightgray")]
while True:
found_match = False
for pattern, rewrite, color in rules:
start_pos = 0
matched = a.match(pattern, browse=True, pos=start_pos)
        if matched is None:
            continue  # this rule has no remaining match; try the next one
        found_match = True
idx = a.isolate(list(matched.pos_map.keys()), color=color)
for k, v in matched.v_map.items():
pattern.param(k).set_value(v)
v = pattern.compute_unitary(False)
res = optimize(rewrite, v, frobenius, sign=-1)
        subc = rewrite.copy()
        a.replace(idx, subc, merge=True)
pattern.reset_parameters()
rewrite.reset_parameters()
start_pos = idx
print(pattern.name, res.fun, fidelity(u, a.compute_unitary(False)))
if not found_match:
break
|
# from tensorflow import keras
## keras backend for amd gpu --> IS ACTUALLY SLOWER THAN CPU ON 2015 iMac
# import os
# os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
# import keras
# from keras import backend as K
# from keras.models import Sequential
# from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, SpatialDropout2D, BatchNormalization
# from keras.datasets import mnist
# from keras.utils import to_categorical
# from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# CPU backend
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, ZeroPadding2D, SpatialDropout2D, BatchNormalization, AveragePooling1D
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from tensorflow.keras.callbacks import ModelCheckpoint
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-deep')
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from PIL import Image
np.random.seed(4666) # for reproducibility
def load_and_featurize_data():
# Read in data
df = pd.read_csv('data/classes.csv')
# Mask for Oak and Maple only
df = df[(df['class'] == 'oak') | (df['class'] == 'maple')]
# Train test split
train_df, val_df = train_test_split(df, test_size=0.25, random_state=4666)
val_df, test_df = train_test_split(val_df, test_size = .50, random_state=4666)
return train_df, val_df, test_df
def generators():
## Reduce overfit by shearing, zoom, flip
train_datagen = ImageDataGenerator(rescale=1./255., shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255.)
# Create generators for train, test, val to save memory
train_generator=train_datagen.flow_from_dataframe(dataframe=train_df, directory="./data/",
x_col="image_name", y_col="class", subset='training', batch_size=batch_size, seed=4666,
shuffle=True, class_mode="categorical", target_size=(100,100))
val_generator=test_datagen.flow_from_dataframe(dataframe=val_df, directory="./data/", x_col="image_name",
y_col="class", batch_size=batch_size, seed=4666, shuffle=True, class_mode="categorical",
target_size=(100,100))
test_generator=test_datagen.flow_from_dataframe(dataframe=test_df, directory="./data/", x_col="image_name",
y_col="class", batch_size=batch_size, seed=4666, shuffle=False, class_mode="categorical",
target_size=(100,100))
return train_generator, val_generator, test_generator
def define_model(nb_filters, kernel_size, input_shape, pool_size):
#nb_filters, kernel_size, input_shape, pool_size
model = Sequential() # model is a linear stack of layers (don't change)
model.add(Conv2D(input_shape=input_shape, filters=32, kernel_size=kernel_size, padding='valid', activation='relu'))
# model.add(Conv2D(filters=32, kernel_size=kernel_size, activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
# model.add(Dropout(0.5))
model.add(Conv2D(filters=32, kernel_size=kernel_size, padding='valid', activation='relu'))
# model.add(Conv2D(filters=32, kernel_size=kernel_size, padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.5))
model.add(Conv2D(filters=64, kernel_size=kernel_size, padding='valid', activation='relu'))
model.add(Conv2D(filters=64, kernel_size=kernel_size, padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.5))
model.add(Conv2D(filters=64, kernel_size=kernel_size, padding='valid', activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
# model.add(Conv2D(filters=64, kernel_size=kernel_size, padding='valid', activation='relu'))
# model.add(MaxPooling2D(pool_size=pool_size))
model.add(Dropout(0.5))
# #Flatten and Dense Layer
model.add(Flatten())
print('Model flattened out to ', model.output_shape)
# # #or subsetting
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
# model.add(Dense(128, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(nb_classes, activation='softmax'))
# many optimizers available, see https://keras.io/optimizers/#usage-of-optimizers
# suggest you KEEP loss at 'categorical_crossentropy' for this multiclass problem,
# and KEEP metrics at 'accuracy'
# suggest limiting optimizers to one of these: 'adam', 'adadelta', 'sgd'
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
def plot_hist(hist):
n=8
fig, ax = plt.subplots(figsize = (12,18), nrows=2)
fig.suptitle('Conv:32:32:64:64:64;Full:128+DP:0.5', fontsize =20)
ax[0].plot(hist.history['val_loss'], label='val', zorder=20)
ax[0].plot(hist.history['loss'], label='train', zorder=30)
ax[0].legend(loc='upper right')
ax[0].set(ylabel='loss', ylim=[0, 1], xlabel='epoch')
ax[0].xaxis.label.set_size(20)
ax[0].yaxis.label.set_size(20)
ax[0].legend(loc='upper right',prop={'size': 15})
ax[1].plot(hist.history['val_accuracy'], label='val', zorder=20)
ax[1].plot(hist.history['accuracy'], label='train', zorder=30)
h = hist.history['val_accuracy']
avg_h = [(sum(h[i:(i + n)])) / n for i in range(len(h) - n)]
ax[1].plot(np.arange(n, len(h)), avg_h, color='red', label='val_trend', zorder=40)
ax[1].set(ylabel='accuracy', ylim=[0.55, 1.05], xlabel='epoch')
ax[1].xaxis.label.set_size(20)
ax[1].yaxis.label.set_size(20)
ax[1].legend(loc='upper left',prop={'size': 15})
plt.axhline(0.8, color='darkgoldenrod', linestyle='--', zorder=10, alpha=0.5)
plt.axhline(0.9, color='silver', linestyle='--',zorder=10, alpha=0.5)
plt.axhline(0.95, color='goldenrod', linestyle='--',zorder=10, alpha=0.5)
plt.savefig('images/3232646464D128DP5BS32plot.png')
plt.show()
if __name__ == '__main__':
# important inputs to the model: don't changes the ones marked KEEP
batch_size = 32 # number of training samples used at a time to update the weights
    nb_classes = 2 # number of output classes (oak, maple) KEEP
    nb_epoch = 200 # number of passes through the entire train dataset before weights "final"
    img_rows, img_cols = 100, 100 # the size of the input images
    input_shape = (img_rows, img_cols, 3) # 3-channel (RGB) image input KEEP
nb_filters = 32 # number of convolutional filters to use
pool_size = (2, 2) # pooling decreases image size, reduces computation, adds translational invariance
kernel_size = (3, 3) # convolutional kernel size, slides over image to learn features
# strides = (1, 1)
train_df, val_df, test_df = load_and_featurize_data()
train_generator, val_generator, test_generator = generators()
model = define_model(nb_filters, kernel_size, input_shape, pool_size)
steps_per_epoch = int(train_df.shape[0] / batch_size)
# model.summary()
hist = model.fit(train_generator, steps_per_epoch = steps_per_epoch, epochs = nb_epoch, verbose = 1, validation_data=val_generator, validation_steps=val_df.shape[0]//batch_size)
#Call plot function
plot_hist(hist)
# during fit process watch train and test error simultaneously
score = model.evaluate(test_generator, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1]) # this is the one we care about
    # NOTE: to take effect, the checkpoint must be created before training and
    # passed to model.fit(..., callbacks=[checkpoint]); here it is unused.
    checkpoint = ModelCheckpoint(filepath='./temp/weights.hdf5', verbose=1, save_best_only=True)
|
#
#Copyright (c) 2018 Jie Zheng
#
from e3net.inventory.invt_base import get_inventory_base
from e3net.common.e3exception import e3_exception
from e3net.common.e3exception import E3_EXCEPTION_IN_USE
from e3net.common.e3exception import E3_EXCEPTION_NOT_FOUND
from e3net.common.e3exception import E3_EXCEPTION_INVALID_ARGUMENT
from e3net.common.e3exception import E3_EXCEPTION_OUT_OF_RESOURCE
from e3net.common.e3exception import E3_EXCEPTION_NOT_SUPPORT
from e3net.common.e3exception import E3_EXCEPTION_BE_PRESENT
from e3net.inventory.invt_vswitch_lan_zone import invt_get_vswitch_lan_zone
from e3net.inventory.invt_vswitch_ether_service_vlan import invt_list_vswitch_ether_service_vlans
#
# Try to find a free vlan id for a lanzone, but do not lock or reserve it;
# there is still a chance that a found vlan id will fail to be registered,
# but that seldom occurs.
#
def invt_search_vlan_id_for_lanzone(lanzone_id):
lanzone = invt_get_vswitch_lan_zone(lanzone_id)
vlans = invt_list_vswitch_ether_service_vlans()
flag = bytearray(4096)
for vlan_id in vlans:
vlan = vlans[vlan_id]
if vlan.lanzone_id != lanzone_id:
continue
assert (not flag[vlan.vlan_id])
flag[vlan.vlan_id] = 1
min_vlan = lanzone.min_vlan if lanzone.min_vlan >= 1 and lanzone.min_vlan <= 4095 else 1
max_vlan = lanzone.max_vlan if lanzone.max_vlan <= 4095 and lanzone.max_vlan >= 1 else 4095
    for i in range(min_vlan, max_vlan + 1):  # max_vlan is inclusive
if not flag[i]:
return i
raise e3_exception(E3_EXCEPTION_OUT_OF_RESOURCE)
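# A self-contained sketch of the same free-id scan (added for illustration;
# the inventory lookups above are replaced here by a plain list of used ids):
def _find_free_vlan(used_vlan_ids, min_vlan=1, max_vlan=4095):
    flag = bytearray(4096)  # occupancy bitmap indexed by vlan id
    for vid in used_vlan_ids:
        flag[vid] = 1
    for i in range(min_vlan, max_vlan + 1):
        if not flag[i]:
            return i
    return None  # out of resource

assert _find_free_vlan([1, 2, 3]) == 4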
|
import cv2
import numpy as np
def make_points(image, line):
    slope, intercept = line
y1 = int(image.shape[0])
y2 = int(y1*3/5)
x1 = int((y1-intercept)/slope)
x2 = int((y2-intercept)/slope)
return [[x1, y1, x2, y2]]
def average_slope_intercept(image, lines):
left_fit = []
right_fit = []
if lines is None:
return None
for line in lines:
for x1, y1, x2, y2 in line:
fit = np.polyfit((x1,x2), (y1,y2), 1)
slope = fit[0]
intercept = fit[1]
if slope<0:
left_fit.append((slope,intercept))
else:
right_fit.append((slope,intercept))
left_fit_average = np.average(left_fit, axis = 0)
right_fit_average = np.average(right_fit, axis = 0)
left_line = make_points(image, left_fit_average)
right_line = make_points(image, right_fit_average)
averaged_lines = [left_line, right_line]
return averaged_lines
def canny(image):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
kernel = 5
blur = cv2.GaussianBlur(gray,(kernel,kernel),0)
    canny = cv2.Canny(blur,50,150)
return canny
def display_lines(img, lines):
line_image = np.zeros_like(img)
if lines is not None:
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),10)
return line_image
def region_of_interest(canny):
height = canny.shape[0]
width = canny.shape[1]
mask = np.zeros_like(canny)
polygon = np.array([[
(350,height),(width,500),(450,275),]],np.int32)
cv2.fillPoly(mask, polygon,(255,255,255))
masked_image = cv2.bitwise_and(canny,mask)
return masked_image
image = cv2.imread('test_image.jpg')
lane_image = np.copy(image)
lane_canny = canny(lane_image)
cropped_canny = region_of_interest(lane_canny)
# 2 px rho resolution, 1-degree theta, 100-vote threshold; the image must stay 8-bit
lines = cv2.HoughLinesP(cropped_canny,2,np.pi/180,100,np.array([]),minLineLength = 40, maxLineGap = 5)
averaged_lines = average_slope_intercept(image,lines)
line_image = display_lines(lane_image,averaged_lines)
combo_image = cv2.addWeighted(lane_image, 0.8, line_image,1,0)
cv2.imshow('result', combo_image)
cv2.waitKey(0)
|
# ----------------------------------------------------------------------------
# GS Nodegraph Copyright 2019-2021 by Noah Rahm and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import wx
import uuid
from .socket import NodeSocket
from ..constants import *
class NodeBase(object):
def __init__(self, nodegraph, _id):
self._nodegraph = nodegraph
self._id = _id
self._idname = None
self._pos = wx.Point(0, 0)
self._size = wx.Size(NODE_DEFAULT_WIDTH, NODE_DEFAULT_HEIGHT)
self._selected = False
self._active = False
self._muted = False
self._sockets = []
self._parameters = {}
self._isoutput = False
self._label = ""
self._category = "DEFAULT"
self._headercolor = "#fff"
def _Init(self, idname):
self.InitSockets()
self.InitHeaderColor()
self.SetIdName(idname)
@property
def nodegraph(self):
return self._nodegraph
@nodegraph.setter
def nodegraph(self, nodegraph):
self._nodegraph = nodegraph
@property
def pos(self):
return self._pos
@pos.setter
def pos(self, pos: wx.Point) -> None:
self._pos = pos
@property
def size(self) -> wx.Size:
return self._size
@size.setter
def size(self, size: wx.Size) -> None:
self._size = size
@property
def selected(self) -> bool:
return self._selected
@selected.setter
def selected(self, selected: bool) -> None:
self._selected = selected
@property
def active(self) -> bool:
return self._active
@active.setter
def active(self, active: bool) -> None:
self._active = active
@property
def muted(self) -> bool:
return self._muted
@muted.setter
def muted(self, muted: bool) -> None:
self._muted = muted
def AddSocket(self, label, color, direction):
self.ArrangeSockets()
def HitTest(self, pos):
# Handle socket hittest
for socket in self._sockets:
if socket.HitTest(pos - self.pos):
return socket
def EditParameter(self, idname, value):
pass
def InitHeaderColor(self):
self._headercolor = NODE_CATEGORY_COLORS[self.GetCategory()]
def InitSockets(self):
        sockets = []
ins = []
outs = []
for param in self._parameters:
ins.append((param, "RENDERIMAGE"))
if self.IsOutputNode() is not True:
outs = [('Output', "RENDERIMAGE")]
x, y = self.pos
w, h = self.size
        lastcoord = self.size[1]  # fallback height when there are no sockets
        for i, p in enumerate(outs + ins):
socket_type = SOCKET_INPUT # Socket type IN
x = 0 # socket margin
if (p[0], p[1]) in outs:
x = w - x + 1
socket_type = SOCKET_OUTPUT # Socket type OUT
# We keep track of where the last socket is placed
lastcoord = 60 + 30 * i
socket = NodeSocket(p[0], p[1], self)
socket.direction = socket_type
socket.pos = wx.Point(x, 40 + (19 * i))
sockets.append(socket)
self._sockets = sockets
# Adjust the size of the node to fit
# the amount of sockets the node has.
self.size[1] = lastcoord
def IsOutputNode(self) -> bool:
""" Override method to set whether the node is the output or not. """
return self._isoutput
def GetLabel(self) -> str:
""" Override method to set the node label. """
return self._label
def GetCategory(self) -> str:
""" Override method to set the node category. """
return self._category
def GetIdname(self) -> str:
return self._idname
def SetIdName(self, idname):
self._idname = idname
def GetPosition(self) -> wx.Point:
return self.pos
def SetPosition(self, x, y):
self.pos = wx.Point(x, y)
def GetSize(self) -> wx.Size:
return (self.size[0], self.size[1])
def GetRect(self) -> wx.Rect:
return wx.Rect(self.pos[0], self.pos[1], self.size[0], self.size[1])
def IsSelected(self) -> bool:
return self.selected
def SetSelected(self, selected=True):
self.selected = selected
def IsActive(self) -> bool:
return self.active
def SetActive(self, active=True):
self.active = active
def IsMuted(self):
return self.muted
def SetMuted(self, muted=True):
self.muted = muted
def GetSockets(self) -> list:
return self._sockets
def Draw(self, dc):
x, y = self.GetPosition()
w, h = self.GetSize()
# Node body and border
if self.IsSelected() or self.IsActive():
dc.SetPen(wx.Pen(wx.Colour(255, 255, 255, 255), 2))
else:
dc.SetPen(wx.Pen(wx.Colour(31, 31, 31, 255), 2))
if self.IsMuted():
color = wx.Colour(70, 70, 70, 90)
else:
color = wx.Colour(70, 70, 70, 255)
dc.SetBrush(wx.Brush(color))
dc.DrawRoundedRectangle(x, y, w, h, 3)
# Node header and title
        dc.SetPen(wx.TRANSPARENT_PEN)
if self.IsMuted():
color = wx.Colour(70, 70, 70, 255)
else:
color = wx.Colour(self._headercolor)
dc.SetBrush(wx.Brush(color))
dc.DrawRoundedRectangle(x+1, y+1, w-3, 12, 2)
dc.DrawRectangle(x+1, y+10, w-3, 12)
fnt = self.nodegraph.GetFont()
dc.SetFont(fnt)
if self.IsMuted():
color = wx.Colour('#fff').ChangeLightness(60)
else:
color = wx.Colour('#fff').ChangeLightness(90)
dc.SetTextForeground(color)
dc.DrawText(self.GetLabel(), x+10, y+1)
for socket in self._sockets:
socket.Draw(dc)
|
from web.url.url import addURLToList,printURLList
from web.views import MainView
from web.views import AbortView
def loadurllist():
#GET URL
addURLToList(r'main(\/)?$','GET',MainView.MainView)
    addURLToList(r'favicon\.ico(\/)?$','GET',AbortView.AbortView404)
addURLToList(r'signup(\/)?$','GET',MainView.SignupView)
addURLToList(r'signin(\/)?$','GET',MainView.SigninView)
addURLToList(r'viewcart(\/)?$','GET',MainView.ViewCartView)
addURLToList(r'checkout(\/)?$','GET',MainView.CheckOutView)
addURLToList(r'clearcart(\/)?$','GET',MainView.ClearCartView)
addURLToList(r'logout(\/)?$','GET',MainView.LogOutView)
addURLToList(r'search(\/)?$','GET',MainView.SearchView)
addURLToList(r'product(\/)?$','GET',MainView.ProductView)
#POST URL
addURLToList(r'signup(\/)?$','POST',MainView.SignupView)
addURLToList(r'signin(\/)?$','POST',MainView.SigninView)
#
printURLList()
|
# section02-1
# Python crawling basics
# How to use urllib and basic scraping
import urllib.request as req
# file URLs
img_url = "https://newsimg.hankookilbo.com/cms/articlerelease/2019/04/29/201904291390027161_3.jpg"
html_url = "https://google.com"
# download paths
save_path1 = "c:/Users/win7/Documents/GitHub/Crawling_Study/save_folder/test1.jpg"
save_path2 = "c:/Users/win7/Documents/GitHub/Crawling_Study/save_folder/index.html"
# Exception handling
# urlretrieve(file_url, save_path) downloads a file and returns two values: the filename and the received headers
try:
file1, header1 = req.urlretrieve(img_url, save_path1)
file2, header2 = req.urlretrieve(html_url, save_path2)
except Exception as e:
print("Download failed")
print(e)
else:
    # print the header info
print(header1)
print(header2)
    # downloaded file info
print("Filename1 {}".format(file1))
print("Filename2 {}".format(file2))
print()
    # success
    print("Download succeeded")
|
from .cell import Cell
from collections import deque
class Phenotype:
def __init__(self, input_coords, output_coords, genome):
self.genome = genome
egg = Cell(0, 0, 1, 1, 1, {}, {}, genome)
self.inputs = [
Cell(x, y, 0, 0, 0, {}, {egg: 1}, None) for x, y in input_coords
]
self.outputs = [
Cell(x, y, 0, 0, 0, {egg: 1}, {}, None) for x, y in output_coords
]
egg.inputs = {c: 1 for c in self.inputs}
egg.outputs = {c: 1 for c in self.outputs}
self.develop(egg)
    def develop(self, egg):
        # Breadth-first development: keep letting cells divide until the genome's
        # division budget is exhausted or every cell in the queue has gone idle.
        max_divisions = self.genome.genes[0].parameters["max_divisions"].value
num_divisions = 0
idle = 0
q = deque([egg])
while num_divisions < max_divisions and idle < len(q):
new_cells = q.popleft().divide()
if len(new_cells) > 1:
num_divisions += 1
idle = 0
else:
idle += 1
q.extend(new_cells)
|
def update_matrix(matrix):
    # Work on a copy so writing distances does not corrupt the DFS input.
    res = [row[:] for row in matrix]
for row in range(len(matrix)):
for col in range(len(matrix[0])):
if matrix[row][col] == 1:
res[row][col] = dfs(matrix, row, col)
return res
def dfs(matrix, row, col):
if row < 0 or col < 0 or row >= len(matrix) or col >= len(matrix[0]) or matrix[row][col] == "X":
return float("inf")
if matrix[row][col] == 0:
return 0
temp = matrix[row][col]
matrix[row][col] = "X"
up = 1 + dfs(matrix, row-1, col)
down = 1 + dfs(matrix, row+1, col)
left = 1 + dfs(matrix, row, col-1)
right = 1 + dfs(matrix, row, col+1)
res = min(up, down, left, right)
matrix[row][col] = temp
return res
print(update_matrix([[0, 0, 0],
[0, 1, 0],
[1, 1, 1]]))
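# The DFS above re-explores cells and can blow up on larger grids; a standard
# alternative (added as a sketch, not part of the original) is a multi-source
# BFS from every 0-cell, which runs in O(rows * cols):
from collections import deque

def update_matrix_bfs(matrix):
    rows, cols = len(matrix), len(matrix[0])
    dist = [[0 if v == 0 else None for v in row] for row in matrix]
    queue = deque((r, c) for r in range(rows) for c in range(cols) if matrix[r][c] == 0)
    while queue:
        r, c = queue.popleft()
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if 0 <= nr < rows and 0 <= nc < cols and dist[nr][nc] is None:
                dist[nr][nc] = dist[r][c] + 1
                queue.append((nr, nc))
    return dist

print(update_matrix_bfs([[0, 0, 0],
                         [0, 1, 0],
                         [1, 1, 1]]))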
|
from __future__ import absolute_import
from __future__ import unicode_literals
import mock
import pytest
from git_code_debt.list_metrics import color
from git_code_debt.list_metrics import CYAN
from git_code_debt.list_metrics import main
from git_code_debt.list_metrics import NORMAL
from git_code_debt.util.compat import builtins
@pytest.yield_fixture
def print_mock():
with mock.patch.object(builtins, 'print') as print_mock:
yield print_mock
def test_list_metrics_smoke(print_mock):
# This test is just to make sure that it doesn't fail catastrophically
main([])
assert print_mock.called
def test_list_metrics_no_color_smoke(print_mock):
main(['--color', 'never'])
calls_args = [call[0][0] for call in print_mock.call_args_list]
    assert all('\033' not in arg for arg in calls_args)
def test_color_no_color():
ret = color('foo', 'bar', False)
assert ret == 'foo'
def test_colored():
ret = color('foo', CYAN, True)
assert ret == CYAN + 'foo' + NORMAL
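# A sketch matching the behavior the two tests above exercise (illustrative
# only; the real helper lives in git_code_debt.list_metrics):
def _color_sketch(text, color_code, colored):
    return color_code + text + NORMAL if colored else text

assert _color_sketch('foo', CYAN, True) == CYAN + 'foo' + NORMAL
assert _color_sketch('foo', CYAN, False) == 'foo'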
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging, os
log_file = os.getenv('PYTHON_LOG')
logging.basicConfig(filename=log_file, filemode='a', level=logging.DEBUG)
logger = logging.getLogger('basic_logging')
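# Minimal usage sketch (added for illustration). When PYTHON_LOG is unset,
# filename is None and basicConfig falls back to logging on stderr.
if __name__ == '__main__':
    logger.debug('this goes to the configured handler')
    logger.info('and so does this, since the level is DEBUG')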
|
import math
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_absolute_percentage_error
def symmetric_mean_absolute_percentage_error(actual, forecast):
F = np.array(forecast)
A = np.array(actual)
return 100 / max(len(A), len(F)) * np.sum(2 * np.abs(F - A) / (np.abs(A) + np.abs(F)))
def naive_forecast_error(actual):
A = np.array(actual)
P = np.roll(A, 1)
P[0] = A[0]
return np.sum(np.abs(A - P)) / (len(A) - 1)
def mean_absolute_scaled_error(actual, forecast):
F = np.array(forecast)
A = np.array(actual)
return np.sum(np.abs(F - A) / naive_forecast_error(actual)) / max(len(A), len(F))
def interval_accuracy_score(actual, lbs, ubs, conf):
A = np.array(actual)
L = np.array(lbs)
U = np.array(ubs)
return np.sum(U - L + np.where(A < L, 1, 0) * (L - A) * 2 / conf +
np.where(A > U, 1, 0) * (A - U) * 2 / conf) / len(A)
def eval_model(model, pred_step, counts):
model = model[::pred_step]
counts = counts[::pred_step]
lbs = [5, 5, 5, 5, 5, 5, 5, 5, 5]
ubs = [5, 5, 5, 5, 5, 5, 5, 5, 5]
conf = 0.95
print("Total Values: {}".format(min(len(model), len(counts))))
print("Forecast: {}".format(model))
print("Validation Set: {}".format(counts))
print()
    # sklearn metrics expect (y_true, y_pred); counts is the validation set.
    mae = mean_absolute_error(counts, model)
    rmse = math.sqrt(mean_squared_error(counts, model))
    mape = mean_absolute_percentage_error(counts, model)
    smape = symmetric_mean_absolute_percentage_error(counts, model)
    mase = mean_absolute_scaled_error(counts, model)
# ias = interval_accuracy_score(counts, lbs, ubs, conf)
print("Mean Absolute Error: {}".format(mae))
print("Root Mean Squared Error: {}".format(rmse))
print("Mean Absolute Percentage Error: {}".format(mape))
print("Symmetric Mean Absolute Percentage Error: {}".format(smape))
print("Mean Absolute Scaled Error: {}".format(mase))
# print("Mean Interval Accuracy Score: {}".format(ias))
return rmse
if __name__ == '__main__':
model = [5, 3, 8, 2, 1, 1, 7, 5, 5]
counts = [1, 2, 3, 4, 5, 6, 7, 8, 9]
eval_model(model, 1, counts)
|
import csv
from django.core.files.storage import FileSystemStorage
import datetime
from django.db.models import Q
from django.shortcuts import redirect, render
from django.views.generic import TemplateView, FormView, DetailView
from application.forms import CreateDispatcherApplicationsForm, ApplicationByCommissionerCreateForm, \
RefusalOfApplicationByDispatcherCreateForm, ChangePointInApplicationByDispatcherForm, DecisionCreateForm, \
CloseCommissionerApplicationForm, PhotoForApplicationByCommissionerForm, GetReportForm
from application.models import ApplicationByDispatcher, ApplicationByCommissioner, RefusalOfApplicationByDispatcher, \
Decision, ClosedApplication, PhotoForApplicationByCommissioner
from road.models import Point
class DispatcherApplicationsListView(TemplateView):
template_name = 'application/dispatcher_application_list.html'
def get_context_data(self):
context = super(DispatcherApplicationsListView, self).get_context_data()
dispatcher = self.request.user.dispatcher
context['applications_by_dispatcher'] = ApplicationByDispatcher.objects.filter(
dispatcher=dispatcher
).order_by(
'-created_at'
)
dispatcher_point_id = Point.objects.filter(section=dispatcher.section).values_list('id', flat=True)
context['applications_by_commissioners'] = ApplicationByCommissioner.objects.filter(
point__id__in=dispatcher_point_id
).order_by(
'-created_at'
)
return context
class DispatcherApplicationsCreateView(FormView):
form_class = CreateDispatcherApplicationsForm
template_name = 'application/create_disp_app.html'
def get_form_kwargs(self):
user = self.request.user
form_kwargs = super(DispatcherApplicationsCreateView, self).get_form_kwargs()
form_kwargs.update({'user': user})
return form_kwargs
def form_valid(self, form):
cleaned_data = form.cleaned_data
dispatcher = self.request.user.dispatcher
number = ApplicationByDispatcher.get_number_order()
ApplicationByDispatcher.objects.create(
dispatcher=dispatcher,
number=number,
point=Point.objects.get(id=cleaned_data['point']),
location=cleaned_data['location'],
status=ApplicationByDispatcher.STATUS.SENT,
comment=cleaned_data['comment'],
come_from_user=cleaned_data['come_from_user'],
)
return redirect('application:dispatcher_list')
class DispatcherApplicationsDetailView(DetailView):
model = ApplicationByDispatcher
template_name = 'application/detail_disp_app.html'
class DispatcherApplicationsAcceptedView(DetailView):
model = ApplicationByDispatcher
template_name = 'application/disp_app_accepted.html'
def post(self, request, *args, **kwargs):
        app = self.get_object()
app.status = ApplicationByDispatcher.STATUS.ACCEPTED
app.commissioner = self.request.user.commissioner
app.save(update_fields=['status', 'commissioner'])
return redirect('application:commissioner_list')
class ApplicationByCommissionerListView(TemplateView):
template_name = 'application/commissioner_app_list.html'
def get_context_data(self):
context = super(ApplicationByCommissionerListView, self).get_context_data()
commissioner = self.request.user.commissioner
applications_by_dispatcher = ApplicationByDispatcher.objects.filter(
Q(point=commissioner.point, status=ApplicationByDispatcher.STATUS.SENT) | Q(commissioner=commissioner)
).order_by(
'-created_at'
)
context['applications_by_dispatcher'] = applications_by_dispatcher
context['applications_by_commissioner'] = ApplicationByCommissioner.objects.filter(
commissioner=commissioner
).order_by(
'-created_at'
)
return context
class ApplicationByCommissionerCreateView(FormView):
form_class = ApplicationByCommissionerCreateForm
form_images_class = PhotoForApplicationByCommissionerForm
template_name = 'application/create_com_app.html'
def get(self, request, *args, **kwargs):
form = self.form_class()
form_images = self.form_images_class()
return render(request, self.template_name, {'form': form, 'form_images': form_images})
def form_valid(self, form):
cleaned_data = form.cleaned_data
commissioner = self.request.user.commissioner
number = ApplicationByCommissioner.get_number_order()
app = ApplicationByCommissioner.objects.create(
status=ApplicationByCommissioner.STATUS.SENT,
number=number,
commissioner=commissioner,
point=commissioner.point,
need_ambulance=cleaned_data['need_ambulance'],
need_police=cleaned_data['need_police'],
need_mchs=cleaned_data['need_mchs'],
need_tow_truck=cleaned_data['need_tow_truck'],
comment=cleaned_data['comment'],
location=cleaned_data['location']
)
form_images = self.form_images_class(self.request.POST, self.request.FILES, request=self.request)
form_images.save_for(app)
if self.kwargs.get('pk'):
app_by_dispatcher = ApplicationByDispatcher.objects.get(id=self.kwargs['pk'])
app.application_by_dispatcher = app_by_dispatcher
app.number = app_by_dispatcher.number
app.save(update_fields=['application_by_dispatcher', 'number'])
app_by_dispatcher.status = ApplicationByDispatcher.STATUS.APPLICATION_BY_COM_CREATED
app_by_dispatcher.commissioner = commissioner
app_by_dispatcher.save(update_fields=['status', 'commissioner'])
return redirect('application:commissioner_list')
class ApplicationByCommissionerDetailView(DetailView):
model = ApplicationByCommissioner
template_name = 'application/commissioner_app_detail.html'
def get_context_data(self, **kwargs):
context = super(ApplicationByCommissionerDetailView, self).get_context_data()
application_by_commissioner = ApplicationByCommissioner.objects.get(id=self.kwargs.get('pk'))
decision = Decision.objects.filter(
application_by_commissioner=application_by_commissioner
).first()
context['decision'] = decision
closed_app = ClosedApplication.objects.filter(application_by_commissioner=application_by_commissioner).first()
context['closed_app'] = closed_app
images = PhotoForApplicationByCommissioner.objects.filter(
application_by_commissioner=application_by_commissioner
)
context['images'] = images
return context
class RefusalOfApplicationByDispatcherCreateView(FormView):
form_class = RefusalOfApplicationByDispatcherCreateForm
template_name = 'application/create_refusal.html'
def form_valid(self, form):
app = ApplicationByDispatcher.objects.get(id=self.kwargs.get('pk'))
cleaned_data = form.cleaned_data
commissioner = self.request.user.commissioner
RefusalOfApplicationByDispatcher.objects.create(
comment=cleaned_data['comment'],
commissioner=commissioner,
application_by_dispatcher=app
)
app.status = ApplicationByDispatcher.STATUS.RETURNED
app.point = None
app.save(update_fields=['status', 'point'])
return redirect('application:commissioner_list')
class ChangePointInApplicationByDispatcherView(FormView):
form_class = ChangePointInApplicationByDispatcherForm
template_name = 'application/change_point_in_app.html'
def get_form_kwargs(self):
user = self.request.user
form_kwargs = super(ChangePointInApplicationByDispatcherView, self).get_form_kwargs()
form_kwargs.update({'user': user})
return form_kwargs
def get_context_data(self, **kwargs):
ctx = super(ChangePointInApplicationByDispatcherView, self).get_context_data(**kwargs)
application = ApplicationByDispatcher.objects.get(id=self.request.GET.get('pk'))
ctx['application'] = application
ctx['refusals'] = RefusalOfApplicationByDispatcher.objects.filter(application_by_dispatcher=application)
return ctx
def form_valid(self, form):
app = ApplicationByDispatcher.objects.get(id=self.request.POST.get('app_id'))
app.point = Point.objects.get(id=self.request.POST.get('point'))
app.status = ApplicationByDispatcher.STATUS.SENT
app.save(update_fields=['point', 'status'])
return redirect('application:dispatcher_list')
class DecisionCreateView(FormView):
form_class = DecisionCreateForm
template_name = 'application/create_decision.html'
def form_valid(self, form):
app = ApplicationByCommissioner.objects.get(id=self.kwargs.get('pk'))
cleaned_data = form.cleaned_data
dispatcher = self.request.user.dispatcher
Decision.objects.create(
application_by_commissioner=app,
dispatcher=dispatcher,
called_mchs=cleaned_data['called_mchs'],
called_ambulance=cleaned_data['called_ambulance'],
called_police=cleaned_data['called_police'],
called_tow_truck=cleaned_data['called_tow_truck'],
comment=cleaned_data['comment']
)
app.status = ApplicationByCommissioner.STATUS.DECISION
app.save(update_fields=['status'])
return redirect('application:dispatcher_list')
class CloseCommissionerApplicationView(FormView):
form_class = CloseCommissionerApplicationForm
template_name = 'application/close_application.html'
def form_valid(self, form):
cleaned_data = form.cleaned_data
app = ApplicationByCommissioner.objects.get(id=self.kwargs.get('pk'))
ClosedApplication.objects.create(
application_by_commissioner=app,
comment=cleaned_data['comment']
)
app.status = ApplicationByCommissioner.STATUS.CLOSED
app.save(update_fields=['status'])
return redirect('application:commissioner_list')
class GetReportView(FormView):
template_name = 'application/get_report.html'
form_class = GetReportForm
def form_valid(self, form, *args, **kwargs):
cleaned_data = form.cleaned_data
section = self.request.user.dispatcher.section
applications = ApplicationByCommissioner.objects.filter(
point__section=section,
created_at__gte=cleaned_data['date_from'],
created_at__lte=cleaned_data['date_to']
)
if cleaned_data['status']:
applications = applications.filter(status=cleaned_data['status'])
kwargs = {
'successful_date_from': cleaned_data['date_from'],
'successful_date_to': cleaned_data['date_to']
}
decisions = Decision.objects.filter(application_by_commissioner__in=applications)
closed_applications = ClosedApplication.objects.filter(application_by_commissioner__in=applications)
data = [
['Номер заявки', 'Время создания', 'Статус', 'Время заявки диспетчера', 'Комментарий диспетчера',
'Комиссар', 'Точка', 'Вызов скорой',
'Вызов полиции', 'Вызов МЧС', 'Вызов эвакуатора', 'Комментарий комиссара', 'Решение принял диспетчер',
'Время принятия решения', 'Комментария решения', 'Время закрытия заявки', 'Комментарий к закрытию']
]
for app in applications:
app_dict = {
'number': str(app.number),
'time_created': str(app.created_at.strftime('%d.%m.%y %H:%M')),
'status': str(app.status),
'app_disp': str(app.application_by_dispatcher.created_at.strftime('%d.%m.%y %H:%M')) if app.application_by_dispatcher else '-',
'comment_disp': str(app.application_by_dispatcher.comment) if app.application_by_dispatcher else '-',
'commissioner': str(app.commissioner.get_full_name()),
'point': str(app.point.get_name_with_section()),
'need_ambulance': '+' if app.need_ambulance else '-',
'need_police': '+' if app.need_police else '-',
'need_mchs': '+' if app.need_mchs else '-',
'need_tow_truck': '+' if app.need_tow_truck else '-',
'comment': str(app.comment),
}
if decisions.filter(application_by_commissioner=app).exists():
decision = decisions.filter(application_by_commissioner=app).first()
app_dict['decision_disp'] = str(decision.dispatcher.get_full_name())
app_dict['decision_created'] = str(decision.created_at.strftime('%d.%m.%y %H:%M'))
app_dict['decision_comment'] = str(decision.comment)
if closed_applications.filter(application_by_commissioner=app).exists():
closed = closed_applications.filter(application_by_commissioner=app).first()
app_dict['closed_created'] = str(closed.created_at.strftime('%d.%m.%y %H:%M'))
app_dict['closed_comment'] = str(closed.comment)
data.append(list(app_dict.values()))
        with open(f'{cleaned_data["date_from"]}-{cleaned_data["date_to"]}-{datetime.datetime.now().strftime("%H-%M")}.csv', 'w+', newline='') as csv_file:
writer = csv.writer(csv_file)
for row in data:
writer.writerow(row)
return redirect('application:get_report')
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
#
# This module contains all the classes representing the Schema of the configuration file.
# These classes are created by following marshmallow syntax.
#
import copy
import logging
import re
from typing import List
from marshmallow import ValidationError, fields, post_load, pre_dump, validate, validates, validates_schema
from pcluster.config.cluster_config import (
AdditionalPackages,
AllocationStrategy,
AmiSearchFilters,
AwsBatchClusterConfig,
AwsBatchComputeResource,
AwsBatchQueue,
AwsBatchQueueNetworking,
AwsBatchScheduling,
AwsBatchSettings,
CapacityReservationTarget,
CapacityType,
CloudWatchDashboards,
CloudWatchLogs,
ClusterDevSettings,
ClusterIam,
ComputeSettings,
CustomAction,
CustomActions,
Dashboards,
Database,
Dcv,
DirectoryService,
Dns,
Efa,
EphemeralVolume,
ExistingFileCache,
ExistingFsxOntap,
ExistingFsxOpenZfs,
FlexibleInstanceType,
GpuHealthCheck,
HeadNode,
HeadNodeImage,
HeadNodeNetworking,
HeadNodeSsh,
HealthChecks,
Iam,
Image,
Imds,
IntelSoftware,
LocalStorage,
LoginNodes,
LoginNodesIam,
LoginNodesImage,
LoginNodesNetworking,
LoginNodesPool,
LoginNodesSsh,
LogRotation,
Logs,
Monitoring,
PlacementGroup,
Proxy,
QueueImage,
QueueUpdateStrategy,
Raid,
Roles,
RootVolume,
S3Access,
SharedEbs,
SharedEfs,
SharedFsxLustre,
SlurmClusterConfig,
SlurmComputeResource,
SlurmComputeResourceNetworking,
SlurmFlexibleComputeResource,
SlurmQueue,
SlurmQueueNetworking,
SlurmScheduling,
SlurmSettings,
Timeouts,
)
from pcluster.config.common import BaseTag
from pcluster.config.update_policy import UpdatePolicy
from pcluster.constants import (
DELETION_POLICIES,
DELETION_POLICIES_WITH_SNAPSHOT,
FILE_CACHE,
FILE_CACHE_ID_REGEX,
FILECACHE,
FSX_LUSTRE,
FSX_ONTAP,
FSX_OPENZFS,
FSX_VOLUME_ID_REGEX,
IAM_INSTANCE_PROFILE_REGEX,
IAM_POLICY_REGEX,
IAM_ROLE_REGEX,
LUSTRE,
MAX_SLURM_NODE_PRIORITY,
MIN_SLURM_NODE_PRIORITY,
ONTAP,
OPENZFS,
PCLUSTER_AMI_ID_REGEX,
SUPPORTED_OSES,
)
from pcluster.schemas.common_schema import (
AdditionalIamPolicySchema,
BaseDevSettingsSchema,
BaseSchema,
DeploymentSettingsSchema,
)
from pcluster.schemas.common_schema import ImdsSchema as TopLevelImdsSchema
from pcluster.schemas.common_schema import (
TagSchema,
get_field_validator,
is_cidr_or_prefix_list,
validate_no_duplicate_tag,
validate_no_reserved_tag,
)
from pcluster.validators.cluster_validators import EFS_MESSAGES, FSX_MESSAGES
# pylint: disable=C0302
LOGGER = logging.getLogger(__name__)
# ---------------------- Storage ---------------------- #
class HeadNodeRootVolumeSchema(BaseSchema):
"""Represent the RootVolume schema for the Head node."""
volume_type = fields.Str(
validate=get_field_validator("volume_type"),
metadata={
"update_policy": UpdatePolicy(
UpdatePolicy.UNSUPPORTED, action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"]
)
},
)
iops = fields.Int(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
size = fields.Int(
metadata={
"update_policy": UpdatePolicy(
UpdatePolicy.UNSUPPORTED,
fail_reason=UpdatePolicy.FAIL_REASONS["ebs_volume_resize"],
action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"],
)
}
)
throughput = fields.Int(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
encrypted = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
delete_on_termination = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return RootVolume(**data)
class QueueRootVolumeSchema(BaseSchema):
"""Represent the RootVolume schema for the queue."""
size = fields.Int(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
encrypted = fields.Bool(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
volume_type = fields.Str(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
iops = fields.Int(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
throughput = fields.Int(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return RootVolume(**data)
class RaidSchema(BaseSchema):
"""Represent the schema of the parameters specific to Raid. It is a child of EBS schema."""
raid_type = fields.Int(
required=True,
data_key="Type",
validate=validate.OneOf([0, 1]),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
number_of_volumes = fields.Int(
validate=validate.Range(min=2, max=5), metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Raid(**data)
class EbsSettingsSchema(BaseSchema):
"""Represent the schema of EBS."""
volume_type = fields.Str(
validate=get_field_validator("volume_type"),
metadata={
"update_policy": UpdatePolicy(
UpdatePolicy.UNSUPPORTED, action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"]
)
},
)
iops = fields.Int(metadata={"update_policy": UpdatePolicy.SUPPORTED})
size = fields.Int(
metadata={
"update_policy": UpdatePolicy(
UpdatePolicy.UNSUPPORTED,
fail_reason=UpdatePolicy.FAIL_REASONS["ebs_volume_resize"],
action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"],
)
}
)
kms_key_id = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
throughput = fields.Int(metadata={"update_policy": UpdatePolicy.SUPPORTED})
encrypted = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
snapshot_id = fields.Str(
validate=validate.Regexp(r"^snap-[0-9a-z]{8}$|^snap-[0-9a-z]{17}$"),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
volume_id = fields.Str(
validate=validate.Regexp(r"^vol-[0-9a-z]{8}$|^vol-[0-9a-z]{17}$"),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
raid = fields.Nested(RaidSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
deletion_policy = fields.Str(
validate=validate.OneOf(DELETION_POLICIES_WITH_SNAPSHOT), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
class HeadNodeEphemeralVolumeSchema(BaseSchema):
"""Represent the schema of ephemeral volume.It is a child of storage schema."""
mount_dir = fields.Str(
validate=get_field_validator("file_path"), metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return EphemeralVolume(**data)
class QueueEphemeralVolumeSchema(BaseSchema):
"""Represent the schema of ephemeral volume.It is a child of storage schema."""
mount_dir = fields.Str(
validate=get_field_validator("file_path"), metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return EphemeralVolume(**data)
class HeadNodeStorageSchema(BaseSchema):
"""Represent the schema of storage attached to a node."""
root_volume = fields.Nested(HeadNodeRootVolumeSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
ephemeral_volume = fields.Nested(
HeadNodeEphemeralVolumeSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LocalStorage(**data)
class QueueStorageSchema(BaseSchema):
"""Represent the schema of storage attached to a node."""
root_volume = fields.Nested(QueueRootVolumeSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
ephemeral_volume = fields.Nested(
QueueEphemeralVolumeSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LocalStorage(**data)
class EfsSettingsSchema(BaseSchema):
"""Represent the EFS schema."""
encrypted = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
kms_key_id = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
performance_mode = fields.Str(
validate=validate.OneOf(["generalPurpose", "maxIO"]), metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
throughput_mode = fields.Str(
validate=validate.OneOf(["provisioned", "bursting"]), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
provisioned_throughput = fields.Int(
validate=validate.Range(min=1, max=1024), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
file_system_id = fields.Str(
validate=validate.Regexp(r"^fs-[0-9a-z]{8}$|^fs-[0-9a-z]{17}$"),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
deletion_policy = fields.Str(
validate=validate.OneOf(DELETION_POLICIES), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
encryption_in_transit = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
iam_authorization = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@validates_schema
def validate_file_system_id_ignored_parameters(self, data, **kwargs):
"""Return errors for parameters in the Efs config section that would be ignored."""
# If file_system_id is specified, all parameters are ignored.
messages = []
if data.get("file_system_id") is not None:
for key in data:
if key is not None and key not in ["encryption_in_transit", "iam_authorization", "file_system_id"]:
messages.append(EFS_MESSAGES["errors"]["ignored_param_with_efs_fs_id"].format(efs_param=key))
if messages:
raise ValidationError(message=messages)
@validates_schema
def validate_existence_of_mode_throughput(self, data, **kwargs):
"""Validate the conditional existence requirement between throughput_mode and provisioned_throughput."""
if kwargs.get("partial"):
            # If the schema is to be loaded partially, do not check the existence constraint.
return
throughput_mode = data.get("throughput_mode")
provisioned_throughput = data.get("provisioned_throughput")
if throughput_mode != "provisioned" and provisioned_throughput:
raise ValidationError(
message="When specifying provisioned throughput, the throughput mode must be set to provisioned",
field_name="ThroughputMode",
)
if throughput_mode == "provisioned" and not provisioned_throughput:
raise ValidationError(
message="When specifying throughput mode to provisioned,"
" the provisioned throughput option must be specified",
field_name="ProvisionedThroughput",
)
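# Illustrative EfsSettings fragments accepted by the checks above (YAML shown as comments):
#
#   ThroughputMode: provisioned
#   ProvisionedThroughput: 256    # the two must be specified together
#
#   ThroughputMode: bursting      # ProvisionedThroughput must be omitted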
class FsxLustreSettingsSchema(BaseSchema):
"""Represent the FSX schema."""
storage_capacity = fields.Int(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
deployment_type = fields.Str(
validate=validate.OneOf(["SCRATCH_1", "SCRATCH_2", "PERSISTENT_1", "PERSISTENT_2"]),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
imported_file_chunk_size = fields.Int(
validate=validate.Range(min=1, max=512000, error="has a minimum size of 1 MiB, and max size of 512,000 MiB"),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
export_path = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
import_path = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
weekly_maintenance_start_time = fields.Str(
validate=validate.Regexp(r"^[1-7]:([01]\d|2[0-3]):([0-5]\d)$"),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
automatic_backup_retention_days = fields.Int(
validate=validate.Range(min=0, max=35), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
copy_tags_to_backups = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
daily_automatic_backup_start_time = fields.Str(
validate=validate.Regexp(r"^([01]\d|2[0-3]):([0-5]\d)$"), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
per_unit_storage_throughput = fields.Int(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
backup_id = fields.Str(
validate=validate.Regexp("^(backup-[0-9a-f]{8,})$"),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
kms_key_id = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
file_system_id = fields.Str(
validate=validate.Regexp(r"^fs-[0-9a-z]{17}$"),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
auto_import_policy = fields.Str(
validate=validate.OneOf(["NEW", "NEW_CHANGED", "NEW_CHANGED_DELETED"]),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
drive_cache_type = fields.Str(
validate=validate.OneOf(["READ"]), metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
data_compression_type = fields.Str(
validate=validate.OneOf(["LZ4"]), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
fsx_storage_type = fields.Str(
data_key="StorageType",
validate=validate.OneOf(["HDD", "SSD"]),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
deletion_policy = fields.Str(
validate=validate.OneOf(DELETION_POLICIES), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
@validates_schema
def validate_file_system_id_ignored_parameters(self, data, **kwargs):
"""Return errors for parameters in the FSx config section that would be ignored."""
# If file_system_id is specified, all parameters are ignored.
messages = []
if data.get("file_system_id") is not None:
for key in data:
if key is not None and key != "file_system_id":
messages.append(FSX_MESSAGES["errors"]["ignored_param_with_fsx_fs_id"].format(fsx_param=key))
if messages:
raise ValidationError(message=messages)
@validates_schema
def validate_backup_id_unsupported_parameters(self, data, **kwargs):
"""Return errors for parameters in the FSx config section that would be ignored."""
# If file_system_id is specified, all parameters are ignored.
messages = []
if data.get("backup_id") is not None:
unsupported_config_param_names = [
"deployment_type",
"per_unit_storage_throughput",
"storage_capacity",
"import_path",
"export_path",
"imported_file_chunk_size",
"kms_key_id",
]
for key in data:
if key in unsupported_config_param_names:
messages.append(FSX_MESSAGES["errors"]["unsupported_backup_param"].format(name=key))
if messages:
raise ValidationError(message=messages)
class FsxOpenZfsSettingsSchema(BaseSchema):
"""Represent the FSX OpenZFS schema."""
volume_id = fields.Str(
required=True,
validate=validate.Regexp(FSX_VOLUME_ID_REGEX),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
class FsxOntapSettingsSchema(BaseSchema):
"""Represent the FSX Ontap schema."""
volume_id = fields.Str(
required=True,
validate=validate.Regexp(FSX_VOLUME_ID_REGEX),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
class FileCacheSettingsSchema(BaseSchema):
"""Represent the File Cache schema."""
file_cache_id = fields.Str(
required=True,
validate=validate.Regexp(FILE_CACHE_ID_REGEX),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
class SharedStorageSchema(BaseSchema):
"""Represent the generic SharedStorage schema."""
mount_dir = fields.Str(
required=True,
validate=get_field_validator("file_path"),
metadata={"update_policy": UpdatePolicy.SHARED_STORAGE_UPDATE_POLICY},
)
name = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
storage_type = fields.Str(
required=True,
validate=validate.OneOf(["Ebs", FSX_LUSTRE, FSX_OPENZFS, FSX_ONTAP, "Efs", FILE_CACHE]),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
ebs_settings = fields.Nested(EbsSettingsSchema, metadata={"update_policy": UpdatePolicy.IGNORED})
efs_settings = fields.Nested(EfsSettingsSchema, metadata={"update_policy": UpdatePolicy.IGNORED})
fsx_lustre_settings = fields.Nested(FsxLustreSettingsSchema, metadata={"update_policy": UpdatePolicy.IGNORED})
fsx_open_zfs_settings = fields.Nested(
FsxOpenZfsSettingsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
fsx_ontap_settings = fields.Nested(FsxOntapSettingsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
file_cache_settings = fields.Nested(FileCacheSettingsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@validates_schema
def no_coexist_storage_settings(self, data, **kwargs):
"""Validate that *_settings for different storage types do not co-exist."""
if self.fields_coexist(
data,
[
"ebs_settings",
"efs_settings",
"fsx_lustre_settings",
"fsx_open_zfs_settings",
"fsx_ontap_settings",
"file_cache_settings",
],
**kwargs,
):
raise ValidationError("Multiple *Settings sections cannot be specified in the SharedStorage items.")
@validates_schema
def right_storage_settings(self, data, **kwargs):
"""Validate that *_settings param is associated to the right storage type."""
for storage_type, settings in [
("Ebs", "ebs_settings"),
("Efs", "efs_settings"),
(FSX_LUSTRE, "fsx_lustre_settings"),
(FSX_OPENZFS, "fsx_open_zfs_settings"),
(FSX_ONTAP, "fsx_ontap_settings"),
(FILE_CACHE, "file_cache_settings"),
]:
# Verify the settings section is associated to the right storage type
if data.get(settings, None) and storage_type != data.get("storage_type"):
raise ValidationError(
"SharedStorage > *Settings section is not appropriate to the "
f"StorageType {data.get('storage_type')}."
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate the right type of shared storage according to the child type (EBS vs EFS vs FsxLustre)."""
storage_type = data.get("storage_type")
shared_volume_attributes = {"mount_dir": data.get("mount_dir"), "name": data.get("name")}
settings = (
data.get("efs_settings", None)
or data.get("ebs_settings", None)
or data.get("fsx_lustre_settings", None)
or data.get("fsx_open_zfs_settings", None)
or data.get("fsx_ontap_settings", None)
or data.get("file_cache_settings", None)
)
if settings:
shared_volume_attributes.update(**settings)
if storage_type == "Efs":
return SharedEfs(**shared_volume_attributes)
elif storage_type == "Ebs":
return SharedEbs(**shared_volume_attributes)
elif storage_type == FSX_LUSTRE:
return SharedFsxLustre(**shared_volume_attributes)
elif storage_type == FSX_OPENZFS:
return ExistingFsxOpenZfs(**shared_volume_attributes)
elif storage_type == FSX_ONTAP:
return ExistingFsxOntap(**shared_volume_attributes)
elif storage_type == FILE_CACHE:
return ExistingFileCache(**shared_volume_attributes)
return None
@pre_dump
def restore_child(self, data, **kwargs):
"""Restore back the child in the schema."""
adapted_data = copy.deepcopy(data)
# Move SharedXxx as a child to be automatically managed by marshmallow, see post_load action
if adapted_data.shared_storage_type == "efs":
storage_type = "efs"
elif adapted_data.shared_storage_type == "fsx":
mapping = {
LUSTRE: "fsx_lustre",
OPENZFS: "fsx_open_zfs",
ONTAP: "fsx_ontap",
FILECACHE: "file_cache_settings",
}
storage_type = mapping.get(adapted_data.file_system_type)
else: # "raid", "ebs"
storage_type = "ebs"
setattr(adapted_data, f"{storage_type}_settings", copy.copy(adapted_data))
# Restore storage type attribute
if adapted_data.shared_storage_type == "fsx":
mapping = {LUSTRE: FSX_LUSTRE, OPENZFS: FSX_OPENZFS, ONTAP: FSX_ONTAP, FILECACHE: FILE_CACHE}
adapted_data.storage_type = mapping.get(adapted_data.file_system_type)
else:
adapted_data.storage_type = storage_type.capitalize()
return adapted_data
@validates("mount_dir")
def shared_dir_validator(self, value):
"""Validate that user is not specifying /NONE or NONE as shared_dir for any filesystem."""
# FIXME: pcluster2 doesn't allow "^/?NONE$" mount dir to avoid an ambiguity in cookbook.
# We should change cookbook to solve the ambiguity and allow "^/?NONE$" for mount dir
# Cookbook location to be modified:
# https://github.com/aws/aws-parallelcluster-cookbook/blob/develop/recipes/head_node_base_config.rb#L51
if re.match("^/?NONE$", value):
raise ValidationError(f"{value} cannot be used as a shared directory")
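# Illustrative SharedStorage item handled by the schema above; the *Settings section
# must match StorageType, and only one *Settings section may appear per item:
#
#   - MountDir: /shared
#     Name: shared-ebs
#     StorageType: Ebs
#     EbsSettings:
#       VolumeType: gp3
#       Size: 50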
# ---------------------- Networking ---------------------- #
class HeadNodeProxySchema(BaseSchema):
"""Represent the schema of proxy for the Head node."""
http_proxy_address = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Proxy(**data)
class QueueProxySchema(BaseSchema):
"""Represent the schema of proxy for a queue."""
http_proxy_address = fields.Str(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Proxy(**data)
class LoginNodeProxySchema(BaseSchema):
"""Represent the schema of proxy for a Login Node."""
http_proxy_address = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Proxy(**data)
class BaseNetworkingSchema(BaseSchema):
"""Represent the schema of common networking parameters used by head, compute and login nodes."""
additional_security_groups = fields.List(
fields.Str(validate=get_field_validator("security_group_id")),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
security_groups = fields.List(
fields.Str(validate=get_field_validator("security_group_id")),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
@validates_schema
def no_coexist_security_groups(self, data, **kwargs):
"""Validate that security_groups and additional_security_groups do not co-exist."""
if self.fields_coexist(data, ["security_groups", "additional_security_groups"], **kwargs):
raise ValidationError("SecurityGroups and AdditionalSecurityGroups can not be configured together.")
class HeadNodeNetworkingSchema(BaseNetworkingSchema):
"""Represent the schema of the Networking, child of the HeadNode."""
subnet_id = fields.Str(
required=True, validate=get_field_validator("subnet_id"), metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
elastic_ip = fields.Raw(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
proxy = fields.Nested(HeadNodeProxySchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return HeadNodeNetworking(**data)
class PlacementGroupSchema(BaseSchema):
"""Represent the schema of placement group."""
enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.MANAGED_PLACEMENT_GROUP})
id = fields.Str(metadata={"update_policy": UpdatePolicy.MANAGED_PLACEMENT_GROUP})
name = fields.Str(metadata={"update_policy": UpdatePolicy.MANAGED_PLACEMENT_GROUP})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return PlacementGroup(**data)
class QueueNetworkingSchema(BaseNetworkingSchema):
"""Represent the schema of the Networking, child of Queue."""
assign_public_ip = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
class SlurmQueueNetworkingSchema(QueueNetworkingSchema):
"""Represent the schema of the Networking, child of slurm Queue."""
subnet_ids = fields.List(
fields.Str(validate=get_field_validator("subnet_id")),
required=True,
validate=validate.Length(min=1),
metadata={"update_policy": UpdatePolicy.MANAGED_FSX},
)
placement_group = fields.Nested(
PlacementGroupSchema, metadata={"update_policy": UpdatePolicy.MANAGED_PLACEMENT_GROUP}
)
proxy = fields.Nested(QueueProxySchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return SlurmQueueNetworking(**data)
class AwsBatchQueueNetworkingSchema(QueueNetworkingSchema):
"""Represent the schema of the Networking, child of aws batch Queue."""
subnet_ids = fields.List(
fields.Str(validate=get_field_validator("subnet_id")),
required=True,
validate=validate.Length(equal=1),
metadata={"update_policy": UpdatePolicy.MANAGED_FSX},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return AwsBatchQueueNetworking(**data)
class BaseSshSchema(BaseSchema):
"""Represent the schema of common Ssh parameters used by head and login nodes."""
key_name = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
class HeadNodeSshSchema(BaseSshSchema):
"""Represent the schema of the HeadNodeSsh."""
allowed_ips = fields.Str(validate=is_cidr_or_prefix_list, metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return HeadNodeSsh(**data)
class DcvSchema(BaseSchema):
"""Represent the schema of DCV."""
enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
port = fields.Int(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
allowed_ips = fields.Str(validate=is_cidr_or_prefix_list, metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Dcv(**data)
class EfaSchema(BaseSchema):
"""Represent the schema of EFA for a Compute Resource."""
enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
gdr_support = fields.Bool(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Efa(**data)
# ---------------------- Monitoring ---------------------- #
class CloudWatchLogsSchema(BaseSchema):
"""Represent the schema of the CloudWatchLogs section."""
enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
retention_in_days = fields.Int(
validate=validate.OneOf([1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
deletion_policy = fields.Str(
validate=validate.OneOf(DELETION_POLICIES), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return CloudWatchLogs(**data)
class RotationSchema(BaseSchema):
"""Represent the schema of the Log Rotation section."""
enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LogRotation(**data)
class CloudWatchDashboardsSchema(BaseSchema):
"""Represent the schema of the CloudWatchDashboards section."""
enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return CloudWatchDashboards(**data)
class LogsSchema(BaseSchema):
"""Represent the schema of the Logs section."""
cloud_watch = fields.Nested(CloudWatchLogsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
rotation = fields.Nested(RotationSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Logs(**data)
class DashboardsSchema(BaseSchema):
"""Represent the schema of the Dashboards section."""
cloud_watch = fields.Nested(CloudWatchDashboardsSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Dashboards(**data)
class MonitoringSchema(BaseSchema):
"""Represent the schema of the Monitoring section."""
detailed_monitoring = fields.Bool(metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
logs = fields.Nested(LogsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
dashboards = fields.Nested(DashboardsSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Monitoring(**data)
# ---------------------- Others ---------------------- #
class RolesSchema(BaseSchema):
"""Represent the schema of roles."""
lambda_functions_role = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Roles(**data)
class S3AccessSchema(BaseSchema):
"""Represent the schema of S3 access."""
bucket_name = fields.Str(
required=True,
metadata={"update_policy": UpdatePolicy.SUPPORTED},
validate=validate.Regexp(r"^[\*a-z0-9\-\.]+$"),
)
key_name = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
enable_write_access = fields.Bool(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return S3Access(**data)
class ClusterIamSchema(BaseSchema):
"""Represent the schema of IAM for Cluster."""
roles = fields.Nested(RolesSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
permissions_boundary = fields.Str(
metadata={"update_policy": UpdatePolicy.SUPPORTED}, validate=validate.Regexp(IAM_POLICY_REGEX)
)
resource_prefix = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return ClusterIam(**data)
class BaseIamSchema(BaseSchema):
"""Represent the schema of common Iam parameters used by head, queue and login nodes."""
instance_role = fields.Str(
metadata={"update_policy": UpdatePolicy.SUPPORTED}, validate=validate.Regexp(IAM_ROLE_REGEX)
)
additional_iam_policies = fields.Nested(
AdditionalIamPolicySchema, many=True, metadata={"update_policy": UpdatePolicy.SUPPORTED, "update_key": "Policy"}
)
@validates_schema
def no_coexist_role_policies(self, data, **kwargs):
"""Validate that instance_role, instance_profile or additional_iam_policies do not co-exist."""
if self.fields_coexist(data, ["instance_role", "instance_profile", "additional_iam_policies"], **kwargs):
raise ValidationError(
"InstanceProfile, InstanceRole or AdditionalIamPolicies can not be configured together."
)
class IamSchema(BaseIamSchema):
"""Common schema of IAM for HeadNode and Queue."""
s3_access = fields.Nested(
S3AccessSchema, many=True, metadata={"update_policy": UpdatePolicy.SUPPORTED, "update_key": "BucketName"}
)
@validates_schema
def no_coexist_s3_access(self, data, **kwargs):
"""Validate that instance_role, instance_profile or additional_iam_policies do not co-exist."""
if self.fields_coexist(data, ["instance_role", "s3_access"], **kwargs):
raise ValidationError("S3Access can not be configured when InstanceRole is set.")
if self.fields_coexist(data, ["instance_profile", "s3_access"], **kwargs):
raise ValidationError("S3Access can not be configured when InstanceProfile is set.")
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Iam(**data)
class HeadNodeIamSchema(IamSchema):
"""Represent the schema of IAM for HeadNode."""
instance_profile = fields.Str(
metadata={"update_policy": UpdatePolicy.UNSUPPORTED}, validate=validate.Regexp(IAM_INSTANCE_PROFILE_REGEX)
)
class QueueIamSchema(IamSchema):
"""Represent the schema of IAM for Queue."""
instance_profile = fields.Str(
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP},
validate=validate.Regexp(IAM_INSTANCE_PROFILE_REGEX),
)
class LoginNodesIamSchema(BaseIamSchema):
"""Represent the IAM schema of LoginNodes."""
instance_role = fields.Str(
metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP}, validate=validate.Regexp(IAM_ROLE_REGEX)
)
additional_iam_policies = fields.Nested(
AdditionalIamPolicySchema,
many=True,
metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP, "update_key": "Policy"},
)
instance_profile = fields.Str(
metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP}, validate=validate.Regexp(IAM_INSTANCE_PROFILE_REGEX)
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LoginNodesIam(**data)
class ImdsSchema(BaseSchema):
"""Represent the schema of IMDS for HeadNode."""
secured = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Imds(**data)
class IntelSoftwareSchema(BaseSchema):
"""Represent the schema of additional packages."""
intel_hpc_platform = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return IntelSoftware(**data)
class AdditionalPackagesSchema(BaseSchema):
"""Represent the schema of additional packages."""
intel_software = fields.Nested(IntelSoftwareSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return AdditionalPackages(**data)
class AmiSearchFiltersSchema(BaseSchema):
"""Represent the schema of the AmiSearchFilters section."""
tags = fields.Nested(
TagSchema, many=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED, "update_key": "Key"}
)
owner = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load()
def make_resource(self, data, **kwargs):
"""Generate resource."""
return AmiSearchFilters(**data)
class TimeoutsSchema(BaseSchema):
"""Represent the schema of the Timeouts section."""
head_node_bootstrap_timeout = fields.Int(
validate=validate.Range(min=1), metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
compute_node_bootstrap_timeout = fields.Int(
validate=validate.Range(min=1), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
@post_load()
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Timeouts(**data)
class CapacityReservationTargetSchema(BaseSchema):
"""Represent the schema of the CapacityReservationTarget section."""
capacity_reservation_id = fields.Str(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
capacity_reservation_resource_group_arn = fields.Str(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
@post_load()
def make_resource(self, data, **kwargs):
"""Generate resource."""
return CapacityReservationTarget(**data)
@validates_schema
def no_coexist_id_and_group_arn(self, data, **kwargs):
"""Validate that 'capacity_reservation_id' and 'capacity_reservation_resource_group_arn' do not co-exist."""
if self.fields_coexist(
data,
["capacity_reservation_id", "capacity_reservation_resource_group_arn"],
one_required=True,
**kwargs,
):
raise ValidationError(
"A Capacity Reservation Target needs to specify either Capacity Reservation ID or "
"Capacity Reservation Resource Group ARN."
)
class ClusterDevSettingsSchema(BaseDevSettingsSchema):
"""Represent the schema of Dev Setting."""
cluster_template = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
ami_search_filters = fields.Nested(AmiSearchFiltersSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
instance_types_data = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
timeouts = fields.Nested(TimeoutsSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
compute_startup_time_metric_enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return ClusterDevSettings(**data)
# ---------------------- Health Checks ---------------------- #
class GpuHealthCheckSchema(BaseSchema):
"""Represent the schema of gpu health check."""
enabled = fields.Bool(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return GpuHealthCheck(**data)
class HealthChecksSchema(BaseSchema):
"""Represent the HealthChecks schema."""
gpu = fields.Nested(GpuHealthCheckSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return HealthChecks(**data)
# ---------------------- Node and Cluster Schema ---------------------- #
class ImageSchema(BaseSchema):
"""Represent the schema of the Image."""
os = fields.Str(
required=True, validate=validate.OneOf(SUPPORTED_OSES), metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
custom_ami = fields.Str(
validate=validate.Regexp(PCLUSTER_AMI_ID_REGEX),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Image(**data)
class HeadNodeImageSchema(BaseSchema):
"""Represent the schema of the HeadNode Image."""
custom_ami = fields.Str(
validate=validate.Regexp(PCLUSTER_AMI_ID_REGEX),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return HeadNodeImage(**data)
class QueueImageSchema(BaseSchema):
"""Represent the schema of the Queue Image."""
custom_ami = fields.Str(
validate=validate.Regexp(PCLUSTER_AMI_ID_REGEX),
metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return QueueImage(**data)
class OneOrManyCustomActionField(fields.Nested):
"""Custom Marshmallow filed to handle backward compatible single script custom actions."""
def __init__(self, **kwargs):
schema = self._build_dynamic_schema_class(
kwargs.get("metadata", {}).get("update_policy", UpdatePolicy.UNSUPPORTED)
)
super().__init__(schema, **kwargs)
@staticmethod
def _build_dynamic_schema_class(update_policy):
class_name = f"CustomActionScriptSchema{update_policy.name}"
if class_name not in globals():
schema_class_type = type(
class_name,
(CustomActionScriptSchemaBase,),
{
"script": fields.Str(required=True, metadata={"update_policy": update_policy}),
"args": fields.List(fields.Str(), metadata={"update_policy": update_policy}),
},
)
globals()[class_name] = schema_class_type
else:
schema_class_type = globals()[class_name]
return schema_class_type
def _deserialize(self, value, attr, data, **kwargs):
if "Script" in value and "Sequence" in value:
raise ValidationError("Both Script and Sequence fields are provided. Only one is allowed.")
if "Script" in value:
return super()._deserialize(value, attr, data, **kwargs)
if "Sequence" in value:
sequence = value["Sequence"]
if not isinstance(sequence, list):
raise ValidationError("Invalid input type for Sequence, expected list.")
res = []
for item in sequence:
res.append(super()._deserialize(item, attr, data, **kwargs))
return res
raise ValidationError("Either Script or Sequence field must be provided.")
def _serialize(self, nested_obj, attr, obj, **kwargs):
if isinstance(nested_obj, list):
nested_serialized = []
for item in nested_obj:
nested_serialized.append(super()._serialize(item, attr, obj, **kwargs))
res = {"Sequence": nested_serialized}
else:
res = super()._serialize(nested_obj, attr, obj, **kwargs)
return res
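# Both shapes below are accepted by the field above (illustrative): a single Script is
# deserialized to one CustomAction, while Sequence yields a list of CustomAction objects.
#
#   OnNodeConfigured:
#     Script: s3://my-bucket/setup.sh        # hypothetical script location
#     Args: ['arg1']
#
#   OnNodeConfigured:
#     Sequence:
#       - Script: s3://my-bucket/first.sh
#       - Script: s3://my-bucket/second.sh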
class CustomActionScriptSchemaBase(BaseSchema):
"""Represent the schema of the custom action script that cannot be updated."""
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return CustomAction(**data)
class QueueCustomActionsSchema(BaseSchema):
"""Represent the schema for all available custom actions in the queues."""
on_node_start = OneOrManyCustomActionField(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
on_node_configured = OneOrManyCustomActionField(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return CustomActions(**data)
class HeadNodeCustomActionsSchema(BaseSchema):
"""Represent the schema for all available custom actions in the head node."""
on_node_start = OneOrManyCustomActionField(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
on_node_configured = OneOrManyCustomActionField(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
on_node_updated = OneOrManyCustomActionField(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return CustomActions(**data)
class InstanceTypeSchema(BaseSchema):
"""Schema of a compute resource that supports a pool of instance types."""
instance_type = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return FlexibleInstanceType(**data)
class HeadNodeSchema(BaseSchema):
"""Represent the schema of the HeadNode."""
instance_type = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
disable_simultaneous_multithreading = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
networking = fields.Nested(
HeadNodeNetworkingSchema, required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
)
ssh = fields.Nested(HeadNodeSshSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
local_storage = fields.Nested(HeadNodeStorageSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
dcv = fields.Nested(DcvSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
custom_actions = fields.Nested(HeadNodeCustomActionsSchema, metadata={"update_policy": UpdatePolicy.IGNORED})
iam = fields.Nested(HeadNodeIamSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
imds = fields.Nested(ImdsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
image = fields.Nested(HeadNodeImageSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load()
def make_resource(self, data, **kwargs):
"""Generate resource."""
return HeadNode(**data)
class LoginNodesImageSchema(BaseSchema):
"""Represent the Image schema of LoginNodes."""
custom_ami = fields.Str(
validate=validate.Regexp(PCLUSTER_AMI_ID_REGEX),
metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LoginNodesImage(**data)
class LoginNodesSshSchema(BaseSshSchema):
"""Represent the Ssh schema of LoginNodes."""
key_name = fields.Str(metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LoginNodesSsh(**data)
class LoginNodesNetworkingSchema(BaseNetworkingSchema):
"""Represent the networking schema of LoginNodes."""
subnet_ids = fields.List(
fields.Str(validate=get_field_validator("subnet_id")),
required=True,
validate=validate.Length(equal=1, error="Only one subnet can be associated with a login node pool."),
metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP},
)
additional_security_groups = fields.List(
fields.Str(validate=get_field_validator("security_group_id")),
metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP},
)
security_groups = fields.List(
fields.Str(validate=get_field_validator("security_group_id")),
metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP},
)
proxy = fields.Nested(LoginNodeProxySchema, metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LoginNodesNetworking(**data)
class LoginNodesPoolSchema(BaseSchema):
"""Represent the schema of the LoginNodesPool."""
name = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP})
instance_type = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP})
image = fields.Nested(LoginNodesImageSchema, metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP})
networking = fields.Nested(
LoginNodesNetworkingSchema, required=True, metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP}
)
count = fields.Int(
required=True,
validate=validate.Range(
min=0,
error="The count for LoginNodes Pool must be greater than or equal to 0.",
),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
ssh = fields.Nested(LoginNodesSshSchema, metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP})
iam = fields.Nested(LoginNodesIamSchema, metadata={"update_policy": UpdatePolicy.LOGIN_NODES_STOP})
gracetime_period = fields.Int(
validate=validate.Range(
min=3, max=120, error="The gracetime period for LoginNodes Pool must be an integer from 3 to 120."
),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LoginNodesPool(**data)
class LoginNodesSchema(BaseSchema):
"""Represent the schema of LoginNodes."""
pools = fields.Nested(
LoginNodesPoolSchema,
many=True,
required=True,
validate=validate.Length(equal=1, error="Only one pool can be specified when using login nodes."),
metadata={"update_policy": UpdatePolicy(UpdatePolicy.LOGIN_NODES_POOLS), "update_key": "Name"},
)
@post_load()
def make_resource(self, data, **kwargs):
"""Generate resource."""
return LoginNodes(**data)
class _ComputeResourceSchema(BaseSchema):
"""Represent the schema of the ComputeResource."""
name = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
class SlurmComputeResourceNetworkingSchema(BaseSchema):
"""Represent the Networking schema of the Slurm ComputeResource."""
placement_group = fields.Nested(
PlacementGroupSchema, metadata={"update_policy": UpdatePolicy.MANAGED_PLACEMENT_GROUP}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return SlurmComputeResourceNetworking(**data)
class QueueTagSchema(BaseSchema):
"""Represent the schema of Tag section."""
key = fields.Str(
required=True,
validate=validate.Length(max=128),
metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY},
)
value = fields.Str(
required=True,
validate=validate.Length(max=256),
metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return BaseTag(**data)
class SlurmComputeResourceSchema(_ComputeResourceSchema):
"""Represent the schema of the Slurm ComputeResource."""
instance_type = fields.Str(metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
instances = fields.Nested(
InstanceTypeSchema,
many=True,
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP_ON_REMOVE, "update_key": "InstanceType"},
)
max_count = fields.Int(validate=validate.Range(min=1), metadata={"update_policy": UpdatePolicy.MAX_COUNT})
min_count = fields.Int(validate=validate.Range(min=0), metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
spot_price = fields.Float(
validate=validate.Range(min=0), metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
efa = fields.Nested(EfaSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
disable_simultaneous_multithreading = fields.Bool(metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
schedulable_memory = fields.Int(metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
capacity_reservation_target = fields.Nested(
CapacityReservationTargetSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
networking = fields.Nested(
SlurmComputeResourceNetworkingSchema, metadata={"update_policy": UpdatePolicy.MANAGED_PLACEMENT_GROUP}
)
health_checks = fields.Nested(HealthChecksSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
custom_slurm_settings = fields.Dict(metadata={"update_policy": UpdatePolicy.SUPPORTED})
tags = fields.Nested(
QueueTagSchema, many=True, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY, "update_key": "Key"}
)
static_node_priority = fields.Int(
validate=validate.Range(min=MIN_SLURM_NODE_PRIORITY, max=MAX_SLURM_NODE_PRIORITY),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
dynamic_node_priority = fields.Int(
validate=validate.Range(min=MIN_SLURM_NODE_PRIORITY, max=MAX_SLURM_NODE_PRIORITY),
metadata={"update_policy": UpdatePolicy.SUPPORTED},
)
@validates_schema
def no_coexist_instance_type_flexibility(self, data, **kwargs):
"""Validate that 'instance_type' and 'instances' do not co-exist."""
if self.fields_coexist(
data,
["instance_type", "instances"],
one_required=True,
**kwargs,
):
raise ValidationError("A Compute Resource needs to specify either InstanceType or Instances.")
@validates("instances")
def no_duplicate_instance_types(self, flexible_instance_types: List[FlexibleInstanceType]):
"""Verify that there are no duplicates in Instances."""
instance_types = set()
for flexible_instance_type in flexible_instance_types:
instance_type_name = flexible_instance_type.instance_type
if instance_type_name in instance_types:
raise ValidationError(
f"Duplicate instance type ({instance_type_name}) detected. Instances should not have "
f"duplicate instance types. "
)
instance_types.add(instance_type_name)
@validates("tags")
def validate_tags(self, tags):
"""Validate tags."""
validate_no_reserved_tag(tags)
validate_no_duplicate_tag(tags)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
if data.get("instances"):
return SlurmFlexibleComputeResource(**data)
return SlurmComputeResource(**data)
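# Illustrative compute resource definitions; exactly one of InstanceType or Instances
# must be set, and Instances entries must not repeat an instance type:
#
#   - Name: cr-single
#     InstanceType: c5.xlarge
#
#   - Name: cr-flexible
#     Instances:
#       - InstanceType: c5.xlarge
#       - InstanceType: c5a.xlarge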
class AwsBatchComputeResourceSchema(_ComputeResourceSchema):
"""Represent the schema of the Batch ComputeResource."""
instance_types = fields.List(
fields.Str(), required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP}
)
max_vcpus = fields.Int(
data_key="MaxvCpus",
validate=validate.Range(min=1),
metadata={"update_policy": UpdatePolicy.AWSBATCH_CE_MAX_RESIZE},
)
min_vcpus = fields.Int(
data_key="MinvCpus", validate=validate.Range(min=0), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
desired_vcpus = fields.Int(
data_key="DesiredvCpus", validate=validate.Range(min=0), metadata={"update_policy": UpdatePolicy.IGNORED}
)
spot_bid_percentage = fields.Int(
validate=validate.Range(min=0, max=100, min_inclusive=False), metadata={"update_policy": UpdatePolicy.SUPPORTED}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return AwsBatchComputeResource(**data)
class ComputeSettingsSchema(BaseSchema):
"""Represent the schema of the compute_settings schedulers queues."""
local_storage = fields.Nested(QueueStorageSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
@post_load()
def make_resource(self, data, **kwargs):
"""Generate resource."""
return ComputeSettings(**data)
class BaseQueueSchema(BaseSchema):
"""Represent the schema of the attributes in common between all the schedulers queues."""
name = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
capacity_type = fields.Str(
validate=validate.OneOf([event.value for event in CapacityType]),
metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY},
)
class _CommonQueueSchema(BaseQueueSchema):
"""Represent the schema of common part between Slurm and future scheduler Queue."""
compute_settings = fields.Nested(
ComputeSettingsSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
custom_actions = fields.Nested(
QueueCustomActionsSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
iam = fields.Nested(QueueIamSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
image = fields.Nested(QueueImageSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY})
capacity_reservation_target = fields.Nested(
CapacityReservationTargetSchema, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
class SlurmQueueSchema(_CommonQueueSchema):
"""Represent the schema of a Slurm Queue."""
allocation_strategy = fields.Str(
validate=validate.OneOf([strategy.value for strategy in AllocationStrategy]),
metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY},
)
compute_resources = fields.Nested(
SlurmComputeResourceSchema,
many=True,
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP_ON_REMOVE, "update_key": "Name"},
)
networking = fields.Nested(
SlurmQueueNetworkingSchema, required=True, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY}
)
health_checks = fields.Nested(HealthChecksSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
custom_slurm_settings = fields.Dict(metadata={"update_policy": UpdatePolicy.SUPPORTED})
tags = fields.Nested(
QueueTagSchema, many=True, metadata={"update_policy": UpdatePolicy.QUEUE_UPDATE_STRATEGY, "update_key": "Key"}
)
job_exclusive_allocation = fields.Bool(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return SlurmQueue(**data)
@validates("tags")
def validate_tags(self, tags):
"""Validate tags."""
validate_no_reserved_tag(tags)
validate_no_duplicate_tag(tags)
class AwsBatchQueueSchema(BaseQueueSchema):
"""Represent the schema of a Batch Queue."""
compute_resources = fields.Nested(
AwsBatchComputeResourceSchema,
many=True,
validate=validate.Length(equal=1),
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP, "update_key": "Name"},
)
networking = fields.Nested(
AwsBatchQueueNetworkingSchema, required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP}
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return AwsBatchQueue(**data)
class DnsSchema(BaseSchema):
"""Represent the schema of Dns Settings."""
disable_managed_dns = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
hosted_zone_id = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
use_ec2_hostnames = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Dns(**data)
class DatabaseSchema(BaseSchema):
"""Represent the schema of the DirectoryService."""
uri = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
user_name = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
password_secret_arn = fields.Str(
required=True,
validate=validate.Regexp(r"^arn:.*:secret"),
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP},
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return Database(**data)
class SlurmSettingsSchema(BaseSchema):
"""Represent the schema of the Scheduling Settings."""
scaledown_idletime = fields.Int(
validate=validate.Range(min=-1),
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP},
)
dns = fields.Nested(DnsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
queue_update_strategy = fields.Str(
validate=validate.OneOf([strategy.value for strategy in QueueUpdateStrategy]),
metadata={"update_policy": UpdatePolicy.IGNORED},
)
enable_memory_based_scheduling = fields.Bool(metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
database = fields.Nested(DatabaseSchema, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP})
custom_slurm_settings = fields.List(fields.Dict, metadata={"update_policy": UpdatePolicy.SUPPORTED})
custom_slurm_settings_include_file = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return SlurmSettings(**data)
class AwsBatchSettingsSchema(BaseSchema):
"""Represent the schema of the AwsBatch Scheduling Settings."""
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return AwsBatchSettings(**data)
class SchedulingSchema(BaseSchema):
"""Represent the schema of the Scheduling."""
scheduler = fields.Str(
required=True,
validate=validate.OneOf(["slurm", "awsbatch"]),
metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
)
# Slurm schema
slurm_settings = fields.Nested(SlurmSettingsSchema, metadata={"update_policy": UpdatePolicy.IGNORED})
slurm_queues = fields.Nested(
SlurmQueueSchema,
many=True,
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP_ON_REMOVE, "update_key": "Name"},
)
# Awsbatch schema:
aws_batch_queues = fields.Nested(
AwsBatchQueueSchema,
many=True,
validate=validate.Length(equal=1),
metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP, "update_key": "Name"},
)
aws_batch_settings = fields.Nested(
AwsBatchSettingsSchema, metadata={"update_policy": UpdatePolicy.COMPUTE_FLEET_STOP}
)
@validates_schema
def no_coexist_schedulers(self, data, **kwargs):
"""Validate that *_settings and *_queues for different schedulers do not co-exist."""
scheduler = data.get("scheduler")
if self.fields_coexist(data, ["aws_batch_settings", "slurm_settings"], **kwargs):
raise ValidationError("Multiple *Settings sections cannot be specified in the Scheduling section.")
if self.fields_coexist(data, ["aws_batch_queues", "slurm_queues"], one_required=True, **kwargs):
if scheduler == "awsbatch":
scheduler_prefix = "AwsBatch"
else:
scheduler_prefix = scheduler.capitalize()
raise ValidationError(f"{scheduler_prefix}Queues section must be specified in the Scheduling section.")
@validates_schema
def right_scheduler_schema(self, data, **kwargs):
"""Validate that *_settings field is associated to the right scheduler."""
for scheduler, settings, queues in [
("awsbatch", "aws_batch_settings", "aws_batch_queues"),
("slurm", "slurm_settings", "slurm_queues"),
]:
# Verify the settings section is associated to the right scheduler type
configured_scheduler = data.get("scheduler")
if settings in data and scheduler != configured_scheduler:
raise ValidationError(
f"Scheduling > *Settings section is not appropriate to the Scheduler: {configured_scheduler}."
)
if queues in data and scheduler != configured_scheduler:
raise ValidationError(
f"Scheduling > *Queues section is not appropriate to the Scheduler: {configured_scheduler}."
)
@post_load
def make_resource(self, data, **kwargs):
"""Generate the right type of scheduling according to the child type (Slurm vs AwsBatch vs Custom)."""
scheduler = data.get("scheduler")
if scheduler == "slurm":
return SlurmScheduling(queues=data.get("slurm_queues"), settings=data.get("slurm_settings", None))
if scheduler == "awsbatch":
return AwsBatchScheduling(
queues=data.get("aws_batch_queues"), settings=data.get("aws_batch_settings", None)
)
return None
@pre_dump
def restore_child(self, data, **kwargs):
"""Restore back the child in the schema, see post_load action."""
adapted_data = copy.deepcopy(data)
if adapted_data.scheduler == "awsbatch":
scheduler_prefix = "aws_batch"
else:
scheduler_prefix = adapted_data.scheduler
setattr(adapted_data, f"{scheduler_prefix}_queues", copy.copy(getattr(adapted_data, "queues", None)))
setattr(adapted_data, f"{scheduler_prefix}_settings", copy.copy(getattr(adapted_data, "settings", None)))
return adapted_data
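# Illustrative Scheduling section; the *Queues/*Settings keys must match the declared
# Scheduler, as enforced by the validators above:
#
#   Scheduling:
#     Scheduler: slurm
#     SlurmQueues:
#       - Name: queue1
#         Networking:
#           SubnetIds:
#             - subnet-12345678          # hypothetical subnet id
#         ComputeResources:
#           - Name: cr1
#             InstanceType: c5.xlarge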
class DirectoryServiceSchema(BaseSchema):
"""Represent the schema of the DirectoryService."""
domain_name = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP})
domain_addr = fields.Str(required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP})
password_secret_arn = fields.Str(
required=True,
validate=validate.Regexp(r"^arn:.*:(secretsmanager:.*:.*:secret:|ssm:.*:.*:parameter\/).*$"),
metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP},
)
domain_read_only_user = fields.Str(
required=True, metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP}
)
ldap_tls_ca_cert = fields.Str(metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP})
ldap_tls_req_cert = fields.Str(
validate=validate.OneOf(["never", "allow", "try", "demand", "hard"]),
metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP},
)
ldap_access_filter = fields.Str(metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP})
generate_ssh_keys_for_users = fields.Bool(metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP})
additional_sssd_configs = fields.Dict(metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP})
@post_load
def make_resource(self, data, **kwargs):
"""Generate resource."""
return DirectoryService(**data)
class ClusterSchema(BaseSchema):
"""Represent the schema of the Cluster."""
login_nodes = fields.Nested(LoginNodesSchema, many=False, metadata={"update_policy": UpdatePolicy.SUPPORTED})
image = fields.Nested(ImageSchema, required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
head_node = fields.Nested(HeadNodeSchema, required=True, metadata={"update_policy": UpdatePolicy.SUPPORTED})
scheduling = fields.Nested(SchedulingSchema, required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
shared_storage = fields.Nested(
SharedStorageSchema,
many=True,
metadata={
"update_policy": UpdatePolicy(UpdatePolicy.SHARED_STORAGE_UPDATE_POLICY),
"update_key": "Name",
},
)
monitoring = fields.Nested(MonitoringSchema, metadata={"update_policy": UpdatePolicy.IGNORED})
additional_packages = fields.Nested(AdditionalPackagesSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
tags = fields.Nested(
TagSchema, many=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED, "update_key": "Key"}
)
iam = fields.Nested(ClusterIamSchema, metadata={"update_policy": UpdatePolicy.IGNORED})
directory_service = fields.Nested(
DirectoryServiceSchema, metadata={"update_policy": UpdatePolicy.COMPUTE_AND_LOGIN_NODES_STOP}
)
config_region = fields.Str(data_key="Region", metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
imds = fields.Nested(TopLevelImdsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
custom_s3_bucket = fields.Str(metadata={"update_policy": UpdatePolicy.READ_ONLY_RESOURCE_BUCKET})
additional_resources = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
dev_settings = fields.Nested(ClusterDevSettingsSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
deployment_settings = fields.Nested(DeploymentSettingsSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
def __init__(self, cluster_name: str):
super().__init__()
self.cluster_name = cluster_name
@validates("tags")
def validate_tags(self, tags):
"""Validate tags."""
validate_no_reserved_tag(tags)
validate_no_duplicate_tag(tags)
@validates_schema
def no_settings_for_batch(self, data, **kwargs):
"""Ensure IntelSoftware and DirectoryService section is not included when AWS Batch is the scheduler."""
scheduling = data.get("scheduling")
head_node = data.get("head_node")
if scheduling and scheduling.scheduler == "awsbatch":
error_message = "The use of the {} configuration is not supported when using awsbatch as the scheduler."
additional_packages = data.get("additional_packages")
if (
additional_packages
and additional_packages.intel_software
and additional_packages.intel_software.intel_hpc_platform
):
raise ValidationError(error_message.format("IntelSoftware"))
if head_node.custom_actions and head_node.custom_actions.on_node_updated:
raise ValidationError(error_message.format("OnNodeUpdated"))
if data.get("directory_service"):
raise ValidationError(error_message.format("DirectoryService"))
@post_load(pass_original=True)
def make_resource(self, data, original_data, **kwargs):
"""Generate cluster according to the scheduler. Save original configuration."""
scheduler = data.get("scheduling").scheduler
if scheduler == "slurm":
cluster = SlurmClusterConfig(cluster_name=self.cluster_name, **data)
elif scheduler == "awsbatch":
cluster = AwsBatchClusterConfig(cluster_name=self.cluster_name, **data)
else:
raise ValidationError(f"Unsupported scheduler {scheduler}.")
cluster.source_config = original_data
return cluster
|
from abc import ABC, abstractmethod
from input_data import InputData
from result import ResultData
class Solver(ABC):
    name: str = None

    @abstractmethod
    def __call__(self, data: InputData) -> ResultData:
        pass |
"""Add TestCase.step_id
Revision ID: 3265d2120c82
Revises: 21c9439330f
Create Date: 2014-04-02 15:26:58.967387
"""
# revision identifiers, used by Alembic.
revision = '3265d2120c82'
down_revision = '21c9439330f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('test', sa.Column('step_id', sa.GUID(), nullable=True))
op.create_foreign_key('test_step_id_fkey', 'test', 'jobstep', ['step_id'], ['id'], ondelete='CASCADE')
op.create_index('idx_test_step_id', 'test', ['step_id'])
def downgrade():
op.drop_column('test', 'step_id')
|
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
import collections
import cv2
from realsafe.attack.base import Attack
from realsafe.model import Classifier
from realsafe.attack.utils import mean_square_distance as distance
class Evolutionary(Attack):
"""
l_2
optimized
"""
def __init__(self, model, batch_size):
assert isinstance(model, Classifier)
if batch_size != 1:
raise NotImplementedError
Attack.__init__(self, model=model, batch_size=batch_size)
self.xs_ph = tf.placeholder(model.x_dtype, (None,) + self.model.x_shape)
self.labels_pred = self.model.labels(self.xs_ph)
def _is_adversarial(self, xs, ys, ys_target, session, goal):
label = session.run(self.labels_pred,
feed_dict={self.xs_ph: xs[np.newaxis]})[0]
if goal == "ut" or goal == "tm":
return label != ys
else:
return label == ys_target
def log_step(self, step, prediction, distance, sigma, mu, message=''):
print("Step {}: {:.5e}, prediction = {}, "
"stepsizes = {:.1e}/{:.1e}: {}".format(
step, distance, prediction, sigma, mu, message))
def config(self, **kwargs):
self.max_queries = kwargs["max_queries"]
self.goal = kwargs["goal"]
self.mu = kwargs["mu"]
self.sigma = kwargs["sigma"]
self.sample_size = kwargs['sample_size']
self.logging = kwargs.get("logging", True)
self.return_details = kwargs.get("return_details", True)
def batch_attack(self, xs, ys, ys_target, session, starting_point):
assert xs.shape[0] == self.batch_size # Only run one example
xs = np.squeeze(xs, axis=0)
if ys is not None:
assert ys.shape[0] == self.batch_size
ys = np.squeeze(ys, axis=0)
if ys_target is not None:
assert ys_target.shape[0] == self.batch_size
ys_target = np.squeeze(ys_target, axis=0)
if self._is_adversarial(xs, ys, ys_target, session, self.goal):
if self.logging:
print("Original image is adversarial")
if self.return_details:
return xs[np.newaxis], np.zeros([self.max_queries + 1])
else:
return xs[np.newaxis]
assert self._is_adversarial(starting_point, ys, ys_target, session, self.goal)
mu = self.mu
min_ = self.model.x_min
max_ = self.model.x_max
shape = self.model.x_shape
x_dtype = self.model.x_dtype.as_numpy_dtype
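        # For 3-D (image) inputs, perturbations are sampled in a reduced
        # (sample_size, sample_size, channels) space and upscaled with
        # cv2.resize, which lowers the dimensionality the attack must search.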
do_dimension_reduction = (len(shape) == 3)
xs_adv = starting_point.astype(x_dtype).copy()
dis = distance(xs_adv, xs, min_, max_)
stats_adversarial = collections.deque(maxlen=30)
dis_per_query = np.zeros([self.max_queries + 1])
if do_dimension_reduction:
pert_shape = (self.sample_size, self.sample_size, shape[2])
else:
pert_shape = shape
N = 1
for i in range(len(pert_shape)):
N *= pert_shape[i]
K = int(N / 20)
evolution_path = np.zeros(pert_shape, dtype=x_dtype)
decay_factor = 0.99
diagonal_covariance = np.ones(pert_shape, dtype=x_dtype)
c = 0.001
xs_adv_label = session.run(
self.labels_pred, feed_dict={self.xs_ph: xs_adv[np.newaxis]})[0]
if self.logging:
self.log_step(0, xs_adv_label, dis, self.sigma, mu)
dis_per_query[0] = dis
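        # Main query loop: each step biases the current adversarial example
        # toward the original image (mu step), adds Gaussian noise on a random
        # subset of K coordinates scaled by the adapted diagonal covariance,
        # projects the candidate back onto the sphere of the current source
        # distance, and keeps it only if it remains adversarial.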
for step in range(1, self.max_queries + 1):
unnormalized_source_direction = xs - xs_adv
source_norm = np.linalg.norm(unnormalized_source_direction)
selection_probability = \
diagonal_covariance.reshape(-1) / np.sum(diagonal_covariance)
selected_indices = np.random.choice(
N, K, replace=False, p=selection_probability)
perturbation = np.random.normal(0, 1, pert_shape).astype(x_dtype)
factor = np.zeros([N], dtype=x_dtype)
factor[selected_indices] = 1
perturbation *= factor.reshape(
pert_shape) * np.sqrt(diagonal_covariance)
if do_dimension_reduction:
perturbation_large = cv2.resize(perturbation, shape[:2])
else:
perturbation_large = perturbation
biased = xs_adv + mu * unnormalized_source_direction
candidate = biased + self.sigma * source_norm * \
perturbation_large / np.linalg.norm(perturbation_large)
candidate = xs - (xs - candidate) / np.linalg.norm(xs - candidate) \
* np.linalg.norm(xs - biased)
candidate = np.clip(candidate, min_, max_)
is_adversarial = self._is_adversarial(candidate, ys, ys_target,
session, self.goal)
stats_adversarial.appendleft(is_adversarial)
if is_adversarial:
new_xs_adv = candidate
new_dis = distance(new_xs_adv, xs, min_, max_)
evolution_path = decay_factor * evolution_path + \
np.sqrt(1 - decay_factor ** 2) * perturbation
diagonal_covariance = (1 - c) * diagonal_covariance + \
c * (evolution_path ** 2)
else:
new_xs_adv = None
message = ''
if new_xs_adv is not None:
abs_improvement = dis - new_dis
rel_improvement = abs_improvement / dis
message = 'd. reduced by {:.2f}% ({:.4e})'.format(
rel_improvement * 100, abs_improvement)
xs_adv = new_xs_adv
dis = new_dis
dis_per_query[step] = dis
xs_adv_label = session.run(
self.labels_pred, feed_dict={self.xs_ph: xs_adv[np.newaxis]})[0]
if self.logging:
self.log_step(step, xs_adv_label, dis, self.sigma, mu, message)
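            # 1/5-style success rule: once 30 trial outcomes are collected,
            # grow mu when the success rate exceeds 0.2, shrink it otherwise.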
if len(stats_adversarial) == stats_adversarial.maxlen:
p_step = np.mean(stats_adversarial)
n_step = len(stats_adversarial)
mu *= np.exp(p_step - 0.2)
stats_adversarial.clear()
if self.return_details:
return xs_adv[np.newaxis], dis_per_query
else:
return xs_adv[np.newaxis]
|
import pandas.util.testing as tm
import pyspark.sql.functions as F
import pytest
from pyspark.sql.window import Window
from pytest import param
import ibis
pytest.importorskip('pyspark')
pytestmark = pytest.mark.pyspark
@pytest.mark.parametrize(
('ibis_window', 'spark_range'),
[
param(
ibis.trailing_window(
preceding=ibis.interval(hours=1),
order_by='time',
group_by='key',
),
(-3600, 0),
),
param(
ibis.trailing_window(
preceding=ibis.interval(hours=2),
order_by='time',
group_by='key',
),
(-7200, 0),
),
param(
ibis.range_window(
preceding=0,
following=ibis.interval(hours=1),
order_by='time',
group_by='key',
),
(0, 3600),
),
],
)
def test_time_indexed_window(client, ibis_window, spark_range):
table = client.table('time_indexed_table')
result = table.mutate(
mean=table['value'].mean().over(ibis_window)
).compile()
result_pd = result.toPandas()
spark_table = table.compile()
spark_window = (
Window.partitionBy('key')
.orderBy(F.col('time').cast('long'))
.rangeBetween(*spark_range)
)
expected = spark_table.withColumn(
'mean', F.mean(spark_table['value']).over(spark_window),
).toPandas()
tm.assert_frame_equal(result_pd, expected)
def test_multiple_windows(client):
table = client.table('time_indexed_table')
window1 = ibis.trailing_window(
preceding=ibis.interval(hours=1), order_by='time', group_by='key'
)
window2 = ibis.trailing_window(
preceding=ibis.interval(hours=2), order_by='time', group_by='key'
)
result = table.mutate(
mean_1h=table['value'].mean().over(window1),
mean_2h=table['value'].mean().over(window2),
).compile()
result_pd = result.toPandas()
df = table.compile().toPandas()
expected_win_1 = (
df.set_index('time')
.groupby('key')
.value.rolling('1h', closed='both')
.mean()
.rename('mean_1h')
).reset_index(drop=True)
expected_win_2 = (
df.set_index('time')
.groupby('key')
.value.rolling('2h', closed='both')
.mean()
.rename('mean_2h')
).reset_index(drop=True)
tm.assert_series_equal(result_pd['mean_1h'], expected_win_1)
tm.assert_series_equal(result_pd['mean_2h'], expected_win_2)
|
def nejdelsi_slovo(a):
    nejdelsi = ''
    for i in a.split():
        if len(i) > len(nejdelsi):
            nejdelsi = i
    return nejdelsi

a = 'se dodo, dodose'
print(nejdelsi_slovo(a))
|
#!/usr/bin/env python3
import sys
morse = {'a': ".-",
'b': "-...",
'c': "-.-.",
'd': "-..",
'e': ".",
'f': "..-.",
'g': "--.",
'h': "....",
'i': "..",
'j': ".---",
'k': "-.-",
'l': ".-..",
'm': "--",
'n': "-.",
'o': "---",
'p': ".--.",
'q': "--.-",
'r': ".-.",
's': "...",
't': "-",
'u': "..-",
'v': "...-",
'w': ".--",
'x': "-..-",
'y': "-.--",
'z': "--.."}
def main():
if len(sys.argv) == 2:
ret = sys.argv[1].split(" ")
ret = "".join(ret)
if len(sys.argv) == 2 and ret != "" and ret.isalpha():
ret = ret.lower()
aux = ""
        for i in ret:
            aux += morse[i]
print(aux)
else:
print("usage: " + sys.argv[0] + " " + "a-zA-Z string")
if __name__ == "__main__":
main() |
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .models import Topic, Entry
from .forms import TopicForm, EntryForm
from . import utils
def get_homepage(request):
"""Рендерит домашнюю страницу"""
return render(request, 'learning_logs/index.html')
@login_required
def get_topics(request):
"""Выводит список тем отсортированную по дате добавления"""
topics = Topic.objects.filter(owner=request.user).order_by('data_added')
context = {'topics': topics}
return render(request, 'learning_logs/topics.html', context)
@login_required
def get_topic_detail(request, pk):
"""Получение записей по конретной теме"""
topic = Topic.objects.get(pk=pk)
utils.check_topic_owner(request, topic)
entries = topic.entry_set.order_by('-data_added')
context = {
'topic': topic,
'entries': entries,
}
return render(request,'learning_logs/topic_detail.html', context)
@login_required
def add_new_topic(request):
"""Создае новую тему для изучения"""
if request.method == 'POST':
form = TopicForm(request.POST or None)
if form.is_valid():
new_topic = form.save(commit=False)
new_topic.owner = request.user
new_topic.save()
return redirect('topics')
form = TopicForm(request.POST or None)
context = {'form': form}
return render(request, 'learning_logs/new_topic.html', context)
@login_required
def add_new_entry(request, pk):
"""Добавляет новую запись к конкретной теме"""
topic = Topic.objects.get(pk=pk)
utils.check_topic_owner(request, topic)
if request.method == 'POST':
form = EntryForm(request.POST or None)
if form.is_valid():
new_entry = form.save(commit=False)
new_entry.topic = topic
new_entry.save()
return redirect('topic_detail', pk=pk)
form = EntryForm(request.POST or None)
context = {'topic': topic, 'form': form}
return render(request, 'learning_logs/add_new_entry.html', context)
@login_required
def edit_entry(request, pk):
"""Редактирует существующую запись"""
entry = Entry.objects.get(pk=pk)
topic = entry.topic
utils.check_topic_owner(request, topic)
if request.method == 'POST':
form = EntryForm(request.POST, instance=entry)
if form.is_valid():
form.save()
return redirect('topic_detail', pk=topic.id)
form = EntryForm(instance=entry)
context = {'entry': entry, 'topic': topic, 'form': form}
return render(request, 'learning_logs/edit_entry.html', context)
|
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from clubkit.shop.models import Product
from clubkit.clubs.models import ClubInfo
from .cart import Cart, CartPackage
from .forms import CartAddProductForm
from clubkit.clubs.models import Packages
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product, quantity=cd['quantity'], update_quantity=cd['update'])
return redirect('cart:cart_detail')
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('cart:cart_detail')
def cart_detail(request):
hidecart = False
cart = Cart(request)
club_pk = request.session.get('pk')
club = ClubInfo.objects.filter(pk=club_pk)
for item in cart:
item['update_quantity_form'] = CartAddProductForm(initial={'quantity': item['quantity'], 'update': True})
return render(request, 'cart/details.html', {'cart': cart,
'club_pk': club_pk,
'club': club,
'hidecart': hidecart})
@require_POST
def cart_add_package(request, product_id):
cart = CartPackage(request)
product = get_object_or_404(Packages, id=product_id)
cart.add(product=product)
return redirect('cart:cart_detail_package')
def cart_remove_package(request, product_id):
cart = CartPackage(request)
product = get_object_or_404(Packages, id=product_id)
cart.remove(product)
return redirect('cart:cart_add_package')
def cart_detail_package(request):
hidecart = False
cart = CartPackage(request)
return render(request, 'club/package-detail.html', {'cart': cart,
'hidecart': hidecart})
|
# -*- coding: utf-8 -*-
''' Compute the value of k, given that f, L, Q, ΔH, ϑ
are input data, with g = 9.81 (gravity) and ε = 0.000002.
D = (8·f·L·Q² / (π²·g·ΔH))^(1/5)
Rey = 4·Q / (π·D·ϑ)
k = 0.25 / [log10(ε/(3.7·D) + 5.74/Rey^0.9)]²
INPUT:
The inputs f, L, Q, ΔH, ϑ must be requested in this order.
Remember that g = 9.81, ε = 0.000002 and π are constants
and must NOT be requested as inputs. The value of π
must be used as: math.pi (write it exactly like
that, math.pi)
OUTPUT:
The output must show the values of D, Rey and k (with 4 decimal places).
Example:
For the input:
f = 0.2
L = 50000
Q = 0.65
DeltaH = 22
v = 0.000001
the output must show the data below (in this order):
D=1.7382
Rey=476122.1893
k=0.0132
However, you must omit variables and operators in the output,
showing only the numbers in the requested order, one value per line.
So the output of your program for this example must be
EXACTLY:
1.7382
476122.1893
0.0132
HINT:
The base-10 logarithm can be computed with:
math.log10(expression)
E.g.:
math.log10(a*b)
computes the base-10 logarithm of the expression a*b.'''
import math
f = float(input())
L = float(input())
Q = float(input())
DeltaH = float(input())
v = float(input())
e = 0.000002
g = 9.81
D = ((8*f*L*(Q**2))/((math.pi**2)*(g)*DeltaH))**(1/5)
Rey = (4*Q)/(math.pi*D*v)
k = ((0.25)/(math.log10(((e)/(3.7*D))+(5.74/(Rey**0.9)))**2))
print("\n%.4f\n\n%.4f\n\n%.4f\n" % (D, Rey, k)) |
from stark.service.v1 import StarkHandler
from django.conf.urls import url
from django.shortcuts import render, reverse, HttpResponse, redirect
from django.utils.safestring import mark_safe
from django.http import JsonResponse
from django.conf import settings
from manage_system import models
from stark.forms.forms import BootStrapModelForm
import os
class BasicModelForm(BootStrapModelForm):
class Meta:
model = models.HomeWorkStudentRecord
fields = ['homework', 'avatar']
class HomeworkStudentHandler(StarkHandler):
change_template = 'homework_submit.html'
has_add_btn = None
def display_submit(self, request, obj=None, is_header=None, *args, **kwargs):
if is_header:
return '作业提交'
current_user_id = request.session['user_info']['id']
course_record_obj = models.HomeWorkStudentRecord.objects.filter(pk=obj.pk, student_id=current_user_id).get()
name = '%s:%s' % (self.site.namespace, self.get_url_name('homeworksubmit'))
attendance_url = reverse(name, kwargs={'course_record_id': course_record_obj.course_record_id, 'pk': obj.pk})
tpl = '<a target="_blank" href="%s">作业提交</a>' % attendance_url
return mark_safe(tpl)
def display_homeworkview(self, request, obj=None, is_header=None, *args, **kwargs):
if is_header:
return '作业查看'
current_user_id = request.session['user_info']['id']
course_record_obj = models.HomeWorkStudentRecord.objects.filter(pk=obj.pk, student_id=current_user_id).get()
name = '%s:%s' % (self.site.namespace, self.get_url_name('homeworkview'))
attendance_url = reverse(name, kwargs={'course_record_id': course_record_obj.course_record_id})
tpl = '<a target="_blank" href="%s">作业</a>' % attendance_url
return mark_safe(tpl)
list_display = ['student', 'course_record', display_homeworkview, display_submit]
def get_urls(self):
patterns = [
url(r'list/$', self.wrapper(self.changelist), name=self.get_list_url_name),
# url(r'add/$', self.wrapper(self.add_view), name=self.get_add_url_name),
# url(r'change/(?P<pk>\d+)/$', self.wrapper(self.change_view), name=self.get_change_url_name),
# url(r'del/(?P<pk>\d+)/$', self.wrapper(self.delete_view), name=self.get_delete_url_name),
url(r'homeworkview/(?P<course_record_id>\d+)/$', self.wrapper(self.homework_view),
name=self.get_url_name('homeworkview')),
url(r'submit/(?P<course_record_id>\d+)/(?P<pk>\d+)/$', self.wrapper(self.homework_submit),
name=self.get_url_name('homeworksubmit')),
]
        patterns.extend(self.extra_urls())  # self here is the handler instance that was originally passed in, not this base class, so the extra_urls lookup starts from that handler
return patterns
def get_queryset(self, request, *args, **kwargs):
# request.session['user_info']['id']
current_user_id = request.session['user_info']['id']
return self.model_class.objects.filter(student_id=current_user_id)
def get_change_object(self, request, pk, *args, **kwargs):
course_record_id = kwargs.get('course_record_id')
current_user_id = request.session['user_info']['id']
return self.model_class.objects.filter(pk=pk, course_record_id=course_record_id,
student_id=current_user_id).first()
def homework_submit(self, request, pk, *args, **kwargs):
current_user_id = request.session['user_info']['id']
course_record_id = kwargs.get('course_record_id')
current_obj = self.get_change_object(request, pk, *args, **kwargs)
if not current_obj:
return HttpResponse('你要的数据不存在,请重新输入')
basic_forms = BasicModelForm
if request.is_ajax():
response = {'user': None, 'msg': None}
homework = request.POST.get('homework')
avatar_obj = request.FILES.get('avatar')
if not avatar_obj:
response['msg'] = '抱歉!你的文件没有获取'
return JsonResponse(response)
file_path = os.path.join(settings.MEDIA_ROOT, 'avatars', avatar_obj.name)
print(file_path)
            with open(file_path, 'wb') as f:
                for chunk in avatar_obj.chunks():
                    f.write(chunk)
models.HomeWorkStudentRecord.objects.filter(student_id=current_user_id,
course_record_id=course_record_id).update(homework=homework,
avatar=file_path)
url_name = '%s:%s' % (self.site.namespace, self.get_list_url_name)
return redirect(url_name)
form = basic_forms(instance=current_obj)
return render(request, self.change_template or 'stark/change.html', {'form': form})
def homework_view(self, request, *args, **kwargs):
course_record_id = kwargs.get('course_record_id')
homework_list = models.CourseRecord.objects.filter(pk=course_record_id)
return render(request, 'homework_view.html', {'homework_list': homework_list})
|
from setuptools import setup
setup(name='factory',
version='0.0.1',
install_requires=['gym', 'simpy']#And any other dependencies required
)
|
import sys
import readConf as rc
import readData as rd
import handleData as hd
import FPrecommendation as fpr
import ALSrecommendation as alsr
import KMEANSrecommendation as kmeansr
from pyspark import SparkConf, SparkContext
"""
this func is the entry of the whole application
"""
if __name__ == '__main__':
"""
need assign the path of drive file to argv[0]
need assign the path of recommendation_conf.json to argv[1]
"""
if len(sys.argv) != 6:
print('parameter error!')
exit(1)
conf = SparkConf()
sc = SparkContext(conf = conf)
sc.setLogLevel("OFF")
    # initialization
print("============read conf==============")
# read conf from jsonfile
conf = rc.readConf(sys.argv[1])
# where is data from? hdfs or hive or others
mode = sys.argv[2]
location = sys.argv[3]
outputpath = sys.argv[4]
    algorithm = sys.argv[5]
data = ""
    # read the data
if "hdfs" == mode :
print("============read data from hdfs==============")
data = rd.readDataFromHDFS(location)
elif "hive" == mode:
print("============read data from hive==============")
data = rd.readDataFromHIVE(location)
    # process the data
print("============handle data==============")
data = hd.handle(data,conf["actions"])
if algorighm == "FP":
print("============FP recommendation==============")
fpr.recommendation(data,conf["FP"],outputpath)
elif algorighm == "ALS":
print("============ALS recommendation==============")
alsr.recommendation(data,conf["ALS"],outputpath)
elif algorighm == "KMEANS":
print("============KMEANS recommendation==============")
kmeansr.recommendation(data,conf["KMEANS"],outputpath) |
#!/bin/python3
def minTime(machines, goal):
left = goal // len(machines)
right = max(machines) * goal
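    # Binary search on the answer: the number of items produced by day `mid`,
    # sum(mid // m), is monotone non-decreasing in mid, so the first day
    # reaching `goal` is where the search converges.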
while left < right:
mid = left + (right - left) // 2
if sum(mid // m for m in machines) < goal:
left = mid + 1
else:
right = mid
return left
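# Example (hypothetical values): machines = [2, 3], goal = 5.
# Day 5 yields 5//2 + 5//3 = 3 items (< 5); day 6 yields 6//2 + 6//3 = 5,
# so minTime([2, 3], 5) returns 6.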
# C++ equivalent, kept for reference as a string so the file stays valid Python:
_cpp_reference = """
long minTime(vector<long> machines, long goal) {
    long max_day = *max_element(machines.begin(), machines.end());
    long left = 1;
    long right = max_day * goal;
    while (left < right) {
        long mid = left + (right - left) / 2;
        long sum = 0;
        for (auto m : machines) sum += mid / m;
        if (sum < goal)
            left = mid + 1;
        else
            right = mid;
    }
    return left;
}
"""
|
#!/usr/bin/python3
import requests
def urlpull():
#usrpull is variable from part1
#for now define usrpull as pikachu for testing
usrpull = 'pikachu'
pokemon = requests.get('https://pokeapi.co/api/v2/pokemon/' + usrpull)
link = pokemon.json()['sprites']['front_default']
    print(link)
urlpull()
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2013 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv, fields
from tools.translate import _
class account_move_line(osv.osv):
_inherit = 'account.move.line'
def _get_signed_amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
        An adapted copy of the _amount_residual method in account/account_move_line.py, allowing
        calculation of the residual amount in company currency for any account type, based on
        reconciliation.
"""
res = {}
if context is None:
context = {}
for move_line in self.browse(cr, uid, ids, context=context):
if move_line.reconcile_id:
continue
line_total_in_company_currency = move_line.debit - move_line.credit
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
res[move_line.id] = line_total_in_company_currency
return res
_columns = {
'signed_amount_residual': fields.function(_get_signed_amount_residual, method=True, string='Available Amount', type='float', readonly=True),
}
account_move_line()
class account_move(osv.osv):
_inherit = 'account.move'
def validate(self, cr, uid, ids, context=None):
        # Validate that every period being posted is still open
for _obj in self.browse(cr, uid, ids, context=context):
            if _obj.period_id.state == 'done':
raise osv.except_osv(_('Error!'), _("Move %s is dated in closed period.") % (_obj.name))
return super(account_move, self).validate(cr, uid, ids, context=context)
account_move()
|
import re
from core.msfActionModule import msfActionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
class scan_msf_smbuserenum(msfActionModule):
def __init__(self, config, display, lock):
super(scan_msf_smbuserenum, self).__init__(config, display, lock)
self.title = "Get List of Users From SMB"
self.shortName = "MSFSMBUserEnum"
self.description = "execute [auxiliary/scanner/smb/smb_enumusers] on each target"
self.requirements = ["msfconsole"]
self.triggers = ["nullSession"]
self.safeLevel = 5
def getTargets(self):
# we are interested only in the hosts that had nullsessions
self.targets = kb.get('vuln/host/*/nullSession')
def process(self):
# load any targets we are interested in
self.getTargets()
if len(self.targets) > 0:
# loop over each target
for t in self.targets:
# verify we have not tested this host before
if not self.seentarget(t):
# add the new IP to the already seen list
self.addseentarget(t)
cmd = {
'config':[
"use auxiliary/scanner/smb/smb_enumusers",
"set RHOSTS %s" % t
],
'payload':'none'}
                    result, outfile = self.msfExec(t, cmd)
# MSF output format:[*] [timestamp] IP DOMAIN [user,users] ( extras)
parts = re.findall(".*" + t.replace(".", "\.") + ".*", result)
for part in parts:
if "RHOSTS" in part:
pass
else:
try:
pieces = part.split()
domain = pieces[3]
kb.add("host/" + t + "/domain/" + domain.strip())
extras = part.split('(')[1].split(')')[0]
users = part.split('[')[3].split(']')[0].split(',')
for user in users:
kb.add("creds/host/" + t + "/username/" + user.strip())
except:
pass
outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + Utils.getRandStr(10)
Utils.writeFile(result, outfile)
kb.add("host/" + t + "/files/" + self.shortName + "/" + outfile.replace("/", "%2F"))
return
|
import requests
import uuid
from django.shortcuts import get_object_or_404
from rest_framework import viewsets, status
from users.models import Profile, UserRelationship
from users.api.serializers import FullProfileSerializer, UserRelationshipSerializer, CondensedProfileSerializer, ProfileSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from users.constants import RELATIONSHIP_STATUS_PENDING, RELATIONSHIP_STATUS_FRIENDS, RELATIONSHIP_STATUS_FOLLOWING
from urlparse import urlparse
from rest_framework.authentication import TokenAuthentication, BasicAuthentication
from nodes.models import Node
from django.contrib.sites.models import Site
from users.utils import determine_if_request_from_foundbook, verify_friends
site_name = Site.objects.get_current().domain
class ProfileViewSet(viewsets.ModelViewSet):
"""
refer to http://www.django-rest-framework.org/api-guide/viewsets/#modelviewset
"""
lookup_field = 'uuid'
lookup_value_regex = '[^/]+'
serializer_class = FullProfileSerializer
queryset = Profile.objects.all()
authentication_classes = (BasicAuthentication, TokenAuthentication)
permission_classes = (IsAuthenticated,)
def retrieve(self, *args, **kwargs):
uuid = kwargs.get('uuid')
profile = None
try:
profile = Profile.objects.get(uuid=uuid)
except Profile.DoesNotExist:
for node in Node.objects.filter(is_allowed=True):
response = self.request_foreign_profile_data(node, uuid)
if response and response.status_code == 200:
profile = response.json()
return Response(status=status.HTTP_200_OK, data=profile)
return Response(status=status.HTTP_404_NOT_FOUND, data='There are no profiles matching the given UUID')
host = profile.host
local = (host == site_name)
if not local:
# if host in ['http://warm-hollows-14698.herokuapp.com/', 'http://radiant-beyond-17792.herokuapp.com/']:
# host += 'api/'
node = Node.objects.filter(host=host)
if node and node[0].is_allowed:
node = node[0]
response = self.request_foreign_profile_data(node, uuid)
if response and response.status_code == 200:
profile = response.json()
return Response(status=status.HTTP_200_OK, data=profile)
return Response(status=status.HTTP_400_BAD_REQUEST, data='Could not contact server. Received response:%s'
%(vars(response)))
else:
return Response(status=status.HTTP_401_UNAUTHORIZED, data='User is from an unaccepted server.')
serializer = FullProfileSerializer(profile)
return Response(status=status.HTTP_200_OK, data=serializer.data)
def list_local(self, *args, **kwargs):
listofauthors = []
local = Profile.objects.all()
node = Node.objects.all()
localserializer = FullProfileSerializer(local, many=True)
listofauthors.extend(localserializer.data)
return Response(status=status.HTTP_200_OK, data=listofauthors)
def list(self, *args, **kwargs):
listofauthors = []
local = Profile.objects.all()
localserializer = FullProfileSerializer(local, many=True)
listofauthors.extend(localserializer.data)
return Response(status=status.HTTP_200_OK, data=listofauthors)
def request_foreign_profile_data(self, node, uuid):
endpoint = 'author/'
api_url = node.host + node.api_endpoint + endpoint + str(uuid) + '/'
try:
response = requests.get(api_url, auth=(node.username_for_node, node.password_for_node))
except requests.ConnectionError:
response = None
return response
class MyFriendsProfilesViewSet(viewsets.ModelViewSet):
"""
returns the authenticated users friends list
"""
serializer_class = CondensedProfileSerializer
authentication_classes = (BasicAuthentication, TokenAuthentication)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""
        The current user's friends list.
"""
friends = self.request.user.profile.friends
for friend in friends:
if(friend.host != site_name):
verify_friends(friend, self.request.user.profile)
return self.request.user.profile.friends
class UserRelationshipViewSet(viewsets.ModelViewSet):
lookup_field = 'uuid'
lookup_value_regex = '[^/]+'
serializer_class = UserRelationshipSerializer
model = UserRelationship
permission_classes = (IsAuthenticated,)
authentication_classes = (BasicAuthentication, TokenAuthentication)
queryset = Profile.objects.all()
def list(self, request, uuid):
"""
        Lists a given user's friends.
"""
try:
requested_profile = Profile.objects.get(uuid=uuid)
except Profile.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND, data="No profile with the given UUID is found on this server.")
friends = requested_profile.friends
for friend in friends:
if(friend.host != site_name):
verify_friends(friend, self.request.user.profile)
friends = requested_profile.friends
data = dict(
query='friends',
authors=list((friend.url for friend in friends))
)
return Response(data=data, status=status.HTTP_200_OK)
def query(self, request, uuid):
"""
        Checks whether any of the author URIs in the given list are friends with the requested author.
"""
try:
requested_profile = Profile.objects.get(uuid=uuid)
except Profile.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND, data="No profile with the given UUID is found on this server.")
authors_list = request.data.get('authors', None)
result_friends_list = []
if authors_list:
for friend in requested_profile.friends:
if friend.api_id in authors_list:
result_friends_list.append(friend.api_id)
response_msg = dict(
query='friends',
author=requested_profile.api_id,
authors=result_friends_list
)
return Response(status=status.HTTP_200_OK, data=response_msg)
return Response(status=status.HTTP_400_BAD_REQUEST, data='no authors list given.')
class UserRelationshipFriendRequestViewSet(viewsets.ModelViewSet):
serializer_class = UserRelationshipSerializer
model = UserRelationship
authentication_classes = (BasicAuthentication, TokenAuthentication)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""
        Lists all of the authenticated user's pending friend requests.
"""
return UserRelationship.objects.filter(receiver=self.request.user.profile, status=RELATIONSHIP_STATUS_PENDING)
def create_or_update(self, *args, **kwargs):
"""
        Creates a UserRelationship via a POST request to `api/friendrequest/` if the initiator/receiver pair is not in the DB.
        Required params:
        author = dict containing the initiating user's info
        friend = dict containing the receiving user's info
        Otherwise it updates the UserRelationship represented by the initiator/receiver pair;
        to update, you also need to pass the status param.
"""
data = self.request.data
foreign_user = None
role = None
must_create_profile = True
local_initiator = False
local_receiver = False
from_foundbook = determine_if_request_from_foundbook(data)
if from_foundbook: # set displayName for them!
try:
our_profile = Profile.objects.get(uuid=data.get('friend').get('id'))
displayName = our_profile.username
data.get('friend')['displayName'] = displayName
except Profile.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND, data="Profile with given UUID does not exist.")
try:
if from_foundbook:
local_author = Profile.objects.get(uuid=data.get('author').get('id'))
else:
local_author = Profile.objects.get(username=data.get('author').get('displayName'))
if local_author.host == site_name:
local_initiator = True
else:
must_create_profile = False
foreign_user = data.get('author')
except Profile.DoesNotExist:
foreign_user = data.get('author')
role = 'author'
try:
if from_foundbook:
local_friend = Profile.objects.get(uuid=data.get('friend').get('id'))
else:
local_friend = Profile.objects.get(username=data.get('friend').get('displayName'))
if local_friend.host == site_name:
local_receiver = True
else:
must_create_profile = False
foreign_user = data.get('friend')
except Profile.DoesNotExist:
foreign_user = data.get('friend')
role = 'friend'
if not local_initiator or not local_receiver: # one of the users is from another server
url_contents = urlparse(foreign_user.get('id')) # TODO this might not always be a url
host = foreign_user.get('host', foreign_user.get('id')[:foreign_user.get('id').find(url_contents.path) + 1] if not from_foundbook else None)
node = Node.objects.filter(host=host, is_allowed=True)
if node: # then we trust their server
identifier = url_contents.path.split('/')[-1]
if identifier == "":
identifier = url_contents.path.split('/')[-2]
requesting_node = Node.objects.filter(user=self.request.user) # we want to know if node is requesting for an update
if not requesting_node: # then a local user is requesting a friendship for a user on another server
node = node[0]
friend_request_url = '%s%sfriendrequest/' % (node.host, node.api_endpoint)
headers = {'Content-type': 'application/json'}
response = requests.post(friend_request_url, json=data, headers=headers, auth=(node.username_for_node, node.password_for_node))
print 'request sent to other server'
if response.status_code>=300:
return Response(status=response.status_code,
data=response.text)
if must_create_profile:
new_profile = Profile.objects.create(uuid=uuid.UUID(identifier).hex, username=foreign_user.get('displayName'),
host=host) # WARNING we will get errors because url will be our api endpoints
data[role] = CondensedProfileSerializer(new_profile).data
else:
return Response(status=status.HTTP_401_UNAUTHORIZED,
data='You are not an accepted server on our system.')
if not data.get('status', None):
data['status'] = RELATIONSHIP_STATUS_PENDING
serializer = UserRelationshipSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(status=status.HTTP_200_OK, data=serializer.data)
else:
return Response(status=status.HTTP_400_BAD_REQUEST, data=serializer.errors)
def destroy(self, *args, **kwargs):
"""
Depending on the status of the relationship, we either remove the instance entirely, or change the status around
If friends --> the deleting user becomes receiver of a "following" relationship
If following --> The entire UserRelationship object is removed
"""
pk = kwargs.get('pk', None)
instance = get_object_or_404(UserRelationship, pk=pk)
if self.request.user.profile == instance.initiator:
other_user_in_relationship = instance.receiver
elif self.request.user.profile == instance.receiver:
other_user_in_relationship = instance.initiator
else: # otherwise the user shouldn't have access to this object
return Response(data='You do not have access to this friendship.', status=status.HTTP_403_FORBIDDEN)
if instance.status == RELATIONSHIP_STATUS_FRIENDS:
# If friends then we change to following so other user can still see posts
instance.initiator = other_user_in_relationship
instance.receiver = self.request.user.profile
instance.status = RELATIONSHIP_STATUS_FOLLOWING
instance.save()
else:
instance.delete()
return Response(data="success", status=status.HTTP_200_OK)
|
# -*- coding: utf-8 -*-
"""
sports_dataset_frame.py
"""
from data.sports_dataset_frame_player import SportsDatasetFramePlayer
class SportsDatasetFrame():
"""
SportsDatasetFrame
"""
def __init__(self, match_status_id: int, frame_id: int, history_id: int,
ball_x: float, ball_y: float,
ball_player: SportsDatasetFramePlayer,
home_players: [SportsDatasetFramePlayer],
away_players: [SportsDatasetFramePlayer],
home_attack_direction: bool,
away_attack_direction: bool):
self.__match_status_id = match_status_id
self.__frame_id = frame_id
self.__history_id = history_id
self.__ball_x = ball_x
self.__ball_y = ball_y
self.__ball_player = ball_player
self.__home_players = home_players
self.__away_players = away_players
self.__home_attack_direction = home_attack_direction
self.__away_attack_direction = away_attack_direction
def get_match_status_id(self) -> int:
"""
get_match_status_id
"""
return self.__match_status_id
def get_frame_id(self) -> int:
"""
get_frame_id
"""
return self.__frame_id
def get_history_id(self) -> int:
"""
get_history_id
"""
return self.__history_id
def get_ball_x(self) -> float:
"""
get_ball_x
"""
return self.__ball_x
def get_ball_y(self) -> float:
"""
get_ball_y
"""
return self.__ball_y
def get_ball_player(self) -> SportsDatasetFramePlayer:
"""
get_ball_player
"""
return self.__ball_player
def get_home_players(self) -> [SportsDatasetFramePlayer]:
"""
get_home_players
"""
return self.__home_players
def get_away_players(self) -> [SportsDatasetFramePlayer]:
"""
get_away_players
"""
return self.__away_players
def get_home_attack_direction(self) -> bool:
"""
get_home_attack_direction
True: positive, False: negative
"""
return self.__home_attack_direction
def get_away_attack_direction(self) -> bool:
"""
get_away_attack_direction
True: positive, False: negative
"""
return self.__away_attack_direction
def get_home_players_points(self):
"""
get_home_players_points
"""
return self.__get_players_points(self.get_home_players())
def get_away_players_points(self):
"""
get_away_players_points
"""
return self.__get_players_points(self.get_away_players())
@staticmethod
def __get_players_points(players: [SportsDatasetFramePlayer]):
players_x = []
players_y = []
for player in players:
players_x.append(player.get_x())
players_y.append(player.get_y())
return players_x, players_y
|
# -*- coding:utf-8 -*-
from functools import partial
import sys
import uuid
import time
import json
import datetime
from PySide2 import QtCore
from PySide2.QtCore import Slot
from PySide2.QtWidgets import (QWidget, QHBoxLayout, QVBoxLayout, QGroupBox, QLabel,
QComboBox, QLineEdit, QGridLayout)
from initwidget.switchbutton import SwitchButton
# from initwidget.meterreport import MeterReport
from initwidget.meterstatus import MeterStatus
# from util.rest import InitReport
from util.util import get_config
from util.database import get_demos, get_channels, add_demo, add_channel
class InitWidget(QWidget):
"""
    Initialization widget.
    Meter parameters must be configured via the config file.
"""
def __init__(self):
super(InitWidget, self).__init__()
self.config = get_config()
#
vsub_box1 = QVBoxLayout()
# vsub_box1.addWidget(groupBox)
h4_box = QHBoxLayout()
lab4 = QLabel(u"数据初始化")
self.sbut = SwitchButton()
h4_box.addWidget(lab4)
h4_box.addWidget(self.sbut)
vsub_box1.addLayout(h4_box)
init_hbox = QVBoxLayout()
init_hbox.addLayout(vsub_box1)
self.tunnel_gb = QGridLayout()
self.gridlayouts = []
for i in range(1, 5):
box = self.create_tunnel_groupbox(None)
self.gridlayouts.append(box)
self.tunnel_gb.addLayout(box, int((i-1) / 2), (i-1) % 2 )
main_vbox = QVBoxLayout()
main_vbox.addLayout(init_hbox)
self.meterwid = MeterStatus()
demos = get_demos()
for dm in demos:
self.meterwid.addEntry(dm.id, dm.position, dm.onlinetime, count=4, freq=dm.freq)
main_vbox.addWidget(self.meterwid)
main_vbox.addLayout(self.tunnel_gb)
# main_vbox.addWidget(MeterReport())
self.setLayout(main_vbox)
self.meterwid.tableView.selectionModel().selectionChanged.connect(self.on_selectionChanged)
index = self.meterwid.tableModel.index(0, 0)
self.meterwid.tableView.setCurrentIndex(index)
@Slot('QItemSelection', 'QItemSelection')
def on_selectionChanged(self, selected, deselected):
print("selected: ")
for ix in selected.indexes():
if ix.column() == 0:
demo_id = int(ix.data())
channels = get_channels(demo_id)
i = 1
for ch in channels:
chdict = {}
chdict["channelPos"] = ch.position
chdict["dataFlow"] = ch.dataflow
chdict["frameSize"] = ch.framesize
chdict["sensorNum"] = ch.sensornum
chdict["channelNo"] = ch.number
chdict["demodulatorID"] = ch.demoid
chdict["packetSize"] = ch.packetsize
self.set_tunnel_groupbox(chdict, self.gridlayouts[i-1])
i += 1
@Slot(str, str)
def refresh_kafkamsg(self, key, value):
# print("init refresh msg")
tun_dict = json.loads(value)
demo = tun_dict["demodulator"]
add_demo(demo["demodulatorID"], demo["demodulatorPos"], demo["channelNum"],
demo["sampleFreq"], demo["onlineTime"], demo["status"])
for i in range(1, 5):
key = "channel%d" % i
channel = tun_dict[key]
add_channel(channel["channelNo"], channel["demodulatorID"], channel["channelPos"], channel["sensorNum"],
channel["dataFlow"], channel["frameSize"], channel["packetSize"], datetime.datetime.now())
self.set_tunnel_groupbox(channel, self.gridlayouts[i-1])
# save to db
def set_tunnel_groupbox(self, chnvalue, chngrid):
#
qnumber_line = chngrid.itemAtPosition(0, 1).widget()
text = u"%s号通道" % str(chnvalue["channelNo"])
qnumber_line.setText(text)
        # channel position
chngrid.itemAtPosition(1, 1).widget().setText(str(chnvalue["channelPos"]))
        # data flow rate
chngrid.itemAtPosition(1, 3).widget().setText(str(chnvalue["dataFlow"]) + "MB/s")
        # data frames
chngrid.itemAtPosition(2, 1).widget().setText(str(chnvalue["frameSize"])+ u"帧")
        # sensor count
chngrid.itemAtPosition(2, 3).widget().setText(str(chnvalue["sensorNum"]))
        # data packet
chngrid.itemAtPosition(3, 1).widget().setText(
str(chnvalue["packetSize"]) + "*" + str(chnvalue["sensorNum"]))
        # # data type
        # chngrid.itemAtPosition(3, 3).widget().setText(str(chnvalue[""]))
def create_tunnel_groupbox(self, tunneldata):
if tunneldata == None:
tunnelnum = -1
tunnelpos = " "
dataflow = " "
sensornum = " "
frame = " "
# packetnum = " "
        else:
            tunnelnum = tunneldata.number
            tunnelpos = tunneldata.position
            dataflow = tunneldata.dataflow  # was missing, causing a NameError below; attribute name assumed from the channel model fields used above
            sensornum = tunneldata.sensornum
            frame = tunneldata.frame
        # refresh the fields from the loaded data
group_lay = QGridLayout()
lab = QLabel(u"通道编号")
qlin = QLineEdit(u"%s号通道" % tunnelnum)
group_lay.addWidget(lab, 0, 0)
group_lay.addWidget(qlin, 0, 1)
lab = QLabel(u"通道位置")
qlin = QLineEdit(tunnelpos)
lab2 = QLabel(u"数据流量")
qlin2 = QLineEdit(dataflow)
group_lay.addWidget(lab, 1, 0)
group_lay.addWidget(qlin, 1, 1)
group_lay.addWidget(lab2, 1, 2)
group_lay.addWidget(qlin2, 1, 3)
lab = QLabel(u"数据帧")
qlin = QLineEdit(frame)
lab2 = QLabel(u"测区数量")
qlin2 = QLineEdit(sensornum)
group_lay.addWidget(lab, 2, 0)
group_lay.addWidget(qlin, 2, 1)
group_lay.addWidget(lab2, 2, 2)
group_lay.addWidget(qlin2, 2, 3)
lab = QLabel(u"数据包")
qlin = QLineEdit(u"200*527")
lab2 = QLabel(u"数据类型")
qlin2 = QLineEdit(u"float")
group_lay.addWidget(lab, 3, 0)
group_lay.addWidget(qlin, 3, 1)
group_lay.addWidget(lab2, 3, 2)
group_lay.addWidget(qlin2, 3, 3)
return group_lay
|
import tkinter as tk
from PIL import Image, ImageTk, ImageDraw
from .picture_button import Button
import numpy as np
class MainPictureComponent(tk.Label):
def __init__(self, master, image, file):
tk.Label.__init__(self, master = master, width = 180, height = 200, image = image, padx=10, pady=10, borderwidth=0)
self.bind("<Enter>", self.select_image)
self.bind("<Leave>", self.remove_buttons)
self.bind("<Double-Button-1>", self.destroy_self)
self.file = file
self.image = image
self.btn_move_image = ImageTk.PhotoImage(Image.open("Data\\repos\\move.png").resize((30,30), Image.ANTIALIAS))
self.btn_crop_image = ImageTk.PhotoImage(Image.open("Data\\repos\\crop.png").resize((30,30), Image.ANTIALIAS))
self.width = 180
self.height = 200
self.initialize_crop_buttons()
self.initialize_resize_buttons()
self.cropping = [False, False, False, False]
def destroy_self(self, event):
self.destroy()
def remove_buttons(self, event):
self.unshow_buttons()
def unshow_buttons(self):
for i in range(4):
self.resize_buttons[i].place_forget()
self.move.place_forget()
self.crop.place_forget()
for i in range(4):
if not self.cropping[i]:
self.crop_buttons_frames[i].place_forget()
def select_image(self, event):
self.width = event.widget.winfo_width()
self.height = event.widget.winfo_height()
self.select(self.width, self.height)
def select(self, width, height):
self.unshow_buttons()
self.display_crop_buttons(width, height)
self.update_buttons_position(width, height)
def resize_crop_buttons(self, width, height):
for i in range (4):
btn_width = width if (i % 2) == 0 else 10
btn_height = height if (i % 2) != 0 else 10
self.crop_buttons_frames[i].config(width = btn_width, height = btn_height)
if not self.cropping[i]:
self.display_crop_buttons(width, height)
def initialize_crop_buttons(self):
self.crop_buttons = []
self.crop_buttons_frames = []
for i in range(4):
width = self.width
height = self.height
btn_width = width if (i % 2) == 0 else 10
btn_height = height if (i % 2) != 0 else 10
self.crop_buttons_frames.append(tk.Frame(self, width = btn_width, height = btn_height))
self.crop_buttons_frames[i].propagate(False)
self.crop_buttons.append(tk.Button(self.crop_buttons_frames[i], bg="red"))
self.crop_buttons[i].bind("<B1-Motion>", self.update_crop_position)
self.crop_buttons[i].bind("<ButtonRelease-1>", self.release_update_crop_position)
self.crop_buttons[i].pack(expand = True, fill= tk.BOTH)
self.crop_buttons[i].id = i
def display_crop_buttons(self, width, height):
positions = [
['nw', 0, 0],
['ne', width, 0],
['sw', 0, height],
['nw', 0, 0]
]
for i in range(4):
if not self.cropping[i]:
self.crop_buttons_frames[i].place(anchor = positions[i][0], x = positions[i][1] , y = positions[i][2])
def initialize_resize_buttons(self):
ids = [11, 1, 0, 10]
anchors = ['nw', 'ne', 'se', 'sw']
self.resize_buttons = []
for i in range(4):
self.resize_buttons.append(Button(master = self, id = ids[i], anchor = anchors[i], config = {"width":2, "height":1, "text":"", "bg":'black'}))
self.resize_buttons[i].bind("<B1-Motion>", self.resize_picture)
self.resize_buttons[i].bind("<ButtonRelease-1>", self.update)
self.move = tk.Button(self, width = 30, height = 30, image = self.btn_move_image)
self.crop = tk.Button(self, width = 30, height = 30, image = self.btn_crop_image)
self.move.bind("<B1-Motion>", self.move_picture)
self.move.bind("<ButtonRelease-1>", self.stop_move_picture)
self.crop.bind("<Button-1>", self.crop_image)
def update(self, event):
width = self.winfo_width()
height = self.winfo_height()
self.update_buttons_position(width, height)
self.resize_crop_buttons(width, height)
self.image = ImageTk.PhotoImage(self.resized)
self.config(image = self.image)
self.select(width, height)
self.width = width
self.height = height
def update_buttons_position(self, width, height):
for i in range(4):
self.resize_buttons[i].place_button(width, height)
self.resize_buttons[i].event_x = 0
self.resize_buttons[i].event_y = 0
self.move.place(anchor="center", x = width/2, y = height/2)
self.crop.place(anchor="center", x = width/2, y = height - 40)
def update_crop_position(self, event):
widget = event.widget.master
widget_x = widget.winfo_x()
widget_y = widget.winfo_y()
if event.widget.id % 2 == 1:
widget.place(x = widget_x + event.x, y = widget_y, anchor="nw")
else:
widget.place(y = widget_y + event.y, x = widget_x, anchor = "nw")
self.cropping[event.widget.id] = True
def release_update_crop_position(self, event):
self.select(self.width, self.height)
def crop_image (self, event):
img_width, img_height = event.widget.master.file.size
ratio_x = img_width / self.winfo_width()
ratio_y = img_height / self.winfo_height()
box = []
buttons = self.crop_buttons_frames
box.append(buttons[3].winfo_x() * ratio_x)
box.append(buttons[0].winfo_y() * ratio_y)
box.append((buttons[1].winfo_x() + buttons[1].winfo_width()) * ratio_x)
box.append((buttons[2].winfo_y() + buttons[2].winfo_height()) * ratio_y)
file = event.widget.master.file
new_image = file.crop(box).resize((self.width, self.height), Image.ANTIALIAS)
self.image = ImageTk.PhotoImage(new_image)
self.file = new_image
self.config(image = self.image)
self.cropping = [False, False, False, False]
self.select(self.width, self.height)
def move_picture(self, event):
self.unshow_buttons()
container = event.widget.master
container_x = container.winfo_x()
container_y = container.winfo_y()
container_width = container.winfo_width()
container_height = container.winfo_height()
canvas = container.master
canvas_width = canvas.winfo_width()
canvas_height = canvas.winfo_height()
if (container_x + event.x > 0) & (container_y + event.y > 0) & (event.x + container_width + container_x < canvas_width) & (event.y + container_height + container_y < canvas_height):
container.place(anchor="nw", x = event.x + container_x, y = event.y + container_y)
def stop_move_picture(self, event):
self.select(self.width, self.height)
def resize_picture(self, event):
self.unshow_buttons()
button = event.widget
offset_x = int (button.id / 10)
offset_y = button.id % 10
container = button.master
container_width = container.winfo_width()
container_height = container.winfo_height()
container_x = container.winfo_x() - 2
container_y = container.winfo_y() - 2
canvas_width = container.master.winfo_width()
canvas_height = container.master.winfo_height()
button.event_x = event.x - button.event_x
button.event_y = event.y - button.event_y
new_width = max((container_width + (button.event_x if offset_x == 0 else -event.x), 50))
new_height = max((container_height + (button.event_y if offset_y == 0 else -event.y), 150))
if (container_width + button.event_x > 0) & (container_height + button.event_y > 0) & (container_x + new_width < canvas_width) & (container_y + new_height < canvas_height):
self.resized = self.file.resize((new_width, new_height), Image.ANTIALIAS)
container.config(width = new_width, height = new_height)
container.place(x = offset_x * (container_width - new_width) + container_x, y = offset_y * (container_height - new_height) + container_y, anchor="nw")
button.event_x = event.x
button.event_y = event.y |
''' This script lets you input digits to form a "ticket" (a list of those numbers)
and counts how many random tickets (won_ticket) have to be generated
before one matches yours. In other words: lotteries have pretty bad odds, I'd say.
I left the ticket length extra low (2 digits), for demo purposes.'''
from random import randrange, choice
lottery = list(range(1, 11))
def user_choice_get():
print("Welcome to lottery! Input digits you think will win")
user_choice = []
while len(user_choice) < 2:
choice = input(f"Enter digits from 1 to {len(lottery)}: ")
try:
if choice == "exit":
break
choice = int(choice)
if choice > 0 and choice < 11:
user_choice.append(choice)
print(f"{choice} added to your ticket. It contains {user_choice} now")
else:
print("That's not a correct number to add")
print(f"your ticket contains {user_choice} now")
except ValueError:
print("Something went wrong!")
print(f"your ticket contains {user_choice} now")
if user_choice and len(user_choice) == 2:
return user_choice
else:
print("You didn't provide all 2 numbers for lottery ticket")
my_ticket = user_choice_get()
def run_the_show():
counter = 0
won_ticket = None
for i in range(1, 100000000):
if won_ticket != my_ticket:
won_ticket = random_ticket()
counter += 1
else:
break
if won_ticket == my_ticket:
print(f"Match of {my_ticket} found!")
print(f"\nIt took {counter} tries to crack the lottery!")
else:
print(f"I ran {counter} attempts but the match was not found")
print(f"your ticket was{my_ticket}")
def random_ticket():
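    # Note: choice() samples with replacement and list equality is
    # order-sensitive, so a draw matches only when it has the same numbers
    # in the same order as the user's ticket.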
won_ticket = []
while len(won_ticket) < 2:
x = choice(lottery)
won_ticket.append(x)
return won_ticket
run_the_show()
|
# Take a file name as a command line argument, count how many times each word appears in the file, and print the word
# that appears the most.
# To pass arguments to a Python script, what corresponds to C's argc and argv (C's command-line parameters)?
# Required module: sys
# Argument count: len(sys.argv)
import sys
def read_file(file_path):
    file_to_read = open(file_path, "r")  # read the whole file as one string
txt=file_to_read.read()
txt = txt.lower()
file_to_read.close()
return txt
def replace(txt):
for ch in ',~!@#$%^&*()_+"{}[]|?.<>?\n':
        txt = txt.replace(ch, ' ')  # replace ch with a space. Note: replacing with '' would turn 's\n1\n...' into 's11111'; assigning '' effectively deletes the character
return txt
def word_dict(txt):
word_dic = {}
    str = txt.split()  # split into words on whitespace (spaces or \n)
# print(str)
    for word in str:  # check whether the word has already appeared in the dict
if word in word_dic:
word_dic[word] += 1
else:
word_dic[word] = 1
print(word_dic)
return word_dic
'''
print("name of the script: %s" % sys.argv[0]) #python practice2.py a bsda jkas: length l=4
print(len(sys.argv))
for i in range(1,len(sys.argv)):
print(i)
# fileHandle = open("test.txt",w)
# fileHandle.write ( '\n\nBottom line.' ) #write
txt=open("./temp/t1.txt","r").read() # read the whole file as one string
txt=txt.lower()
for ch in ',~!@#$%^&*()_+"{}[]|?.<>?\n':
    txt=txt.replace(ch,'') # ch is replaced with nothing
word_dic={}
str=txt.split() # split into words on spaces or \n
for word in str: # check whether the word has already appeared in the dict
if word in word_dic:
word_dic[word] += 1
else:
word_dic[word]=1
word_lis=word_dic.items() # turn the dict into a list of (word, count) pairs
word_lis=sorted(word_lis,key=lambda x:x[1],reverse=True) # sort by count, descending; sort by key instead with key=lambda d:d[0]
print(word_lis[0][0])
'''
if __name__ == '__main__':
print('start \n')
file_path= sys.argv[1]
print(file_path)
txt=read_file(file_path)
txt=replace(txt)
print(txt)
dic=word_dict(txt)
    word_lis = dic.items()  # turn the dict into a list of (word, count) pairs
    word_lis = sorted(word_lis, key=lambda x: x[1], reverse=True)  # sort by count, descending; sort by key instead with key=lambda d: d[0]
    print("%s appears %d times" % (word_lis[0][0], word_lis[0][1]))  # note: the values after % must be a tuple, wrapped in ()
|
"""
What are the results of the following operations? Why?
1.5 + 2
1.5 // 2.0
1.5 / 2.0
1.5 ** 2
1 / 2
-3 // 2
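
Answers (Python 3):
1.5 + 2    -> 3.5    (int promoted to float)
1.5 // 2.0 -> 0.0    (floor division; result stays a float)
1.5 / 2.0  -> 0.75   (true division)
1.5 ** 2   -> 2.25   (exponentiation)
1 / 2      -> 0.5    (true division in Python 3; it is 0 in Python 2)
-3 // 2    -> -2     (floor division rounds toward negative infinity)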
""" |
print("Importing helpers...")
from mp3helpers import *
print("Importing other stuff...")
from collections import defaultdict, OrderedDict
from time import time
import copy
import numpy as np
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from multiprocessing import Pool  # used by map_task_multi below
import sys
print("Done importing...")
chunk = 1000
lenDict = 50000
steps = sys.argv[1:]
smooth = 1
for arg in steps:
if arg.startswith('smooth'):
smooth = float(arg.split('=')[-1])
break
print(smooth)
isstop = 'stop' in steps
isstem = 'stem' in steps
print('STOP IS', isstop)
print('STEM IS', isstem)
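# Usage sketch (assumed; the script name is a placeholder), e.g.:
#   python spam_nb.py 1 2 3 4 smooth=0.5 stop stem mul
# steps 1-4 run preprocessing, dictionary building, feature extraction and
# training/evaluation; 'stop'/'stem' toggle stopword removal and stemming,
# 'ber'/'mul' select BernoulliNB or MultinomialNB (the default).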
ppFilenames = list(map(pp_filename, filenameNos))
testFilenames = ppFilenames[10000:70335:2]
lenTest = len(testFilenames)
trainFilenames = list(set(ppFilenames) - set(testFilenames))
dictionary = defaultdict(int)
def map_task_multi(func, List):
vals = []
with Pool(processes=None) as pool:
for val, i in zip(pool.imap(func, List), range(1, len(List)+1)):
vals.append(val)
if i % 1000 == 0:
print(i, "out of", len(List))
return vals
def map_task_single(func, List):
vals = []
for val, i in zip(map(func, List), range(1, len(List)+1)):
vals.append(val)
if i % 1000 == 0:
print(i, "out of", len(List))
return vals
def step1(filename):
#print('Reading', filename)
rawEmail = read_file(filename)
#print('Preprocessing', filename)
ppEmail = preprocess(rawEmail, stop=isstop, stem=isstem)
save_file((ppEmail, pp_filename(filename)))
#return rawEmail
def step2(text):
global dictionary
for word in text.split(' '):
dictionary[word] += 1
#print(dictionary)
def step2_file(filename):
#print('Processing', filename)
step2(read_file(filename))
def read_dictionary():
return(read_file('dictionary.txt').split('\n'))
def step3_file(filename):
fileWords = read_file(filename).split(' ')
freqs = step3(fileWords)
if is_spam(filename) == 'spam':
return freqs+',1'
else:
return freqs+',0'
def step3(fileWords):
#cp1 = time()
fileDict = OrderedDict.fromkeys(dictionaryList, 0)
#cp2 = time()
for word in fileWords:
if word in fileDict:
fileDict[word] += 1
#cp3 = time()
#trainCsv.write(','.join(map(str, fileDict.values())) + '\n')
#print(cp2-cp1, cp3-cp2)
return ','.join(map(str, fileDict.values()))
def step3_all(fnList, datasetFn):
Csv = open(datasetFn, 'w')
chunk = 100
vals = []
for val, i in zip(map(step3_file, fnList), range(1, len(fnList)+1)):
vals.append(val)
if i % 200 == 0:
print(i, "out of", len(fnList))
if i % chunk == 0:
#print("writing")
Csv.write('\n'.join(vals)+'\n')
vals = []
Csv.write('\n'.join(vals))
def test_on_csv(model, csvfile):
X = np.zeros((chunk, lenDict))
testCsv = open(csvfile, 'r')
isDone = False
Y = []
P = []
COUNT = 0
while not isDone:
ct = 0
for line in testCsv:
vector = [int(i) for i in line.split(',')]
X[ct] = vector[:-1]
Y.append(vector[-1])
ct += 1
if ct == chunk:
break
if ct == 0:
break
if ct != chunk:
X = np.resize(X, (ct, lenDict))
isDone = True
P += list(model.predict(X))
COUNT += ct
print(COUNT)
correct = np.sum(np.array(Y) == np.array(P))
print('%d out of %d. %s%%' % (correct, COUNT, str(correct / COUNT * 100)))
def step4():
print('Training...')
trainCsv = open('dataset-train.csv', 'r')
X = np.zeros((chunk, lenDict))
if 'ber' in steps:
model = BernoulliNB(alpha=smooth)
elif 'mul' in steps:
model = MultinomialNB(alpha=smooth)
else:
model = MultinomialNB(alpha=smooth)
isDone = False
COUNT = 0
while not isDone:
ct = 0
Y = []
for line in trainCsv:
vector = [int(i) for i in line.split(',')]
X[ct] = vector[:-1]
Y.append(vector[-1])
ct += 1
if ct == chunk:
break
if ct == 0:
break
if ct != chunk:
X = np.resize(X, (ct, lenDict))
isDone = True
model.partial_fit(X, Y, classes=[0,1])
COUNT += ct
print(COUNT)
print('Testing on training set...')
test_on_csv(model, 'dataset-train.csv')
print('Testing on test set...')
test_on_csv(model, 'dataset-test.csv')
total=len(filenames)
print(str(total), 'files')
if '1' in steps:
print('Doing step 1...')
#map_task_multi(step1, filenames)
map_task_single(step1, filenames)
if '2' in steps:
print('Doing step 2...')
wordLists = map_task_single(step2_file, ppFilenames)
print('Saving file...')
sortedTuples = sorted(dictionary.items(), key=lambda i: i[1], reverse=True)
save_file((
'\n'.join(map(lambda i: i[0], sortedTuples[:lenDict])),
'dictionary.txt'
))
if '3' in steps:
print('Doing step 3...')
dictionaryList = read_dictionary()
step3_all(trainFilenames, 'dataset-train.csv')
step3_all(testFilenames, 'dataset-test.csv')
if '4' in steps:
print('Doing step 4...')
step4()
|
#!/usr/bin/env python
# coding=utf-8
"""
Graphs for lecture 4, plot solution to an ODE
"""
from __future__ import print_function
import sys
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import fsolve
from common import make_fig, GOOD_RET
__author__ = 'hbmayes'
# noinspection PyUnusedLocal
def ode(y, t, k, k_c, cao, nu_0, gas=True):
"""
:param gas: flag to account for volume change
    :param y: dependent variable (conversion)
    :param t: independent variable (reactor volume here)
:param k: rate coefficient
:param k_c: equilibrium coefficient
:param cao: initial concentration
:param nu_0: initial volumetric flow rate
:return: dy_dt
"""
if gas:
vol_change = 1-0.5*y
else:
vol_change = 1
return 2.0 * k / nu_0 * (cao * np.square((1.0-y)/vol_change) - y * 0.5 / k_c / vol_change)
# noinspection PyTypeChecker
def solve_ode():
"""
Solve single ODE
"""
fig_name = "lect5"
k = 0.2 # L/mol s
k_c = 20.0 # L/mol
cao = 0.2 # mol/L
x0 = 0.0 # initial conversion
nu_0 = 1.0 # L / s
v_start = 0.0
v_end = 60.0
volume = np.linspace(v_start, v_end, 1001) # L
conv = odeint(ode, x0, volume, args=(k, k_c, cao, nu_0))
conv_liq = odeint(ode, x0, volume, args=(k, k_c, cao, nu_0, False))
# here, need to add the additional argument of "t" because of how "ode" was set up for "odeint"
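    # fsolve finds the root of dX/dV = 0, i.e. the equilibrium conversion the
    # conversion profile approaches at large reactor volume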
x_eq = fsolve(ode, 0.5, args=(v_end, k, k_c, cao, nu_0))
x_eq_liq = fsolve(ode, 0.5, args=(v_end, k, k_c, cao, nu_0, False))
make_fig(fig_name + "_conversion", volume, conv,
x_label=r'volume (L)', y_label=r'conversion (unitless)', y1_label=r'X(V)',
x2_array=[v_start, v_end], y2_array=[x_eq, x_eq], y2_label=r'X$_{eq}$',
x_lima=0.0, x_limb=v_end,
y_lima=0.0, y_limb=1.0,
fig_width=8, fig_height=4,
)
make_fig(fig_name + "_conversion_liq", volume, conv_liq,
x_label=r'volume (L)', y_label=r'conversion (unitless)', y1_label=r'X(V)',
x2_array=[v_start, v_end], y2_array=[x_eq_liq, x_eq_liq], y2_label=r'X$_{eq}$',
x_lima=0.0, x_limb=v_end,
y_lima=0.0, y_limb=1.0,
fig_width=8, fig_height=4,
)
vol_change = 1.0 - 0.5 * conv
c_a_no_vol = cao * (1.0 - conv_liq)
c_b_no_vol = cao * conv_liq * 0.5
c_a = cao * (1.0 - conv) / vol_change
c_b = cao * conv * 0.5 / vol_change
make_fig(fig_name + "_concentration", volume, c_a,
y1_label="A", y2_array=c_b, y2_label="B", color2="red",
x_label=r'volume (L)', y_label=r'concentration (mol/L)',
x_lima=0.0, x_limb=v_end,
y_lima=0.0, y_limb=cao,
fig_width=8, fig_height=4,
)
make_fig(fig_name + "_concentration_liq", volume, c_a_no_vol,
y1_label="A", y2_array=c_b_no_vol, y2_label="B", color2="red",
x_label=r'volume (L)', y_label=r'concentration (mol/L)',
x_lima=0.0, x_limb=v_end,
y_lima=0.0, y_limb=cao,
fig_width=8, fig_height=4,
)
conv_2 = odeint(ode, x0, volume, args=(k, k_c, cao, nu_0*0.5))
conv_3 = odeint(ode, x0, volume, args=(k, k_c, cao, nu_0*2.0))
make_fig(fig_name + "_clicker", volume, conv,
x_label=r'volume (L)', y_label=r'conversion (unitless)', y1_label=r'A) No change',
y2_array=conv * 2.0, y2_label=r'B) ', y3_array=conv * 0.5, y3_label=r'C) ',
y4_array=conv_2, y4_label=r'D) ', y5_array=conv_3, y5_label=r'E) ',
x_lima=0.0, x_limb=v_end,
y_lima=0.0, y_limb=1.0,
fig_width=8, fig_height=4,
)
x_leven = np.linspace(0.0, 0.7, 1001) # conversion, unitless
y2 = 1.0 / ode(x_leven, 1.0, k, k_c, cao, nu_0)
# eps = -0.5
# y_leven = nu_0 * k_c/k * np.square(1 + eps * x_leven) / (2 * k_c * cao * np.square(1-x_leven) -
# x_leven * (1 + eps * x_leven))
make_fig(fig_name + "_levenspiel", x_leven, y2,
x_label=r'conversion (unitless)', y_label=r'$\frac{-F_{A0}}{r_A}$ (L)',
y_lima=0.0, y_limb=150,
x_lima=0.0, x_limb=0.8,
fig_width=8, fig_height=4,
)
def main():
""" Runs the main program.
"""
solve_ode()
return GOOD_RET # success
if __name__ == '__main__':
status = main()
sys.exit(status)
|
from copy import deepcopy
from sqlalchemy_dao import Dao
from werkzeug.exceptions import NotFound
from schema import Articles, DB, Categories
# added category management
class ArticlesDAO(object):
def __init__(self):
self.dao = Dao(DB)
def find_all(self):
with self.dao.create_session() as session:
articles = session.query(Articles).all()
            return deepcopy(articles)  # a plain copy doesn't work here
def create(self, new_art):
with self.dao.create_session() as session:
new_article = Articles(**new_art)
session.add(new_article)
session.flush()
return {"id": new_article.id, **new_art}
    def update(self, get_id, new_art):
        with self.dao.create_session() as session:
            session.query(Articles).filter_by(id=get_id).update(new_art)
            session.commit()
            return {"id": get_id, **new_art}
def find_id(self, id):
with self.dao.create_session() as session:
articles = session.query(Articles).filter(Articles.id == id).first()
if articles:
return deepcopy(articles)
else:
raise NotFound
def delete(self, id):
with self.dao.create_session() as session:
articles = session.query(Articles).filter(Articles.id == id).first()
if articles:
session.delete(articles)
session.commit()
return "", 200
else:
raise NotFound
def find_all_by_cat(self, id):
with self.dao.create_session() as session:
articles = session.query(Articles).filter(Articles.id_category == id).all()
if articles:
return deepcopy(articles)
|
# coding: utf-8
"""
Yagna Market API
## Yagna Market The Yagna Market is a core component of the Yagna Network, which enables computational Offers and Demands circulation. The Market is open for all entities willing to buy computations (Demands) or monetize computational resources (Offers). ## Yagna Market API The Yagna Market API is the entry to the Yagna Market through which Requestors and Providers can publish their Demands and Offers respectively, find matching counterparty, conduct negotiations and make an agreement. This version of Market API conforms with capability level 1 of the <a href=\"https://docs.google.com/document/d/1Zny_vfgWV-hcsKS7P-Kdr3Fb0dwfl-6T_cYKVQ9mkNg\"> Market API specification</a>. Market API contains two roles: Requestors and Providers which are symmetrical most of the time (excluding agreement phase). # noqa: E501
The version of the OpenAPI document: 1.5.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ya_market.configuration import Configuration
class Agreement(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'agreement_id': 'str',
'demand': 'Demand',
'offer': 'Offer',
'valid_to': 'datetime',
'approved_date': 'datetime',
'state': 'str',
'proposed_signature': 'str',
'approved_signature': 'str',
'committed_signature': 'str'
}
attribute_map = {
'agreement_id': 'agreementId',
'demand': 'demand',
'offer': 'offer',
'valid_to': 'validTo',
'approved_date': 'approvedDate',
'state': 'state',
'proposed_signature': 'proposedSignature',
'approved_signature': 'approvedSignature',
'committed_signature': 'committedSignature'
}
def __init__(self, agreement_id=None, demand=None, offer=None, valid_to=None, approved_date=None, state=None, proposed_signature=None, approved_signature=None, committed_signature=None, local_vars_configuration=None): # noqa: E501
"""Agreement - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._agreement_id = None
self._demand = None
self._offer = None
self._valid_to = None
self._approved_date = None
self._state = None
self._proposed_signature = None
self._approved_signature = None
self._committed_signature = None
self.discriminator = None
self.agreement_id = agreement_id
self.demand = demand
self.offer = offer
self.valid_to = valid_to
if approved_date is not None:
self.approved_date = approved_date
self.state = state
if proposed_signature is not None:
self.proposed_signature = proposed_signature
if approved_signature is not None:
self.approved_signature = approved_signature
if committed_signature is not None:
self.committed_signature = committed_signature
@property
def agreement_id(self):
"""Gets the agreement_id of this Agreement. # noqa: E501
:return: The agreement_id of this Agreement. # noqa: E501
:rtype: str
"""
return self._agreement_id
@agreement_id.setter
def agreement_id(self, agreement_id):
"""Sets the agreement_id of this Agreement.
:param agreement_id: The agreement_id of this Agreement. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and agreement_id is None: # noqa: E501
raise ValueError("Invalid value for `agreement_id`, must not be `None`") # noqa: E501
self._agreement_id = agreement_id
@property
def demand(self):
"""Gets the demand of this Agreement. # noqa: E501
:return: The demand of this Agreement. # noqa: E501
:rtype: Demand
"""
return self._demand
@demand.setter
def demand(self, demand):
"""Sets the demand of this Agreement.
:param demand: The demand of this Agreement. # noqa: E501
:type: Demand
"""
if self.local_vars_configuration.client_side_validation and demand is None: # noqa: E501
raise ValueError("Invalid value for `demand`, must not be `None`") # noqa: E501
self._demand = demand
@property
def offer(self):
"""Gets the offer of this Agreement. # noqa: E501
:return: The offer of this Agreement. # noqa: E501
:rtype: Offer
"""
return self._offer
@offer.setter
def offer(self, offer):
"""Sets the offer of this Agreement.
:param offer: The offer of this Agreement. # noqa: E501
:type: Offer
"""
if self.local_vars_configuration.client_side_validation and offer is None: # noqa: E501
raise ValueError("Invalid value for `offer`, must not be `None`") # noqa: E501
self._offer = offer
@property
def valid_to(self):
"""Gets the valid_to of this Agreement. # noqa: E501
        End of validity period. Agreement needs to be accepted, rejected or cancelled before this date; otherwise it will expire # noqa: E501
:return: The valid_to of this Agreement. # noqa: E501
:rtype: datetime
"""
return self._valid_to
@valid_to.setter
def valid_to(self, valid_to):
"""Sets the valid_to of this Agreement.
        End of validity period. Agreement needs to be accepted, rejected or cancelled before this date; otherwise it will expire # noqa: E501
:param valid_to: The valid_to of this Agreement. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and valid_to is None: # noqa: E501
raise ValueError("Invalid value for `valid_to`, must not be `None`") # noqa: E501
self._valid_to = valid_to
@property
def approved_date(self):
"""Gets the approved_date of this Agreement. # noqa: E501
date of the Agreement approval # noqa: E501
:return: The approved_date of this Agreement. # noqa: E501
:rtype: datetime
"""
return self._approved_date
@approved_date.setter
def approved_date(self, approved_date):
"""Sets the approved_date of this Agreement.
date of the Agreement approval # noqa: E501
:param approved_date: The approved_date of this Agreement. # noqa: E501
:type: datetime
"""
self._approved_date = approved_date
@property
def state(self):
"""Gets the state of this Agreement. # noqa: E501
        * `Proposal` - newly created by a Requestor (based on Proposal) * `Pending` - confirmed by a Requestor and sent to Provider for approval * `Cancelled` by a Requestor * `Rejected` by a Provider * `Approved` by both sides * `Expired` - not accepted, rejected nor cancelled within validity period * `Terminated` - finished after approval. # noqa: E501
:return: The state of this Agreement. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this Agreement.
        * `Proposal` - newly created by a Requestor (based on Proposal) * `Pending` - confirmed by a Requestor and sent to Provider for approval * `Cancelled` by a Requestor * `Rejected` by a Provider * `Approved` by both sides * `Expired` - not accepted, rejected nor cancelled within validity period * `Terminated` - finished after approval. # noqa: E501
:param state: The state of this Agreement. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and state is None: # noqa: E501
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
allowed_values = ["Proposal", "Pending", "Cancelled", "Rejected", "Approved", "Expired", "Terminated"] # noqa: E501
if self.local_vars_configuration.client_side_validation and state not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}" # noqa: E501
.format(state, allowed_values)
)
self._state = state
@property
def proposed_signature(self):
"""Gets the proposed_signature of this Agreement. # noqa: E501
:return: The proposed_signature of this Agreement. # noqa: E501
:rtype: str
"""
return self._proposed_signature
@proposed_signature.setter
def proposed_signature(self, proposed_signature):
"""Sets the proposed_signature of this Agreement.
:param proposed_signature: The proposed_signature of this Agreement. # noqa: E501
:type: str
"""
self._proposed_signature = proposed_signature
@property
def approved_signature(self):
"""Gets the approved_signature of this Agreement. # noqa: E501
:return: The approved_signature of this Agreement. # noqa: E501
:rtype: str
"""
return self._approved_signature
@approved_signature.setter
def approved_signature(self, approved_signature):
"""Sets the approved_signature of this Agreement.
:param approved_signature: The approved_signature of this Agreement. # noqa: E501
:type: str
"""
self._approved_signature = approved_signature
@property
def committed_signature(self):
"""Gets the committed_signature of this Agreement. # noqa: E501
:return: The committed_signature of this Agreement. # noqa: E501
:rtype: str
"""
return self._committed_signature
@committed_signature.setter
def committed_signature(self, committed_signature):
"""Sets the committed_signature of this Agreement.
:param committed_signature: The committed_signature of this Agreement. # noqa: E501
:type: str
"""
self._committed_signature = committed_signature
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Agreement):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Agreement):
return True
return self.to_dict() != other.to_dict()
|
# coding:utf-8
import serial, commands  # 'commands' is the local helper module that builds the uCam command strings
import time
import re
from binascii import hexlify, unhexlify
from struct import pack, unpack
import math
import codecs
class UCam(object):
"""
an interface to communicate with a uCam-II camera
over a UART serial connection.
"""
def __init__(self):
# change serial device name to yours
self.ser = serial.Serial('/dev/tty.usbserial-xxxxxxxxx', baudrate=921600, timeout=.01)
self.synced = False
print("initialized!")
def sync(self):
        num_tries = 60  # per the uCam documentation, 60 sync attempts are enough
while num_tries > 0:
if self._sync():
return True
num_tries -= 1
return False
def _write(self, string):
return self.ser.write(bytearray(unhexlify(string)))
def _matches(self, pattern, packet):
packet_str = hexlify(packet)
return re.match(pattern, packet_str.decode()) is not None
def _sync(self):
time.sleep(.05)
self._write(commands.sync())
read = self.ser.read(6)
print(read)
if self._matches(commands.ack('0d', '..'), read):
if self._matches(commands.sync(), self.ser.read(6)):
self._write(commands.ack('0d', '00'))
return True
return False
def _initial(self):
init_cmd = commands.initial('07', '07', '07')
print("init cmd {}".format(init_cmd))
self._write(init_cmd)
# print("send init commandだよ")
# print(init_cmd)
# self.ser.write(bytearray(unhexlify('aa0100070707')))
read = self._wait_for_bytes(6)
print("return ack, maybe AA 0E 01 xx 00 00")
print('ack {}'.format(read))
assert self._matches(commands.ack('01', '..'), read)
def _wait_for_bytes(self, i):
bytearr = bytearray(i)
cur = 0
while cur < i:
read = self.ser.read(1)
if len(read) == 1:
bytearr[cur] = read[0]
cur += 1
return bytearr
def _set_pkg_size(self):
# set package size 512 bytes
self._write(commands.set_pkg_size('00', '02'))
assert self._matches(commands.ack('06', '..'), self._wait_for_bytes(6))
def _snapshot(self):
self._write(commands.snapshot('00', '00', '00'))
assert self._matches(commands.ack('05', '..'), self._wait_for_bytes(6))
def _get_picture(self):
"""
sends the GET PICTURE command and receives the corresponding DATA command.
Returns the number of packets to be read.
"""
self._write(commands.get_picture('01'))
assert self._matches(commands.ack('04', '..'), self._wait_for_bytes(6))
# receive DATA
data = self._wait_for_bytes(6)
print("data is ", data)
assert self._matches(commands.data('01', '..', '..', '..'), data)
print("hexlify(data) is: ", hexlify(data))
print("data is", data)
print("data[-3:] is ", data[-3:])
# below line is too redundant... to avoid "UnicodeDecodeError: 'utf-8' codec can't decode byte 0x9a in position 0: invalid start byte"
# img_size = unpack('<I', (codecs.decode(codecs.encode(unhexlify(hexlify(data[-3:])), 'hex'), 'hex') + b'\x00'))[0]
# img_size = unpack('<I', (unhexlify(hexlify(data[-3:])) + b'\x00'))[0]
# this is simple, maybe best
img_size = unpack('<I', (data[-3:] + b'\x00'))[0]
print("image size is {}".format(img_size))
### num_pkgs must be int
# num_pkgs = img_size / (512 - 6)
num_pkgs = math.floor(img_size / (512 - 6))
print("num packages: {}".format(num_pkgs))
self._write(commands.ack('00', '00'))
return img_size
def _write_picture(self, img_size, name='pic.jpeg'):
### num_pkgs must be int
# num_pkgs = img_size / (512 - 6)
num_pkgs = math.floor(img_size / (512 - 6))
with open(name, 'wb+') as f:
for i in range(1, num_pkgs + 1):
# print("getting package {}".format(i))
read = self._wait_for_bytes(512)
# print(read)
f.write(read[4:-2])
### hex_idx must be str
# hex_idx = hexlify(pack('H', i))
hex_idx = hexlify(pack('H', i)).decode()
# print("hex_idx is {}".format(hex_idx))
self._write(commands.ack('00', '00', hex_idx[:2], hex_idx[-2:]))
f.write(self._wait_for_bytes(img_size - num_pkgs * (512 - 6) + 2))
f.close()
# ACK end of data transfer
self._write(commands.ack('f0', 'f0'))
print("taken picture, finish!")
def take_picture(self, name='pic.jpeg'):
# initialize for JPEG, VGA
self._initial()
# set package size to 512 bytes
self._set_pkg_size()
        # compressed snapshot pic
self._snapshot()
        # get picture (snapshot); _get_picture returns the image size in bytes
        img_size = self._get_picture()
        # receive img data pkgs
        self._write_picture(img_size, name)
def reset(self):
self._write(commands.reset())
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 22:04:04 2020
@author: giles
"""
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import random
import shutil
import open3d as o3d
def get_object_T_camera(x: float, y: float, z: float) -> np.ndarray:
z_vector = np.array([-x, -y, -z])
e_z = z_vector / np.linalg.norm(z_vector)
x_vector = np.cross(e_z, np.array([0,0,1]))
e_x = x_vector / np.linalg.norm(x_vector)
e_y = np.cross(e_z, e_x)
camera_position = np.array([x,y,z])
object_T_camera = np.c_[e_x, e_y, e_z, camera_position]
return object_T_camera
def spherical_to_cartesian(azimuth: float, elevation: float, distance: float = 1.0):
#if azimuth > 2 * np.pi or elevation > 2 * np.pi:
#warnings.warn('Expects radians, received {} for azimuth and {} for elevation'.format(azimuth, elevation))
z = distance * np.sin(elevation)
d_cos = distance * np.cos(elevation)
x = d_cos * np.cos(azimuth)
y = d_cos * np.sin(azimuth)
return x, y, z
def pose_from_filename(filename: str) -> np.ndarray:
azimuth_degree, elevation_degree = tuple(float(v) for v in filename.split('.')[0].split('_')[-2:])
azimuth_degree *= -10
azimuth_rad, elevation_rad = np.deg2rad(azimuth_degree), np.deg2rad(elevation_degree)
x, y, z = spherical_to_cartesian(azimuth_rad, elevation_rad)
object_T_camera = get_object_T_camera(x, y, z)
return object_T_camera
def RotationMatrix6D(img1,img2):
R01 = pose_from_filename(img1)
R02 = pose_from_filename(img2)
R12 = np.transpose(R01[:,0:3]) @ R02[:,0:3]
T12 = np.subtract(R02[:,3],R01[:,3])
return np.hstack((np.reshape(R12[:,0:2],(1,6)),np.reshape(T12,(1,3))))
def Rfrom6D(tensor):
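    # Gram-Schmidt: normalize the first 3-vector of the 6D representation,
    # orthogonalize the second against it, and take their cross product as the
    # third column, recovering a full rotation matrix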
b1 = np.array([tensor.cpu().numpy()[0],tensor.cpu().numpy()[2],tensor.cpu().numpy()[4]])
a2 = np.array([tensor.cpu().numpy()[1],tensor.cpu().numpy()[3],tensor.cpu().numpy()[5]])
b1 = b1/np.linalg.norm(b1)
b2 = a2 - (b1 @ a2)*b1
b2 = b2/np.linalg.norm(b2)
b3 = np.cross(b1, b2)
return np.c_[b1,b2,b3]
def evaluateR(groundtruth, outputs):
I = np.identity(3)
error = 0
for i in range(groundtruth.shape[0]):
R_p = Rfrom6D(outputs[i].detach())
R_t = Rfrom6D(groundtruth[i])
error+=np.linalg.norm(R_p @ np.transpose(R_t) - I,'fro')
return error/groundtruth.shape[0]
def splitTrainTest(datadir):
im_list = os.listdir(datadir)
test_list = random.sample(im_list, int(len(im_list)*0.1))
for im in im_list:
if im not in test_list:
shutil.move(os.path.join(datadir,im),'./dataset/train')
else:
shutil.move(os.path.join(datadir,im),'./dataset/test')
#visualize3D(filename_list[0][0],filename_list[0][1],outputs[0].detach())
#output = outputs[i].detach()
#black points represent the two input camera poses, the red point represents the estimated camera2.
def visualize3D(img1,img2,output):
sphere_points=[]
for x in range(0,100):
y = np.sqrt(10000-(x*x))
sphere_points.append(np.array([x/100,y/100,0]))
sphere_points.append(np.array([-x/100,y/100,0]))
sphere_points.append(np.array([x/100,-y/100,0]))
sphere_points.append(np.array([-x/100,-y/100,0]))
world_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2, origin=[0,0,0])
R01 = pose_from_filename(img1)
R02 = pose_from_filename(img2)
R12 = Rfrom6D(output)
R12 = R01[:,0:3] @ R12
R12 = np.hstack((R12,np.array([[output.numpy()[6]+R01[0][3]],[output.numpy()[7]+R01[1][3]],[output.numpy()[8]+R01[2][3]]])))
R12 = np.vstack((R12,np.array([0,0,0,1])))
camera1 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2, origin=[0,0,0])
camera2 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2, origin=[0,0,0])
camera_12 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2, origin=[0,0,0])
camera1 = camera1.transform(np.vstack((R01,np.array([0,0,0,1]))))
camera2 = camera2.transform(np.vstack((R02,np.array([0,0,0,1]))))
camera_12 = camera_12.transform(R12)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(sphere_points)
ori_c1 = o3d.geometry.PointCloud()
ori_c1.points = o3d.utility.Vector3dVector([np.array([R01[0][3],R01[1][3],R01[2][3]+0.1])])
ori_c1.colors = o3d.utility.Vector3dVector([np.array([0,0,0])])
ori_c2 = o3d.geometry.PointCloud()
ori_c2.points = o3d.utility.Vector3dVector([np.array([R02[0][3],R02[1][3],R02[2][3]+0.1])])
ori_c2.colors = o3d.utility.Vector3dVector([np.array([0,0,0])])
ori_pre = o3d.geometry.PointCloud()
ori_pre.points = o3d.utility.Vector3dVector([np.array([R12[0][3],R12[1][3],R12[2][3]+0.1])])
ori_pre.colors = o3d.utility.Vector3dVector([np.array([255,0,0])])
o3d.visualization.draw_geometries([world_frame, camera1, camera2,camera_12,pcd,ori_c1,ori_c2,ori_pre])
### test single image ###
def single_RotationMatrix6D(img1,img2):
R01 = pose_from_filename(img1)
T01 = R01[:,3]
return np.hstack((np.reshape(R01[:,0:2],(1,6),'F'),np.reshape(T01,(1,3))))
def singleimageR(filename_list):
gt = np.zeros((len(filename_list),9))
for i in range(len(filename_list)):
gt[i,:] = single_RotationMatrix6D(filename_list[i][0],filename_list[i][1])
return torch.from_numpy(gt)
### |
# the test process for the spongebob character classifier
from services.data_preprocessor_service import DataPreprocessorService
from classifiers.spongebob_character_classifier import SpongebobCharacterClassifier
from models import data_model, fully_connected_layer_model, activation_layer_model
class SpongebobCharacterClassifierTest:
def __init__(self, num_classes: int, num_iters: int):
data = {
'x_train': None,
'y_train': None,
'x_val': None,
'y_val': None
}
self.num_iters = num_iters
self.num_classes = num_classes
data['x_train'], data['y_train'] = self.load_imagesets('train')
data['x_val'], data['y_val'] = self.load_imagesets('cv')
data['x_test'], data['y_test'] = self.load_imagesets('test')
self.data_model = data_model.DataModel(data, num_classes, [100, 100])
self.learning_rate = 0.01
def run(self):
fc_layer_1 = fully_connected_layer_model.FullyConnectedLayerModel(30000, 10, 'fc1', self.learning_rate)
activation_layer_1 = activation_layer_model.ActivationLayerModel('relu', 'output_activation')
output_fc = fully_connected_layer_model.FullyConnectedLayerModel(10, self.num_classes, 'fc2', self.learning_rate)
output_activation = activation_layer_model.ActivationLayerModel('softmax', 'output_activation')
# layers list
layers = [
fc_layer_1,
activation_layer_1,
output_fc,
output_activation
]
# instantiate classifier model
classifier_model = SpongebobCharacterClassifier(self.data_model, self.num_iters, layers, 0.1)
# train model
classifier_model.train(classifier_model.data.x_train, classifier_model.data.y_train)
return classifier_model
def load_imagesets(self, training_phase):
imagesets = DataPreprocessorService.load_imagesets(training_phase)
imageset = DataPreprocessorService.merge_imagesets(imagesets)
shuffled_imageset = DataPreprocessorService.unison_shuffle_images_labels(imageset['x'], imageset['y'])
return shuffled_imageset['x'], shuffled_imageset['y']
|
# balance = 4213
# annualInterestRate = 0.2
# monthlyPaymentRate = 0.04
balance = 4842
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
monthlyInterestRate = annualInterestRate / 12.0
totalPaid = 0
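# each month: pay the minimum (monthlyPaymentRate * balance), then one month of
# interest (annualInterestRate / 12) accrues on the unpaid remainder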
for i in range(1, 13):
minimumMonthlyPayment = monthlyPaymentRate * balance
monthlyUnpaidBalance = balance - minimumMonthlyPayment
totalPaid += minimumMonthlyPayment
balance = monthlyUnpaidBalance + monthlyInterestRate * monthlyUnpaidBalance
print "Month:", i
print "Minimum monthly payment:", round(minimumMonthlyPayment, 2)
print "Remaining balance:", round(balance, 2)
print "Total paid:", round(totalPaid, 2)
print "Remaining balance:", round(balance, 2) |
from flask import Flask, render_template, url_for, request, session, redirect
from flask.helpers import flash
from flask_pymongo import PyMongo
import bcrypt
app = Flask(__name__)
app.config["BUNDLE_ERRORS"] = True
app.secret_key = 'mysecret'
app.config['MONGO_DBNAME'] = 'Fatema'
app.config['MONGO_URI'] = 'mongodb+srv://Fatema:Fatema786@login.tli64.mongodb.net/test'
mongo = PyMongo(app)
@app.route('/')
def index():
if 'username' in session:
return redirect(url_for('dashboard'))
return render_template('index.html')
@app.route('/login', methods=['POST'])
def login():
users = mongo.db.users
    login_user = users.find_one({'name' : request.form['username']})
    if login_user:
        # bcrypt was imported but never used; check the submitted password
        # against the stored hash (assumes passwords were stored via register below)
        if bcrypt.checkpw(request.form['pass'].encode('utf-8'), login_user['password']):
            session['username'] = request.form['username']
            return redirect(url_for('dashboard'))
        else:
            flash('*Invalid Username/Password')
            return redirect(url_for('index'))
    else:
        flash('*Invalid Username/Password')
        return redirect(url_for('index'))
@app.route('/register', methods=['POST', 'GET'])
def register():
if request.method == 'POST':
users = mongo.db.users
existing_user = users.find_one({'name' : request.form['username']})
        if existing_user is None:
            hashed = bcrypt.hashpw(request.form['pass'].encode('utf-8'), bcrypt.gensalt())
            users.insert_one({'name' : request.form['username'], 'password' : hashed})
            session['username'] = request.form['username']
            return redirect(url_for('dashboard'))
else:
            flash('Username already exists')
return render_template('index.html')
@app.route('/dashboard')
def dashboard():
    if 'username' not in session:
        return redirect(url_for('index'))
    return render_template('dashboard_page.html')
@app.after_request
def add_header(response):
response.cache_control.max_age = 300
return response
@app.route('/logout')
def logout():
session.pop('username',None)
return redirect(url_for('index'))
if __name__ == '__main__':
app.run(debug=True)
#host="0.0.0.0"
# elif login_user['username'] and login_user['password'] == '':
# flash('*Username/password cannot be empty')
# return redirect(url_for('index')) |
###################################
#_SIMPLE PYTHON CALCULATOR PROGRAM_#
###################################
#################
#Basic Functions#
#################
# ADDITION
def add(x, y):
return x + y
# SUBTRACTION
def sub(x, y):
return x - y
# MULTIPLICATION
def mult(x, y):
return x * y
# DIVISION
def div(x, y):
return x / y
# EXPONENT
def expo(x, y):
return x ** y
#######################
# Calculator Function #
#######################
def calculate():
while True:
print()
print('You have the option of: ')
print('A. - Addition')
        print('S. - Subtraction')
        print('M. - Multiplication')
        print('D. - Division')
        print('E. - Exponentiation')
print()
user_selection = input('Please enter a selection (A, S, M, D, E): ')
choice = user_selection.upper()
if choice in ('A', 'S', 'M', 'D', 'E'):
num_1 = float(input('Please input first number: '))
num_2 = float(input('Please input second number: '))
print()
if choice == 'A':
result = add(num_1, num_2)
print('The result is: ' + str(result))
elif choice == 'S':
result = sub(num_1, num_2)
print('The result is: ' + str(result))
elif choice == 'M':
result = mult(num_1, num_2)
print('The result is: ' + str(result))
elif choice == 'D':
result = div(num_1, num_2)
print('The result is: ' + str(result))
elif choice == 'E':
result = expo(num_1, num_2)
print('The result is: ' + str(result))
else:
print()
print('Invalid Input')
calculate()
|
#-*- coding: utf-8 -*-
import urllib2, simplejson
BASE_URL = "http://app.tsogu.ru/shedule_new/bin/groups.py"
def get_group_code(gname):
gname = urllib2.quote(gname.encode('utf-8'))
r = urllib2.urlopen("%s?act=groupcode&groupname=%s"%(BASE_URL, gname)).read()
return simplejson.loads(r)
def get_timetable(gcode):
r = urllib2.urlopen("%s?act=json&sgroup=%s"%(BASE_URL, gcode)).read()
return simplejson.loads(r)
def get_institutes():
r = urllib2.urlopen("%s?act=json_institutes"%(BASE_URL)).read()
return simplejson.loads(r)
def get_groups(id_inst):
r = urllib2.urlopen("%s?act=json_groups&id_inst=%s"%(BASE_URL, id_inst)).read()
return simplejson.loads(r)
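# Example flow (illustrative; assumes a valid group name on the server):
#   code = get_group_code(u'group name')
#   timetable = get_timetable(code)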
|
"""Abstract Task class."""
import abc
from typing import Any
class Task(abc.ABC):
"""
Abstract Task class.
A task is one of a list of independent execution tasks that are
submitted to the execution engine to be executed using the execute()
method, commonly in parallel.
"""
def __init__(self):
pass
@abc.abstractmethod
def execute(self) -> Any:
"""Execute the task and return its results."""
|
"""
This script makes a short version of a typical data file exported from ultravision.
Note that the resulting file has been manually edited to fix the # of scan and index points in
the headers...
"""
from packages import utils
import pandas as pd
from os.path import join
from pandas.errors import EmptyDataError  # modern pandas location; was pandas.io.common
NHEADER = 19
NDATAKEEP = 10
conf = utils.get_configuration()['paths']['phase2']
fname = join(conf['data_dir'], 'paut_startup', 'experiment', 'halfpath_0skew.txt')
nskip = 0
lines_to_keep = []
# first get all header
try:
while True:
header = pd.read_table(fname,
skiprows=nskip,
nrows=NHEADER,
sep='=',
skipinitialspace=True,
header=None,
engine='c',
index_col=0,
squeeze=True)
parts = header.index.str.split()
# remove unit values from the header labels
header.index = [' '.join([pi for pi in p if not any((c in pi) for c in set('[]()'))])
for p in parts]
lines_to_keep += list(range(nskip, nskip + NHEADER + NDATAKEEP))
nskip += int(header['ScanQty']) * int(header['IndexQty']) + NHEADER
print(lines_to_keep)
except EmptyDataError:
pass
# now open the file and read it line by line, selectively keeping rows
with open(fname, 'r') as fid:
content = fid.readlines()
with open('ultravision_example_pa.txt', 'w') as fid:
fid.writelines([content[l] for l in lines_to_keep]) |
from controllers.historyController import HistoryController
import sys
import logging
if __name__ == "__main__":
log = logging.getLogger("log")
log.addHandler(logging.FileHandler("log.log", "a"))
log.setLevel(logging.DEBUG)
history = HistoryController(log)
if len(sys.argv) == 2 and sys.argv[1] == "test":
history.test_mode()
while True:
log.info("Starting game")
try:
result = history.start_game()
except KeyboardInterrupt:
log.info("User canceled operation")
result = -1
except Exception as e:
log.info("Got unexpected exception: %s. Could not recover.", e)
result = 1
if result == 0:
break
|
import jieba
import csv
txt = open("all_out.txt", "r").read()
words = jieba.lcut(txt)
counts = {}
for word in words:
    if len(word) == 1:  # exclude single-character tokens
continue
else:
counts[word] = counts.get(word,0) + 1
items = list(counts.items())
items.sort(key=lambda x:x[1], reverse=True)
for i in range(1000):
word, count = items[i]
print ("{0:<10}{1:>5}".format(word, count))
with open('frequency.csv', 'w') as writer:
    cw = csv.writer(writer, lineterminator='\n')
    cw.writerows(items)  # write the word counts to csv
|
from django.contrib import admin
from .models import Organization, Moderator, Subscription, Event, EvalForm
admin.site.register(Organization)
admin.site.register(Moderator)
admin.site.register(Subscription)
admin.site.register(Event)
admin.site.register(EvalForm) |
import random
Symbol = 'QWERTYUIOPASDFGHJKLZXCVBNMqwerty()uiopasdfghjklzxcvbnm123456789!@#$%^&*'
print('''
______________
Password Maker
______________''')
num_passwords = int(input('How many passwords do you need? '))
num_chars = int(input('How many characters in your password? '))
print('Here are your passwords:')
for i in range(num_passwords):
    password = ''
    for r in range(num_chars):
        password += random.choice(Symbol)
    print(password)
from setuptools import find_packages, setup
VERSION = "0.0.0"
REQUIRED_PACKAGES = []
with open("requirements.txt", "r") as reqs_txt_file:
REQUIRED_PACKAGES = [line.strip() for line in reqs_txt_file]
REQUIRED_DEV_PACKAGES = []
with open("requirements-dev.txt", "r") as reqs_dev_txt_file:
REQUIRED_DEV_PACKAGES = [line.strip() for line in reqs_dev_txt_file]
setup(
name="vibsym",
version=VERSION,
author="Jonathon Bechtel",
description="A small symmetry package for 2D molecules",
url="https://github.com/jbechtel/vibsym",
packages=find_packages(),
python_requires='==3.8.1',
)
|
# Mobin Anandwala
# 04/24/2017
# This is a conversion to read data from the data port using Python 3.6
import http.client
import urllib.parse
# Exosite Information
Exosite_Server = 'm2.exosite.com'
Exosite_URL = '/api:v1/stack/alias'
Exosite_CIK = 'f7302c574a9cb0a195100ddbe3ca98cf80d08634'
Exosite_Header = {'X-Exosite-CIK': Exosite_CIK, 'content-type': 'application/x-www-form-urlencoded; charset=utf-8'}
# Generate a pi number to upload data
def genPiNumber(PiNumber):
ExoPi = 'Pi' + ' ' + str(PiNumber)
return ExoPi
# Upload your data (based on exosite example python code)
def uploadData(data):
conn = http.client.HTTPConnection(Exosite_Server)
conn.request("POST",Exosite_URL,data,Exosite_Header)
response = conn.getresponse()
return response
# Generate parameters (based on exosite example python)
def generateParams(PiNumber,Current,Voltage,Event):
ExoPi = genPiNumber(PiNumber)
params = urllib.parse.urlencode({(ExoPi + ' ' + 'Current'): Current, (ExoPi + ' ' + 'Voltage'): Voltage, (ExoPi + ' ' + 'Event'): Event})
return params
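# Example flow (illustrative; values are placeholders):
#   params = generateParams(1, 0.5, 120.0, 'powerOn')
#   response = uploadData(params)
#   print(response.status, response.reason)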
|
num = int(input('Enter n value : '))
print('The prime numbers are : ')
for i in range(2,num+1):
for j in range(2,int((i/2)+1)):
if i%j == 0:
break
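    # note: this "else" belongs to the for-loop, not the if — it runs only when
    # the inner loop finishes without "break", i.e. no divisor was found and i is prime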
else:
print(i) |
import FWCore.ParameterSet.Config as cms
from SimCalorimetry.HGCalSimProducers.hgcalDigitizer_cfi import HGCAL_noise_fC as _HGCAL_noise_fC
HGCAL_noise_fC = _HGCAL_noise_fC.clone()
|
"""
Author: LRP
Date: 13-03-2015
"""
import contfit
# from degradespec import degrader
import bestfit
import numpy as np
# This is supposed to be the only function to call!
# Then this function calls everything from it
def rsgjanal(ospec, owave, ores, mspec, quiet=True):
"""
Trimmed spectra, nothing more to do than run the routines!
Full artillery!
Make an input file for filenames and resolution for each spectrum
"""
    # 1. Find spectral resolution - let's start by assuming I know this
    # 2. Resample models onto observations
# 3. Degrade model spectrum to match resolution of observations
# -- use Jesus' routine
# 4. Fit the continuum
# 5. Derive stellar parameters ... -- done outside function
# 6. Define the errors ... -- done outside function
# Many of the steps are done before this function ...
# This should be the function to execute all of that!
# It can't be run within a 4D for loop and
# 1.:
# 2.: Resample
# mssam = contfit.specsam(mwave, mspec, owave)
# 3.:
# mdeg = degrader(owave, mspec, mres, ores)
# 4.:
# Test: is contfit to blame for our mismatched parameters?
cft = contfit.contfit2(ores, owave, mspec, ospec)
oscale = ospec * cft(owave)
# Calculate Chisq
chi = bestfit.chicalc(owave, oscale, mspec)
# chi = bestfit.chisq(oscale, np.std(oscale), mspec)
if quiet is not True:
return oscale, chi, cft
return oscale, chi
|
from django import forms
class RecipeForm(forms.Form):
    recipe = forms.CharField(label='recipe_name', max_length=100)
    amount_cal = forms.CharField(label='amount_cal', max_length=10)  # label must be passed as a keyword
    time = forms.DecimalField(min_value=0)  # was min(0), a TypeError; min_value is the intended kwarg
    ingredients = forms.CharField(widget=forms.Textarea)  # TextInput/Textarea are widgets, not fields
    def __str__(self):  # Python 3 uses __str__ rather than __unicode__
        return str(self.data.get('recipe', ''))
|
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostbyname(socket.gethostname())
port = 8883
s.connect((host, port))
#msg2 = (s.recv(1024)).decode("utf-8")
with open('sample.txt', 'w') as f:
    while True:
        print('receiving data...')
        data = s.recv(1024).decode('ascii')
        if not data:
            break  # connection closed; the with-block closes the file for us
        print(data)
        f.write(data)
s.close()
|
## 329. Longest Increasing Path in a Matrix
#
# Given an integer matrix, find the length of the longest increasing path.
#
# From each cell, you can either move to four directions: left, right, up or down. You may NOT move diagonally or move outside of the boundary (i.e. wrap-around is not allowed).
#
# Example 1:
# Input: nums =
# [
# [9,9,4],
# [6,6,8],
# [2,1,1]
# ]
# Output: 4
# Explanation: The longest increasing path is [1, 2, 6, 9].
#
# Example 2:
# Input: nums =
# [
# [3,4,5],
# [3,2,6],
# [2,2,1]
# ]
# Output: 4
# Explanation: The longest increasing path is [3, 4, 5, 6]. Moving diagonally is not allowed.
##
## approach : DFS with memoization
## reference : https://leetcode.com/problems/longest-increasing-path-in-a-matrix/discuss/78334/Python-solution-memoization-dp-288ms?page=1
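## complexity : O(m*n) time and space - each cell's longest path is computed once and memoized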
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix or not len(matrix) or not len(matrix[0]):
return 0
def dfs(row, col):
if not memo[row][col]:
value = matrix[row][col]
memo[row][col] = 1 + max(
dfs(row-1, col) if row and value > matrix[row-1][col] else 0,
dfs(row+1, col) if row < len(matrix)-1 and value > matrix[row+1][col] else 0,
dfs(row, col-1) if col and value > matrix[row][col-1] else 0,
dfs(row, col+1) if col < len(matrix[0])-1 and value > matrix[row][col+1] else 0
)
return memo[row][col]
memo = [[ 0 for _ in range(len(matrix[0]))] for _ in range(len(matrix)) ]
return max(dfs(row, col) for row in range(len(matrix)) for col in range(len(matrix[0])))
import unittest
class Test(unittest.TestCase):
def test_longestIncreasingPath(self):
test_input = [
[
[9,9,4],
[6,6,8],
[2,1,1]
],
[
[1,2]
]
]
test_result = [4, 2]
for i in range(len(test_input)):
result = Solution().longestIncreasingPath(test_input[i])
self.assertEqual(result, test_result[i])
print(result)
if __name__ == "__main__":
unittest.main() |
# -*- coding: utf-8 -*-
""" Google Earth file output example
required simplekml module.
if you didn't install simplekml, execute the following command.
> pip install simplekml
"""
from OpenVerne import IIP
import numpy as np
import pandas as pd
import simplekml
import warnings
warnings.filterwarnings('ignore')
if __name__ == '__main__':
posLLH_ = np.array([35.0, 140.0, 100])
velNED_ = np.array([10, 0, 0])
_IIP = IIP(posLLH_, velNED_)
# print(_IIP)
_IIP.disp()
kml_points = [["satrt", posLLH_[0], posLLH_[1], posLLH_[2]],
["IIP", _IIP.posLLH_IIP_deg[0], _IIP.posLLH_IIP_deg[1], 0]]
kml = simplekml.Kml()
for point in kml_points:
p = kml.newpoint(name=point[0], coords=[(point[2], point[1], point[3])])
p.altitudemode = simplekml.AltitudeMode.absolute
p.lookat.latitude = point[1]
p.lookat.longitude = point[2]
kml.save("test.kml")
print("kml file outputted.")
|
from typing import List
from hummingbot.client.config.config_helpers import ClientConfigAdapter, get_connector_class
from hummingbot.client.settings import AllConnectorSettings
from hummingbot.connector.exchange.paper_trade.paper_trade_exchange import PaperTradeExchange
from hummingbot.core.data_type.order_book_tracker import OrderBookTracker
def get_order_book_tracker(connector_name: str, trading_pairs: List[str]) -> OrderBookTracker:
conn_setting = AllConnectorSettings.get_connector_settings()[connector_name]
try:
connector_instance = conn_setting.non_trading_connector_instance_with_default_configuration(
trading_pairs=trading_pairs)
return connector_instance.order_book_tracker
except Exception as exception:
raise Exception(f"Connector {connector_name} OrderBookTracker class not found ({exception})")
def create_paper_trade_market(exchange_name: str, client_config_map: ClientConfigAdapter, trading_pairs: List[str]):
tracker = get_order_book_tracker(connector_name=exchange_name, trading_pairs=trading_pairs)
return PaperTradeExchange(client_config_map,
tracker,
get_connector_class(exchange_name),
exchange_name=exchange_name)
|
from typing import Tuple, Any
from jax import numpy as jnp, random
from flax import linen as nn
from survae.distributions import Distribution
from survae.utils import *
from functools import partial
from jax.scipy.stats import norm
import math  # needed by StandardHalfNormal; stray ipdb debug import removed
class StandardNormal(nn.Module, Distribution):
@classmethod
def log_prob(cls, x, params, *args, **kwargs):
return sum_except_batch(norm.logpdf(x))
@classmethod
def sample(cls, rng, num_samples, params, shape=None, *args, **kwargs):
        if shape is None:
shape = params.shape
return random.normal(rng, (num_samples,)+shape)
class StandardNormal2d(nn.Module, Distribution):
@classmethod
def log_prob(cls, x, params, *args, **kwargs):
return sum_except_batch(norm.logpdf(x))
@classmethod
def sample(cls, rng, num_samples, params, *args, **kwargs):
shape = params.shape
return random.normal(rng, (num_samples,)+(shape))
class MeanNormal(nn.Module, Distribution):
@classmethod
def log_prob(cls, x, params, *args, **kwargs):
return sum_except_batch(norm.logpdf(x, loc=params))
@classmethod
def sample(cls, rng, num_samples, params, shape=None, *args, **kwargs):
        if shape is None:
shape = params.shape
return random.normal(rng, (num_samples,)+shape) + params
class Normal(nn.Module, Distribution):
@classmethod
def log_prob(cls, x, params, axis=1, *args, **kwargs):
mean, log_std = jnp.split(params, 2, axis=axis)
return sum_except_batch(norm.logpdf(x, loc=mean, scale=jnp.exp(log_std)))
@classmethod
def sample(cls, rng, num_samples, params, shape=None, axis=1, *args, **kwargs):
mean, log_std = jnp.split(params, 2, axis=axis)
        if shape is None:
shape = mean.shape
return random.normal(rng, (num_samples,)+shape) * jnp.exp(log_std) + mean
class DiagonalNormal(Distribution):
"""A multivariate Normal with diagonal covariance."""
@classmethod
def log_prob(cls, x, params):
loc = params["loc"].reshape((1, -1, 1, 1))
log_scale = params["log_scale"].reshape((1, -1, 1, 1))
return sum_except_batch(norm.logpdf(x, loc, jnp.exp(jnp.tanh(log_scale))))
@classmethod
def sample(cls, rng, num_samples, params, shape=None):
loc = params["loc"].reshape((1, -1, 1, 1))
log_scale = params["log_scale"].reshape((1, -1, 1, 1))
return loc + jnp.exp(jnp.tanh(log_scale)) * random.normal(rng, (num_samples,)+(shape))
class StandardHalfNormal(Distribution):
"""A standard half-Normal with zero mean and unit covariance."""
@classmethod
def log_prob(cls, x, params):
log_scaling = math.log(2)
log_base = - 0.5 * math.log(2 * math.pi)
log_inner = - 0.5 * x**2
log_probs = log_scaling+log_base+log_inner
log_probs = jnp.where(x < 0, -math.inf, log_probs)
return sum_except_batch(log_probs)
@classmethod
def sample(cls, rng, num_samples, params):
shape = params.shape
return jnp.abs(random.normal(rng, (num_samples,)+(shape)))
class ConditionalNormal(nn.Module, Distribution):
features: int
kernel_size: tuple
@staticmethod
def _setup(features,kernel_size):
return partial(ConditionalNormal, features=features, kernel_size=kernel_size)
def setup(self):
self.conv_cond1 = nn.Conv(features=self.features * 2, kernel_size=self.kernel_size)
self.conv_cond2 = nn.Conv(features=self.features * 2, kernel_size=self.kernel_size)
@nn.compact
def __call__(self, x, cond, *args, **kwargs):
return self.log_prob(x, cond=cond)
def log_prob(self, x, cond, *args, **kwargs):
        if cond is not None:
cond = jnp.transpose(cond,(0,2,3,1))
cond = self.conv_cond2(jnp.tanh(self.conv_cond1(cond)))
cond = jnp.transpose(cond,(0,3,1,2))
mean, log_std = jnp.split(cond, 2, axis=1)
return sum_except_batch(norm.logpdf(x, loc=mean, scale=jnp.exp(log_std)))
def sample(self, rng, num_samples, cond, shape=None, *args, **kwargs):
        if cond is not None:
cond = jnp.transpose(cond,(0,2,3,1))
cond = self.conv_cond2(jnp.tanh(self.conv_cond1(cond)))
cond = jnp.transpose(cond,(0,3,1,2))
mean, log_std = jnp.split(cond, 2, axis=1)
        if shape is None:
shape = mean.shape
return random.normal(rng, (num_samples,) + shape) * jnp.exp(log_std) + mean
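# Minimal usage sketch (illustrative, assumed shapes):
#   rng = random.PRNGKey(0)
#   params = jnp.zeros((3, 4))                      # template giving the event shape
#   x = StandardNormal.sample(rng, num_samples=5, params=params)   # shape (5, 3, 4)
#   logp = StandardNormal.log_prob(x, params)       # summed over non-batch axes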
|
name = input("What is your name? ")
day = input("What day is today? ")
print("Hello, " + name + "! Happy " + day + "!")
print("Hello, {}! Happy {}!".format(name,day))
|
#!/usr/bin/env python
import sys,os,getopt,urllib,httplib,time
try:
import json
except ImportError:
import simplejson as json
#DBS-3 imports
from dbs.apis.dbsClient import *
from sso_auth import Login
OUTPUT='/afs/cern.ch/user/m/mtaze/TransferTeam/deletion_campaign/out/'
# arguments
datasetRegexList=None
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["dataset="])
except getopt.GetoptError:
print >> sys.stderr, 'Failed to parse options!'
sys.exit(2)
# check command line parameter
for opt, arg in opts :
if opt == "--dataset":
datasetRegexList = arg
if datasetRegexList == None:
print >> sys.stderr, 'Please specify dataset(regex) with --dataset option'
sys.exit(2)
def to_GB(amount):
return "%.3f" % (amount / float(1000 ** 3))
url='https://cmsweb.cern.ch/dbs/prod/global/DBSReader'
dbs3api = DbsApi(url=url)
ssoLogin = Login()
try:
# split comma separated dataset
for datasetRegex in datasetRegexList.split(','):
datasetList = dbs3api.listDatasets(dataset_access_type='VALID',detail=1,dataset=datasetRegex)
for dataset in datasetList:
if dataset['data_tier_name'].upper() == "RAW":
continue
dsName = dataset['dataset']
dsCreationDate = time.strftime('%Y%m%d', time.gmtime(dataset['creation_date']))
dsProcessedName = dataset['processed_ds_name']
dsPopularity = []
dsSize = dbs3api.listBlockSummaries(dataset=dsName)[0]['file_size']
# remove version number
if dsProcessedName[-3:-1] == '-v':
dsProcessedName = dsProcessedName[:-3]
dsProcessedName += ".txt"
# if there is no popularity info for the dataset, it doesn't return a json object
url = 'https://cms-popularity.cern.ch/popdb/popularity/getSingleDSstat?orderby=naccess&aggr=year&name=%s' % dsName
try:
result = json.loads(ssoLogin.getUrl(url))
for popularity in result['data'][0]['data']:
dsPopularity.append('%s:%s' % (time.strftime('%Y', time.gmtime(popularity[0]/1000.0)), popularity[1]))
except Exception as e:
dsPopularity.append('NoRecord')
with open(OUTPUT + dsProcessedName, 'a') as file:
file.write("dataset: %s size(GB): %s creation_date: %s popularity: %s\n" % (dsName, to_GB(dsSize), dsCreationDate, ','.join(dsPopularity)))
print "dataset: %s size(GB): %s creation_date: %s popularity: %s" % (dsName, to_GB(dsSize), dsCreationDate, ','.join(dsPopularity))
except Exception as e:
print >> sys.stderr, 'error:',e
|
import numpy as np
import matplotlib.pyplot as plt
import os
from keras_utils import save_step_prediction
try:
import tensorflow as tf
from tensorflow.keras.models import Sequential, load_model, Model
from tensorflow.keras.layers import Input, Add, Flatten, Conv2D, MaxPooling2D, Activation, Dropout, Dense,\
BatchNormalization, SpatialDropout2D, GlobalAveragePooling2D, ZeroPadding2D, AveragePooling2D,\
Conv2DTranspose, concatenate
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.applications import VGG16, InceptionV3
from tensorflow.keras import backend as K
from tensorflow.keras.initializers import glorot_uniform
except:
import tensorflow as tf
from tensorflow.python.keras.models import Sequential, load_model, Model
    from tensorflow.python.keras.layers import Input, Add, Flatten, Conv2D, MaxPooling2D, Activation, Dropout,\
        Dense, BatchNormalization, SpatialDropout2D, GlobalAveragePooling2D, ZeroPadding2D, AveragePooling2D,\
        Conv2DTranspose, concatenate
from tensorflow.python.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.python.keras.applications import VGG16, InceptionV3
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.initializers import glorot_uniform
from skimage.transform import resize
import nibabel as nib
class ModelContainer:
def __init__(self, hyperparameters, current_task):
self.hyperparameters = hyperparameters
self.model = None
self.model_history = None
self.current_task = current_task
self.step_num = None
self.fold_num = None
self.data_loader = None
self.predict_loader = None
self.get_model()
def get_unet(self):
input_shape = (self.hyperparameters['input_shape'][0], self.hyperparameters['input_shape'][1], 2) if self.hyperparameters.get('autocontext_step') \
else self.hyperparameters['input_shape']
inputs = Input(input_shape)
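        # U-Net layout: four downsampling blocks (conv1-conv4, each followed by
        # max-pooling and optional dropout), a bottleneck (conv5), then four
        # upsampling blocks (conv6-conv9) that concatenate encoder skip
        # connections, ending in a 1x1 conv that produces the output map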
conv1 = Conv2D(self.hyperparameters['base'], (3, 3), padding='same')(inputs)
if self.hyperparameters['batch_norm']:
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
conv1 = Conv2D(self.hyperparameters['base'], (3, 3), padding='same')(conv1)
if self.hyperparameters['batch_norm']:
conv1 = BatchNormalization()(conv1)
conv1 = Activation('relu')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
if self.hyperparameters['dropout'] != 0:
pool1 = Dropout(self.hyperparameters['dropout'])(pool1)
conv2 = Conv2D(self.hyperparameters['base'] * 2, (3, 3), padding='same')(pool1)
if self.hyperparameters['batch_norm']:
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
conv2 = Conv2D(self.hyperparameters['base'] * 2, (3, 3), padding='same')(conv2)
if self.hyperparameters['batch_norm']:
conv2 = BatchNormalization()(conv2)
conv2 = Activation('relu')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
if self.hyperparameters['dropout'] != 0:
pool2 = Dropout(self.hyperparameters['dropout'])(pool2)
conv3 = Conv2D(self.hyperparameters['base'] * 4, (3, 3), padding='same')(pool2)
if self.hyperparameters['batch_norm']:
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
conv3 = Conv2D(self.hyperparameters['base'] * 4, (3, 3), padding='same')(conv3)
if self.hyperparameters['batch_norm']:
conv3 = BatchNormalization()(conv3)
conv3 = Activation('relu')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
if self.hyperparameters['dropout'] != 0:
pool3 = Dropout(self.hyperparameters['dropout'])(pool3)
conv4 = Conv2D(self.hyperparameters['base'] * 8, (3, 3), padding='same')(pool3)
if self.hyperparameters['batch_norm']:
conv4 = BatchNormalization()(conv4)
conv4 = Activation('relu')(conv4)
conv4 = Conv2D(self.hyperparameters['base'] * 8, (3, 3), padding='same')(conv4)
if self.hyperparameters['batch_norm']:
conv4 = BatchNormalization()(conv4)
conv4 = Activation('relu')(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
if self.hyperparameters['dropout'] != 0:
pool4 = Dropout(self.hyperparameters['dropout'])(pool4)
conv5 = Conv2D(self.hyperparameters['base'] * 16, (3, 3), padding='same')(pool4)
if self.hyperparameters['batch_norm']:
conv5 = BatchNormalization()(conv5)
conv5 = Activation('relu')(conv5)
conv5 = Conv2D(self.hyperparameters['base'] * 16, (3, 3), padding='same')(conv5)
if self.hyperparameters['batch_norm']:
conv5 = BatchNormalization()(conv5)
conv5 = Activation('relu')(conv5)
up6 = concatenate([Conv2DTranspose(self.hyperparameters['base'] * 8, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
conv6 = Conv2D(self.hyperparameters['base'] * 8, (3, 3), padding='same')(up6)
if self.hyperparameters['batch_norm']:
conv6 = BatchNormalization()(conv6)
conv6 = Activation('relu')(conv6)
conv6 = Conv2D(self.hyperparameters['base'] * 8, (3, 3), padding='same')(conv6)
if self.hyperparameters['batch_norm']:
conv6 = BatchNormalization()(conv6)
conv6 = Activation('relu')(conv6)
        up7 = concatenate([Conv2DTranspose(self.hyperparameters['base'] * 4, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
conv7 = Conv2D(self.hyperparameters['base'] * 4, (3, 3), padding='same')(up7)
if self.hyperparameters['batch_norm']:
conv7 = BatchNormalization()(conv7)
conv7 = Activation('relu')(conv7)
conv7 = Conv2D(self.hyperparameters['base'] * 4, (3, 3), padding='same')(conv7)
if self.hyperparameters['batch_norm']:
conv7 = BatchNormalization()(conv7)
conv7 = Activation('relu')(conv7)
up8 = concatenate([Conv2DTranspose(self.hyperparameters['base'] * 2, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
conv8 = Conv2D(self.hyperparameters['base'] * 2, (3, 3), padding='same')(up8)
if self.hyperparameters['batch_norm']:
conv8 = BatchNormalization()(conv8)
conv8 = Activation('relu')(conv8)
conv8 = Conv2D(self.hyperparameters['base'] * 2, (3, 3), padding='same')(conv8)
if self.hyperparameters['batch_norm']:
conv8 = BatchNormalization()(conv8)
conv8 = Activation('relu')(conv8)
up9 = concatenate([Conv2DTranspose(self.hyperparameters['base'], (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
conv9 = Conv2D(self.hyperparameters['base'], (3, 3), padding='same')(up9)
if self.hyperparameters['batch_norm']:
conv9 = BatchNormalization()(conv9)
conv9 = Activation('relu')(conv9)
conv9 = Conv2D(self.hyperparameters['base'], (3, 3), padding='same')(conv9)
if self.hyperparameters['batch_norm']:
conv9 = BatchNormalization()(conv9)
conv9 = Activation('relu')(conv9)
conv10 = Conv2D(self.hyperparameters['last_layer_units'], (1, 1), activation=self.hyperparameters['last_layer_activation'])(conv9)
if self.hyperparameters.get('use_weight_maps'):
weight_input = Input(self.hyperparameters['input_shape'])
self.model = Model(inputs=[inputs, weight_input], outputs=[conv10])
loss = self.hyperparameters['loss_func'](weight_input, self.hyperparameters['weight_strength'])
else:
self.model = Model(inputs=[inputs], outputs=[conv10])
loss = self.hyperparameters['loss_func']
        self.model.summary()
self.model.compile(loss=loss,
optimizer=self.hyperparameters['optimizer'](lr=self.hyperparameters['lr']),
metrics=self.hyperparameters['metrics_func'])
return self.model
def plot_history(self):
if not os.path.isdir(os.path.join(os.getcwd(), 'results')):
os.mkdir(os.path.join(os.getcwd(), 'results'))
task_path = str(self.current_task)
if self.step_num is not None:
task_path += '_step' + str(self.step_num)
if self.fold_num is not None:
task_path += '_fold' + str(self.fold_num)
fig = plt.figure(figsize=(4, 4))
plt.title("Learning Curve")
plt.plot(self.model_history.history["loss"], label="loss")
plt.plot(self.model_history.history["val_loss"], label="val_loss")
plt.plot(np.argmin(self.model_history.history["val_loss"]),
np.min(self.model_history.history["val_loss"]),
marker="x", color="r", label="best model")
plt.xlabel("Epochs")
plt.ylabel("Loss Value")
plt.legend()
result_path = os.path.join(os.path.join(os.getcwd(), 'results'), task_path + '_loss.png')
fig.savefig(result_path, dpi=fig.dpi)
        fig = plt.figure(figsize=(4, 4))
        plt.title("Metrics Curves")
        for metric in self.hyperparameters['metrics']:
            # reset per metric so a missing key can't reuse a stale match
            metric_key = ''
            metric_val_key = ''
            for key in self.model_history.history:
                if "val" not in key and metric in key:
                    metric_key = key
                if "val" in key and metric in key:
                    metric_val_key = key
if metric_key != '' and metric_val_key != '':
plt.plot(self.model_history.history[metric_key], label=metric_key)
plt.plot(self.model_history.history[metric_val_key], label=metric_val_key)
# plt.plot(np.argmax(history.history[metric_val_key]),
# np.max(history.history[metric_val_key]),
# marker="x", color="r", label="best model")
plt.xlabel("Epochs")
plt.ylabel("Metrics Value")
plt.legend()
result_path = os.path.join(os.path.join(os.getcwd(), 'results'), task_path + '_metrics.png')
fig.savefig(result_path, dpi=fig.dpi)
def save_model(self):
if not os.path.isdir(os.path.join(os.getcwd(), 'models')):
os.mkdir(os.path.join(os.getcwd(), 'models'))
task_path = str(self.current_task)
if self.step_num is not None:
task_path += '_step' + str(self.step_num)
if self.fold_num is not None:
task_path += '_fold' + str(self.fold_num)
model_path = os.path.join(os.path.join(os.getcwd(), 'models'), task_path + '.h5')
self.model.save(model_path)
def load_saved_model(self):
model_path = os.path.join(os.path.join(os.getcwd(), 'models'), self.hyperparameters['use_model'])
        if not os.path.isfile(model_path):
            raise ValueError("Model file not found: {}".format(model_path))
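        # Keras can only deserialize a model saved with custom losses/metrics
        # if the callables are passed back in by name via custom_objects.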
custom_objects = {}
custom_objects[self.hyperparameters['loss']] = self.hyperparameters['loss_func']
for i, metric in enumerate(self.hyperparameters['metrics']):
custom_objects[metric] = self.hyperparameters['metrics_func'][i]
self.model = load_model(model_path, custom_objects=custom_objects)
return self.model
def normal_train(self):
training_generator, validation_generator = self.data_loader.get_generators()
# for batch_x, batch_y in training_generator:
# plot_pair(batch_x[0,:,:,0], batch_y[0,:,:,0])
# plot_pair(batch_x[0, :, :, 0], batch_y[0, :, :, 1])
# plot_pair(batch_x[0, :, :, 0], batch_y[0, :, :, 2])
self.model_history = self.model.fit_generator(training_generator,
epochs=self.hyperparameters['epochs'],
validation_data=validation_generator)
self.plot_history()
if self.hyperparameters.get('save_model'):
self.save_model()
def kfold_train(self):
        for fold_num in range(self.hyperparameters['folds']):
            self.fold_num = fold_num
            self.model = self.get_unet()  # fresh weights per fold so the folds stay independent
            training_generator, validation_generator = self.data_loader.get_generators(fold_num=self.fold_num, step_num=self.step_num)
self.model_history = self.model.fit_generator(training_generator,
epochs=self.hyperparameters['epochs'],
validation_data=validation_generator)
self.plot_history()
if self.hyperparameters.get('save_model'):
self.save_model()
    def autocontext_train(self):
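        # Autocontext: train for several steps; each step's out-of-fold
        # predictions are saved and fed back to the next step as an extra
        # input channel (hence the 2-channel input_shape in get_unet).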
autocontext_step = self.hyperparameters['autocontext_step']
model_predictions = [None] * len(
os.listdir(os.path.join(os.getcwd(), os.path.join(self.hyperparameters['data_path'], 'Image'))))
for step_num in range(0, autocontext_step):
self.step_num = step_num
for fold_num in range(self.hyperparameters['folds']):
                self.fold_num = fold_num
                self.model = self.get_unet()  # fresh weights per fold/step
                training_generator, validation_generator = self.data_loader.get_generators(fold_num, step_num)
self.model_history = self.model.fit_generator(training_generator,
epochs=self.hyperparameters['epochs'],
validation_data=validation_generator)
self.plot_history()
if self.hyperparameters.get('save_model'):
self.save_model()
y_pred = self.model.predict(validation_generator)
total_val = len(validation_generator.image_filenames)
model_predictions[(fold_num * total_val):((fold_num + 1) * total_val)] = y_pred
save_step_prediction(model_predictions, step_num)
def train(self, dataloader):
self.data_loader = dataloader
if self.hyperparameters.get('autocontext_step'):
            self.autocontext_train()
elif self.hyperparameters.get('folds'):
self.kfold_train()
else:
self.normal_train()
def predict(self, dataloader, start_pred, num_cases, data_path):
self.predict_loader = dataloader
        for i in range(start_pred, start_pred + num_cases):
            test_batch, original_shape = self.predict_loader[i]
predictions = self.model.predict_on_batch(test_batch)
output_predictions = []
for prediction in predictions:
prediction = resize(prediction, (original_shape[0], original_shape[1]))
output_predictions.append(np.argmax(prediction, axis=2))
output_predictions = np.array(output_predictions)
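            # Fixed NIfTI affine (voxel spacing / axis orientation); these
            # values are specific to the dataset this project targets.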
affine = np.array([[0., 0., -0.781624972820282, 0.],
[0., -0.781624972820282, 0., 0.],
[-3., 0., 0., 0.],
[0., 0., 0., 1.]])
img = nib.Nifti1Image(output_predictions, affine)
prediction_path = os.path.join(os.getcwd(),
os.path.join(data_path, 'predictions'))
if not os.path.isdir(prediction_path):
os.mkdir(prediction_path)
img.to_filename(os.path.join(prediction_path, 'prediction_' + str(i).zfill(5) + '.nii.gz'))
def get_model(self):
if self.model is not None:
return self.model
if self.hyperparameters.get('use_model'):
return self.load_saved_model()
return self.get_unet()
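# A minimal usage sketch (not from the original project): the hyperparameter
# keys are inferred from the reads above, while the concrete values, the
# 'categorical_crossentropy' loss and the 'accuracy' metric are illustrative
# assumptions, as is whatever DataLoader object train() would be given.
if __name__ == '__main__':
    hyperparameters = {
        'input_shape': (256, 256, 1),
        'base': 16,                 # filters in the first U-Net level
        'batch_norm': True,
        'dropout': 0.1,
        'last_layer_units': 3,      # number of segmentation classes
        'last_layer_activation': 'softmax',
        'loss_func': 'categorical_crossentropy',
        'optimizer': Adam,
        'lr': 1e-4,
        'metrics_func': ['accuracy'],
        'metrics': ['accuracy'],
        'epochs': 10,
    }
    container = ModelContainer(hyperparameters, current_task='segmentation')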
|
import os
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from slugify import slugify
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
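# For local development the app reads its secrets from an untracked env.py
# (see the guarded import above). A minimal sketch with placeholder values
# only; never commit real credentials:
#
#     import os
#     os.environ.setdefault("IP", "0.0.0.0")
#     os.environ.setdefault("PORT", "5000")
#     os.environ.setdefault("SECRET_KEY", "<random-secret>")
#     os.environ.setdefault("MONGO_URI", "mongodb+srv://<user>:<pass>@<cluster>/<db>?retryWrites=true&w=majority")
#     os.environ.setdefault("MONGO_DBNAME", "<db>")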
@app.route("/")
def get_items():
items = mongo.db.items.find().sort("name")
return render_template("dictionary.html", items=items)
@app.route("/sok/")
def search_all_items():
items = mongo.db.items.find().sort("name")
return render_template("search-results.html", items=items)
@app.route("/sok/<term>")
def search_items(term):
items = mongo.db.items.find({"$text": {"$search": term}}).sort("name")
return render_template("search-results.html", items=items)
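# Note: the $text query above only works if the collection has a text index.
# A one-off setup sketch (the indexed fields are an assumption based on the
# item fields used elsewhere in this file):
#
#     mongo.db.items.create_index([("name", "text"), ("short", "text"), ("long", "text")])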
@app.route("/ord/<slug>")
def view_item(slug):
item = mongo.db.items.find_one({"slug": slug})
return render_template("view-item.html", item=item)
@app.route("/skapa-konto", methods=["GET", "POST"])
def create_account():
if request.method == "POST":
        # does username already exist in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
flash("Användarnamnet finns redan")
return redirect(url_for("create_account"))
account = {
"username": request.form.get("username").lower(),
"name": request.form.get("name"),
"password": generate_password_hash(request.form.get("password"))
}
mongo.db.users.insert_one(account)
session["user"] = request.form.get("username").lower()
flash("Konto skapat")
return render_template("create-account.html")
@app.route("/logga-in", methods=["GET", "POST"])
def login():
if request.method == "POST":
        # does username already exist in db
existing_user = mongo.db.users.find_one(
{"username": request.form.get("username").lower()})
if existing_user:
if check_password_hash(existing_user["password"],
request.form.get("password")):
session["user"] = request.form.get("username").lower()
flash("Du är inloggad, {}".format(existing_user["name"]))
return redirect(url_for("get_items"))
            else:
                # invalid password: same message as for an unknown username,
                # and the app authenticates by username, not email
                flash("Felaktigt användarnamn eller lösenord")
                return redirect(url_for("login"))
        else:
            # invalid username
            flash("Felaktigt användarnamn eller lösenord")
            return redirect(url_for("login"))
return render_template("login.html")
@app.route("/nytt-ord", methods=["GET", "POST"])
def create_item():
if request.method == "POST":
        # does item-name already exist in db
existing_item = mongo.db.items.find_one(
{"name": request.form.get("name")})
if existing_item:
flash("Ordet finns redan med i ordlistan")
return redirect(url_for("create_item"))
dictionary = {
"name": request.form.get("name"),
"short": request.form.get("short"),
"long": request.form.get("long"),
"username": session["user"],
"slug": slugify(request.form.get("name"))
}
if request.form.get("href"):
dictionary["href"] = request.form.get("href")
mongo.db.items.insert_one(dictionary)
flash("Nytt ord tillagt")
return render_template("create-item.html")
@app.route("/min-ordlista")
def my_items():
items = mongo.db.items.find(
{"username": session["user"]}
).sort("name")
return render_template("mydictionary.html", items=items)
@app.route("/logga-ut")
def logout():
flash("Du har blivit utloggad")
session.clear()
return redirect(url_for("get_items"))
@app.route("/ta-bort/<id>")
def delete_item(id):
    # delete item from db (Collection.remove() was removed in PyMongo 4)
    mongo.db.items.delete_one({"_id": ObjectId(id)})
flash("Ordet är borttaget")
return redirect(url_for("my_items"))
@app.route("/redigera/<id>", methods=["GET", "POST"])
def update_item(id):
if request.method == "POST":
        # does item-name already exist in db
existing_item = mongo.db.items.find_one(
{"name": request.form.get("name")})
if existing_item and ObjectId(id) != existing_item['_id']:
flash("Ordet finns redan med i ordlistan")
item = mongo.db.items.find_one({"_id": ObjectId(id)})
return render_template("update-item.html", item=item)
dictionary = {
"name": request.form.get("name"),
"short": request.form.get("short"),
"long": request.form.get("long"),
"username": session["user"],
"slug": slugify(request.form.get("name")),
}
if request.form.get("href"):
dictionary["href"] = request.form.get("href")
        mongo.db.items.replace_one({"_id": ObjectId(id)}, dictionary)
flash("Ordet är sparat")
return redirect(url_for("my_items"))
item = mongo.db.items.find_one({"_id": ObjectId(id)})
return render_template("update-item.html", item=item)
if __name__ == "__main__":
app.run(host=os.environ.get("IP"),
port=int(os.environ.get("PORT")),
debug=False)
|
# -*- coding: utf-8 -*-
'''
Self-Balancing Binary Search Trees (AVL)
Height = max(LeftSubTreeHeight, RightSubTreeHeight) + 1
Balance = LeftSubTreeHeight - RightSubTreeHeight
abs(Balance) <= 1 - tree is balanced
Balance > 1       - tree is left heavy
Balance < -1      - tree is right heavy
https://www.youtube.com/watch?v=vRwi_UcZGjU&t=3s
1. Handle left heavy tree (Balance > 1, i.e. LeftSubTreeHeight > 1 + RightSubTreeHeight):
1.1 Right rotation (LL imbalance):
          3                 2
         /                 / \
        2      ---->      1   3
       /
      1

          4                 2
         /                 / \
        2      ---->      1   4
       / \                   /
      1   3                 3
    Right child of 2 goes as left child of 4:
    node = 4, node.left = 2, node.left.right = 3
    temp = node.left.right
    node.left.right = node   (set 4 as 2's right child)
    node.left = temp         (set 3 as 4's left child instead of 2)
1.2 Left-Right rotation (LR imbalance): left-rotate the left child to
    reduce LR to LL, then apply 1.1:
          3                 3
         /                 /
        1      ---->      2      ---->  apply 1.1
         \               /
          2             1
2. Handle right heavy tree (Balance < -1, i.e. RightSubTreeHeight > 1 + LeftSubTreeHeight):
2.1 Left rotation (RR imbalance):
      1                     2
       \                   / \
        2      ---->      1   3
         \
          3

      1                     3
       \                   / \
        3      ---->      1   4
       / \                 \
      2   4                 2
    Left child of 3 goes as right child of 1:
    node = 1, node.right = 3, node.right.left = 2
    temp = node.right.left
    node.right.left = node   (set 1 as 3's left child)
    node.right = temp        (set 2 as 1's right child instead of 3)
2.2 Right-Left rotation (RL imbalance): right-rotate the right child to
    reduce RL to RR, then apply 2.1:
      1                 1
       \                 \
        3      ---->      2      ---->  apply 2.1
       /                   \
      2                     3
Operations: insert (implemented below); remove (not implemented yet).
'''
class Node:
    def __init__(self, val, left=None, right=None, parent=None):
self.left = left
self.right = right
self.parent = parent
self.val = val
self.height = 0
def __str__(self):
return "{} {}".format(self.val, self.height)
class Tree:
def __init__(self, root=None):
self.root = root
def insert(self, val):
if not self.root:
new_node = Node(val)
self.update_height(new_node)
self.root = new_node
else:
new_node = Node(val)
current = self.root
while True:
if val > current.val:
if not current.right:
current.right = new_node
new_node.parent = current
break
else:
current = current.right
else:
if not current.left:
current.left = new_node
new_node.parent = current
break
else:
current = current.left
self.rebalance(new_node)
    def rebalance(self, node):
        # walk up from the inserted node, fixing heights and rotating
        # wherever the balance factor leaves [-1, 1]
        while node:
            self.update_height(node)
            balance = self.height(node.left) - self.height(node.right)
            if balance < -1:
                # right heavy tree: left rotation, or right-left rotation
                print("right heavy tree")
                # compare subtree heights (not mere existence) to pick the case
                if self.height(node.right.right) >= self.height(node.right.left):
                    self.left_rotation(node)
                else:
                    print("right left rotation")
                    self.right_rotation(node.right)
                    self.left_rotation(node)
            elif balance > 1:
                # left heavy tree: right rotation, or left-right rotation
                print("left heavy tree")
                if self.height(node.left.left) >= self.height(node.left.right):
                    self.right_rotation(node)
                else:
                    print("left right rotation")
                    self.left_rotation(node.left)
                    self.right_rotation(node)
            node = node.parent
def update_height(self, node):
if node:
node.height = max(self.height(node.left),
self.height(node.right)) + 1
def height(self, node):
if not node:
return -1
return node.height
    def left_rotation(self, node):
        temp = node.right.left
        node.right.left = node
        node.right.parent = node.parent
        if not node.parent:
            self.root = node.right
        elif node.parent.left == node:
            node.parent.left = node.right
        elif node.parent.right == node:
            node.parent.right = node.right
        node.parent = node.right
        node.right = temp
        if temp:
            temp.parent = node  # reattach the moved subtree to its new parent
        self.update_height(node)
        self.update_height(node.parent)
    def right_rotation(self, node):
        temp = node.left.right
        node.left.right = node
        node.left.parent = node.parent
        if not node.parent:
            self.root = node.left
        elif node.parent.right == node:
            node.parent.right = node.left
        elif node.parent.left == node:
            node.parent.left = node.left
        node.parent = node.left
        node.left = temp
        if temp:
            temp.parent = node  # reattach the moved subtree to its new parent
        self.update_height(node)
        self.update_height(node.parent)
    def __str__(self):
        # iterative pre-order traversal of "val height" pairs
        stack = []
        current = self.root
        result = []
        while stack or current:
            if current:
                result.append(str(current))
                stack.append(current)
                current = current.left
            else:
                current = stack.pop()
                current = current.right
        return ','.join(result)
tree = Tree()
tree.insert(30)
tree.insert(10)
tree.insert(20)
print(tree)
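# A slightly larger demo sketch (not part of the original): ascending inserts
# would degenerate a plain BST into a linked list, but repeated left
# rotations keep this tree balanced, leaving 4 at the root.
balanced = Tree()
for value in [1, 2, 3, 4, 5, 6, 7]:
    balanced.insert(value)
print(balanced)  # pre-order: 4, then left subtree (2, 1, 3), then right (6, 5, 7)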
|