max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
app/tests/articles_test.py | vicky-eve/NewsHighlight | 0 | 6614551 | <reponame>vicky-eve/NewsHighlight
import unittest
from app.models import Articles
class ArticlesTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Articles class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_articles = Articles(1,'Engadget', '<NAME>', 'If you missed the sale earlier this month, nows your chance to grab Apples Mac Mini M1 at its best price yet. The compact desktop has returned to a record low of $570, thanks to a discount and a coupon that knocks an additional $80 off the sale price. Youl…','https://www.engadget.com/apples-mac-mini-m1-drops-back-down-to-an-all-time-low-of-570-135002983.html', '2022-03-01T21:07:48Z')
def test_instance(self):
self.assertTrue(isinstance(self.new_articles,Articles))
if __name__ == '__main__':
unittest.main() | import unittest
from app.models import Articles
class ArticlesTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Articles class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_articles = Articles(1,'Engadget', '<NAME>', 'If you missed the sale earlier this month, nows your chance to grab Apples Mac Mini M1 at its best price yet. The compact desktop has returned to a record low of $570, thanks to a discount and a coupon that knocks an additional $80 off the sale price. Youl…','https://www.engadget.com/apples-mac-mini-m1-drops-back-down-to-an-all-time-low-of-570-135002983.html', '2022-03-01T21:07:48Z')
def test_instance(self):
self.assertTrue(isinstance(self.new_articles,Articles))
if __name__ == '__main__':
unittest.main() | en | 0.89106 | Test Class to test the behaviour of the Articles class Set up method that will run before every Test | 3.502494 | 4 |
material-design-entry.py | cptx032/miniprojects | 2 | 6614552 | <reponame>cptx032/miniprojects
# coding: utf-8
from Tkinter import *
class MaterialEntry(Frame):
def __init__(self, *args, **kws):
self.placeholder = kws.pop('placeholder', '')
self.placeholder_color = kws.get('phcolor', '#999')
self.fg_color = kws.get('bg', '#000')
Frame.__init__(self, *args, **kws)
self.entry = Entry(self)
self.entry.configure(
insertwidth=kws.get('insertwidth', 1),
border=kws.get('border', 0),
highlightthickness=kws.get('highlightthickness', 5),
)
self.entry.update_idletasks()
self.canvas = Canvas(self, bd=0, highlightthickness=0, height=2, bg='#ddd', width=self.entry.winfo_width())
_parent_color = self.master['bg']
self.entry.configure(
bg=_parent_color,
highlightcolor=_parent_color
)
self.update_placeholder()
self.entry.bind('<Any-KeyPress>', self._kb_handler, '+')
self.entry.bind('<FocusOut>', lambda e : self.update_placeholder(), "+")
self.entry.bind('<FocusIn>', lambda e : self.update_placeholder(), "+")
self.entry.bind('<1>', lambda e : self.update_placeholder(), "+")
self.entry.bind('<FocusIn>', self._focus_in, '+')
self.entry.bind('<FocusOut>', self._focus_out, '+')
self.entry.grid(pady=0, padx=5, row=0, column=0)
self.canvas.grid(row=1, column=0, sticky=W+E)
def _focus_in(self, event):
self.canvas['bg'] = '#00aacc'
def _focus_out(self, event):
self.canvas['bg'] = '#ddd'
def update_placeholder(self):
print "enter"
if self.entry.get() == '':
self.entry.configure(foreground=self.placeholder_color)
self.entry.insert(0, self.placeholder)
if self.entry.get() == self.placeholder and self.entry['fg'] == self.placeholder_color:
self.entry.icursor(0)
def _kb_handler(self, event):
# if event.keysym is visible key
if self.entry.get() == self.placeholder and self.entry['fg'] == self.placeholder_color:
self.entry['fg'] = self.fg_color
self.entry.delete(0,END)
top = Tk()
Label(top, text="Login", bg=top['bg'], font=('TkDefaultFont',10,'bold')).grid(row=0,column=0,pady=5, padx=5)
e = MaterialEntry(placeholder="Username")
e.grid(row=1, column=0,padx=5, pady=5)
e.entry.focus_force()
f = MaterialEntry(placeholder="Password")
f.grid(row=2, column=0,padx=5, pady=5)
f.focus_force()
Button(top, relief=FLAT, bg='#ddd', text="Ok", width=20,bd=0,highlightthickness=5).grid(row=3, column=0, pady=5, padx=5)
top.bind('<Escape>', lambda e : top.quit(), "+")
top.mainloop()
| # coding: utf-8
from Tkinter import *
class MaterialEntry(Frame):
def __init__(self, *args, **kws):
self.placeholder = kws.pop('placeholder', '')
self.placeholder_color = kws.get('phcolor', '#999')
self.fg_color = kws.get('bg', '#000')
Frame.__init__(self, *args, **kws)
self.entry = Entry(self)
self.entry.configure(
insertwidth=kws.get('insertwidth', 1),
border=kws.get('border', 0),
highlightthickness=kws.get('highlightthickness', 5),
)
self.entry.update_idletasks()
self.canvas = Canvas(self, bd=0, highlightthickness=0, height=2, bg='#ddd', width=self.entry.winfo_width())
_parent_color = self.master['bg']
self.entry.configure(
bg=_parent_color,
highlightcolor=_parent_color
)
self.update_placeholder()
self.entry.bind('<Any-KeyPress>', self._kb_handler, '+')
self.entry.bind('<FocusOut>', lambda e : self.update_placeholder(), "+")
self.entry.bind('<FocusIn>', lambda e : self.update_placeholder(), "+")
self.entry.bind('<1>', lambda e : self.update_placeholder(), "+")
self.entry.bind('<FocusIn>', self._focus_in, '+')
self.entry.bind('<FocusOut>', self._focus_out, '+')
self.entry.grid(pady=0, padx=5, row=0, column=0)
self.canvas.grid(row=1, column=0, sticky=W+E)
def _focus_in(self, event):
self.canvas['bg'] = '#00aacc'
def _focus_out(self, event):
self.canvas['bg'] = '#ddd'
def update_placeholder(self):
print "enter"
if self.entry.get() == '':
self.entry.configure(foreground=self.placeholder_color)
self.entry.insert(0, self.placeholder)
if self.entry.get() == self.placeholder and self.entry['fg'] == self.placeholder_color:
self.entry.icursor(0)
def _kb_handler(self, event):
# if event.keysym is visible key
if self.entry.get() == self.placeholder and self.entry['fg'] == self.placeholder_color:
self.entry['fg'] = self.fg_color
self.entry.delete(0,END)
top = Tk()
Label(top, text="Login", bg=top['bg'], font=('TkDefaultFont',10,'bold')).grid(row=0,column=0,pady=5, padx=5)
e = MaterialEntry(placeholder="Username")
e.grid(row=1, column=0,padx=5, pady=5)
e.entry.focus_force()
f = MaterialEntry(placeholder="Password")
f.grid(row=2, column=0,padx=5, pady=5)
f.focus_force()
Button(top, relief=FLAT, bg='#ddd', text="Ok", width=20,bd=0,highlightthickness=5).grid(row=3, column=0, pady=5, padx=5)
top.bind('<Escape>', lambda e : top.quit(), "+")
top.mainloop() | en | 0.633293 | # coding: utf-8 # if event.keysym is visible key | 2.469611 | 2 |
notebooks/_solutions/case2_biodiversity_analysis23.py | rprops/Python_DS-WS | 65 | 6614553 | survey_data.groupby("name").size().nlargest(8) | survey_data.groupby("name").size().nlargest(8) | none | 1 | 1.506537 | 2 | |
run/project/create/src/main.py | feMoraes0/projects-setup | 0 | 6614554 | #!/usr/bin/python3
import os
from formula import formula
project_name = os.environ.get("PROJECT_NAME")
project_path = os.environ.get("PROJECT_PATH")
framework = os.environ.get("FRAMEWORK")
run = os.environ.get("RUN")
formula.Run(project_name, project_path, framework, run)
| #!/usr/bin/python3
import os
from formula import formula
project_name = os.environ.get("PROJECT_NAME")
project_path = os.environ.get("PROJECT_PATH")
framework = os.environ.get("FRAMEWORK")
run = os.environ.get("RUN")
formula.Run(project_name, project_path, framework, run)
| fr | 0.386793 | #!/usr/bin/python3 | 1.667514 | 2 |
Gui/opensim/Scripts/runTutorialTwo.py | sebastianskejoe/opensim-gui | 34 | 6614555 | <filename>Gui/opensim/Scripts/runTutorialTwo.py
# --------------------------------------------------------------------------- #
# OpenSim: runTutorialTwo.py #
# --------------------------------------------------------------------------- #
# OpenSim is a toolkit for musculoskeletal modeling and simulation, #
# developed as an open source project by a worldwide community. Development #
# and support is coordinated from Stanford University, with funding from the #
# U.S. NIH and DARPA. See http://opensim.stanford.edu and the README file #
# for more information including specific grant numbers. #
# #
# Copyright (c) 2005-2017 Stanford University and the Authors #
# Author(s): <NAME>, <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
# Written by <NAME>, Stanford University
## This example performs the steps of Tutorial Two in scripting form
import os.path
# Define the files and folders we will be using
resourceDir = getResourcesDir()
modelFolder = os.path.join(resourceDir, "Models", "WristModel")
modelName = os.path.join(modelFolder, "wrist.osim")
# Load the model
loadModel(modelName)
# Get a handle to the current model
oldModel = getCurrentModel()
# Create a fresh copy
myModel = modeling.Model(oldModel)
# Initialize the copy, if values needed to be set in state
# pass along the variable myState returned by initSystem
myState = myModel.initSystem()
# Change the name of the model
##myModel.setName("Wrist Tendon Surgery.")
## Change the path points of the ECU_pre-surgery to match the existing ECU_post-surgery muscle
ECU_PRE_pps = myModel.getMuscles().get("ECU_pre-surgery").getGeometryPath().updPathPointSet()
ECU_POST_pps= myModel.getMuscles().get("ECU_post-surgery").getGeometryPath().getPathPointSet()
# Clear all path points from the ECU_pre-surgery path point set
ECU_PRE_pps.clearAndDestroy()
# Add path points from the ECU_post-surgery path to the ECU_pre-surgery path
for i in range(ECU_POST_pps.getSize()):
ECU_PRE_pps.cloneAndAppend(ECU_POST_pps.get(i))
# re-initialize the model now that you changed the path points
myState = myModel.initSystem()
# Get full path name of myModel
fullPathName = myModel.getInputFileName()
# Change the name of the modified model
newName = fullPathName.replace('.osim', '_edited.osim')
myModel.print(newName)
# Load the model in the GUI
loadModel(newName)
## IV. Biomechanical Effects of Tendon Transfer
loadModel(fullPathName)
currentModel = getCurrentModel()
myState = currentModel.initSystem()
# Plot the RF and VASINT fiber lengths with the model in the default pose
plotterPanel = createPlotterPanel("Wrist Deviation Moment vs. Deviation Angle. ")
crv1 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECRB+ECRL+ECU_pre-surgery+EDCI+EDCL+EDCM+EDCR+EDM+EIP+EPL","flexion")
setCurveLegend(crv1, "Before Transfer")
crv2 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECRB+ECRL+ECU_post-surgery+EDCI+EDCL+EDCM+EDCR+EDM+EIP+EPL","flexion")
setCurveLegend(crv2, "After Transfer")
## Effect of Tendon transfer on ECU muscle
# Wrist Moment VS Flexion
plotterPanel = createPlotterPanel("Wrist Moment VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# Tendon force VS Flexion
plotterPane2 = createPlotterPanel("Tendon force VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane2, "tendon force", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPane2, "tendon force", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# flexion moment arm VS Flexion
plotterPane3 = createPlotterPanel("flexion moment arm VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane3, "momentArm.flexion", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPane3, "momentArm.flexion", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# Create muscle objects for both a ECU pre- & post- surgery
ECUpresurgery = myModel.getMuscles().get("ECU_pre-surgery")
ECUpostsurgery = myModel.getMuscles().get("ECU_post-surgery")
# Find the optimal fibre length of that muscle
optLengthECUpre = ECUpresurgery.getOptimalFiberLength()
optLengthECUpost = ECUpostsurgery.getOptimalFiberLength()
## The Effect of Tendon Slack Length
myModel = getCurrentModel()
# Plot the muscle properties with existing Tendon-slack Length
# Tendon force VS Flexion
plotterPane1 = createPlotterPanel("Tendon force VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane1, "tendon force", "ECRB","flexion")
setCurveLegend(crv1, "ECRB")
# Muscle-tendon length VS Flexion
plotterPane2 = createPlotterPanel("Muscle-tendon length VS Flexion Angle")
crv2 = addAnalysisCurve(plotterPane2, "muscle-tendon length", "ECRB","flexion")
setCurveLegend(crv2, "ECRB")
# Fibre length VS Flexion
plotterPane3 = createPlotterPanel("Fibre length VS Flexion Angle")
crv3 = addAnalysisCurve(plotterPane3, "fiber-length", "ECRB","flexion")
setCurveLegend(crv3, "ECRB")
# Changing the optimal fibre length
# Create the ECRB muscle object
ECRB = myModel.getMuscles().get("ECRB")
# Back up the original tendon slack length (just in case)
backupTendonSlackLength = ECRB.getTendonSlackLength()
# Prescribe a new Tendon slack length
ECRB.setTendonSlackLength(0.2105)
# Re-initialize the states
myModel.initSystem()
# Plot the muscle properties with new Tendon-slack Length
# Tendon force VS Flexion
crv4 = addAnalysisCurve(plotterPane1, "tendon force", "ECRB","flexion")
setCurveLegend(crv4, "ECRB_0.210")
# Muscle-tendon length VS Flexion
crv5 = addAnalysisCurve(plotterPane2, "muscle-tendon length", "ECRB","flexion")
setCurveLegend(crv5, "ECRB_0.210")
# Fibre length VS Flexion
crv6 = addAnalysisCurve(plotterPane3, "fiber-length", "ECRB","flexion")
setCurveLegend(crv6, "ECRB_0.210")
| <filename>Gui/opensim/Scripts/runTutorialTwo.py
# --------------------------------------------------------------------------- #
# OpenSim: runTutorialTwo.py #
# --------------------------------------------------------------------------- #
# OpenSim is a toolkit for musculoskeletal modeling and simulation, #
# developed as an open source project by a worldwide community. Development #
# and support is coordinated from Stanford University, with funding from the #
# U.S. NIH and DARPA. See http://opensim.stanford.edu and the README file #
# for more information including specific grant numbers. #
# #
# Copyright (c) 2005-2017 Stanford University and the Authors #
# Author(s): <NAME>, <NAME> #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# --------------------------------------------------------------------------- #
# Written by <NAME>, Stanford University
## This example performs the steps of Tutorial Two in scripting form
import os.path
# Define the files and folders we will be using
resourceDir = getResourcesDir()
modelFolder = os.path.join(resourceDir, "Models", "WristModel")
modelName = os.path.join(modelFolder, "wrist.osim")
# Load the model
loadModel(modelName)
# Get a handle to the current model
oldModel = getCurrentModel()
# Create a fresh copy
myModel = modeling.Model(oldModel)
# Initialize the copy, if values needed to be set in state
# pass along the variable myState returned by initSystem
myState = myModel.initSystem()
# Change the name of the model
##myModel.setName("Wrist Tendon Surgery.")
## Change the path points of the ECU_pre-surgery to match the existing ECU_post-surgery muscle
ECU_PRE_pps = myModel.getMuscles().get("ECU_pre-surgery").getGeometryPath().updPathPointSet()
ECU_POST_pps= myModel.getMuscles().get("ECU_post-surgery").getGeometryPath().getPathPointSet()
# Clear all path points from the ECU_pre-surgery path point set
ECU_PRE_pps.clearAndDestroy()
# Add path points from the ECU_post-surgery path to the ECU_pre-surgery path
for i in range(ECU_POST_pps.getSize()):
ECU_PRE_pps.cloneAndAppend(ECU_POST_pps.get(i))
# re-initialize the model now that you changed the path points
myState = myModel.initSystem()
# Get full path name of myModel
fullPathName = myModel.getInputFileName()
# Change the name of the modified model
newName = fullPathName.replace('.osim', '_edited.osim')
myModel.print(newName)
# Load the model in the GUI
loadModel(newName)
## IV. Biomechanical Effects of Tendon Transfer
loadModel(fullPathName)
currentModel = getCurrentModel()
myState = currentModel.initSystem()
# Plot the RF and VASINT fiber lengths with the model in the default pose
plotterPanel = createPlotterPanel("Wrist Deviation Moment vs. Deviation Angle. ")
crv1 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECRB+ECRL+ECU_pre-surgery+EDCI+EDCL+EDCM+EDCR+EDM+EIP+EPL","flexion")
setCurveLegend(crv1, "Before Transfer")
crv2 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECRB+ECRL+ECU_post-surgery+EDCI+EDCL+EDCM+EDCR+EDM+EIP+EPL","flexion")
setCurveLegend(crv2, "After Transfer")
## Effect of Tendon transfer on ECU muscle
# Wrist Moment VS Flexion
plotterPanel = createPlotterPanel("Wrist Moment VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPanel, "moment.flexion", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# Tendon force VS Flexion
plotterPane2 = createPlotterPanel("Tendon force VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane2, "tendon force", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPane2, "tendon force", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# flexion moment arm VS Flexion
plotterPane3 = createPlotterPanel("flexion moment arm VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane3, "momentArm.flexion", "ECU_pre-surgery","flexion")
setCurveLegend(crv1, "Pre-Surgery")
crv2 = addAnalysisCurve(plotterPane3, "momentArm.flexion", "ECU_post-surgery","flexion")
setCurveLegend(crv2, "post-surgery")
# Create muscle objects for both a ECU pre- & post- surgery
ECUpresurgery = myModel.getMuscles().get("ECU_pre-surgery")
ECUpostsurgery = myModel.getMuscles().get("ECU_post-surgery")
# Find the optimal fibre length of that muscle
optLengthECUpre = ECUpresurgery.getOptimalFiberLength()
optLengthECUpost = ECUpostsurgery.getOptimalFiberLength()
## The Effect of Tendon Slack Length
myModel = getCurrentModel()
# Plot the muscle properties with existing Tendon-slack Length
# Tendon force VS Flexion
plotterPane1 = createPlotterPanel("Tendon force VS Flexion Angle")
crv1 = addAnalysisCurve(plotterPane1, "tendon force", "ECRB","flexion")
setCurveLegend(crv1, "ECRB")
# Muscle-tendon length VS Flexion
plotterPane2 = createPlotterPanel("Muscle-tendon length VS Flexion Angle")
crv2 = addAnalysisCurve(plotterPane2, "muscle-tendon length", "ECRB","flexion")
setCurveLegend(crv2, "ECRB")
# Fibre length VS Flexion
plotterPane3 = createPlotterPanel("Fibre length VS Flexion Angle")
crv3 = addAnalysisCurve(plotterPane3, "fiber-length", "ECRB","flexion")
setCurveLegend(crv3, "ECRB")
# Changing the optimal fibre length
# Create the ECRB muscle object
ECRB = myModel.getMuscles().get("ECRB")
# Back up the original tendon slack length (just in case)
backupTendonSlackLength = ECRB.getTendonSlackLength()
# Prescribe a new Tendon slack length
ECRB.setTendonSlackLength(0.2105)
# Re-initialize the states
myModel.initSystem()
# Plot the muscle properties with new Tendon-slack Length
# Tendon force VS Flexion
crv4 = addAnalysisCurve(plotterPane1, "tendon force", "ECRB","flexion")
setCurveLegend(crv4, "ECRB_0.210")
# Muscle-tendon length VS Flexion
crv5 = addAnalysisCurve(plotterPane2, "muscle-tendon length", "ECRB","flexion")
setCurveLegend(crv5, "ECRB_0.210")
# Fibre length VS Flexion
crv6 = addAnalysisCurve(plotterPane3, "fiber-length", "ECRB","flexion")
setCurveLegend(crv6, "ECRB_0.210")
| en | 0.753817 | # --------------------------------------------------------------------------- # # OpenSim: runTutorialTwo.py # # --------------------------------------------------------------------------- # # OpenSim is a toolkit for musculoskeletal modeling and simulation, # # developed as an open source project by a worldwide community. Development # # and support is coordinated from Stanford University, with funding from the # # U.S. NIH and DARPA. See http://opensim.stanford.edu and the README file # # for more information including specific grant numbers. # # # # Copyright (c) 2005-2017 Stanford University and the Authors # # Author(s): <NAME>, <NAME> # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain a # # copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. 
# # --------------------------------------------------------------------------- # # Written by <NAME>, Stanford University ## This example performs the steps of Tutorial Two in scripting form # Define the files and folders we will be using # Load the model # Get a handle to the current model # Create a fresh copy # Initialize the copy, if values needed to be set in state # pass along the variable myState returned by initSystem # Change the name of the model ##myModel.setName("Wrist Tendon Surgery.") ## Change the path points of the ECU_pre-surgery to match the existing ECU_post-surgery muscle # Clear all path points from the ECU_pre-surgery path point set # Add path points from the ECU_post-surgery path to the ECU_pre-surgery path # re-initialize the model now that you changed the path points # Get full path name of myModel # Change the name of the modified model # Load the model in the GUI ## IV. Biomechanical Effects of Tendon Transfer # Plot the RF and VASINT fiber lengths with the model in the default pose ## Effect of Tendon transfer on ECU muscle # Wrist Moment VS Flexion # Tendon force VS Flexion # flexion moment arm VS Flexion # Create muscle objects for both a ECU pre- & post- surgery # Find the optimal fibre length of that muscle ## The Effect of Tendon Slack Length # Plot the muscle properties with existing Tendon-slack Length # Tendon force VS Flexion # Muscle-tendon length VS Flexion # Fibre length VS Flexion # Changing the optimal fibre length # Create the ECRB muscle object # Back up the original tendon slack length (just in case) # Prescribe a new Tendon slack length # Re-initialize the states # Plot the muscle properties with new Tendon-slack Length # Tendon force VS Flexion # Muscle-tendon length VS Flexion # Fibre length VS Flexion | 1.364695 | 1 |
venv/Lib/site-packages/langdetect/lang_detect_exception.py | GuilhermeJC13/storIA | 1,269 | 6614556 | <filename>venv/Lib/site-packages/langdetect/lang_detect_exception.py<gh_stars>1000+
_error_codes = {
'NoTextError': 0,
'FormatError': 1,
'FileLoadError': 2,
'DuplicateLangError': 3,
'NeedLoadProfileError': 4,
'CantDetectError': 5,
'CantOpenTrainData': 6,
'TrainDataFormatError': 7,
'InitParamError': 8,
}
ErrorCode = type('ErrorCode', (), _error_codes)
class LangDetectException(Exception):
def __init__(self, code, message):
super(LangDetectException, self).__init__(message)
self.code = code
def get_code(self):
return self.code
| <filename>venv/Lib/site-packages/langdetect/lang_detect_exception.py<gh_stars>1000+
_error_codes = {
'NoTextError': 0,
'FormatError': 1,
'FileLoadError': 2,
'DuplicateLangError': 3,
'NeedLoadProfileError': 4,
'CantDetectError': 5,
'CantOpenTrainData': 6,
'TrainDataFormatError': 7,
'InitParamError': 8,
}
ErrorCode = type('ErrorCode', (), _error_codes)
class LangDetectException(Exception):
def __init__(self, code, message):
super(LangDetectException, self).__init__(message)
self.code = code
def get_code(self):
return self.code
| none | 1 | 2.299616 | 2 | |
termproject_phase1.py | eyesimk/CS411-Cryptography | 0 | 6614557 | import math
import timeit
import random
import sympy
import warnings
from random import randint, seed
import sys
from ecpy.curves import Curve, Point
from Crypto.Hash import SHA3_256
import requests
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Util.Padding import pad
from Crypto.Util.Padding import unpad
import random
import re
import json
API_URL = 'http://cryptlygos.pythonanywhere.com'
stuID = 25308
def random_prime(bitsize):
# random.seed(42)
warnings.simplefilter('ignore')
chck = False
while chck == False:
p = random.randrange(2 ** (bitsize - 1), 2 ** bitsize - 1)
chck = sympy.isprime(p)
warnings.simplefilter('default')
return p
curve = Curve.get_curve('secp256k1')
# TODO: HERE CREATE A LONG TERM KEY
random.seed(42)
sL = randint(1, random_prime(256) - 1)
print("sL: ", sL)
# base point P is the generator
P = curve.generator
lkey = sL * P
print("lkey: ", lkey)
n = curve.order
print("n: ", n)
k = randint(1, n - 2)
print("k: ", k)
R = k * P
print("R: ", R)
r = (R.x) % n
print("r: ", r)
m = "25097"
h_ = SHA3_256.new(b'25097' + r.to_bytes((r.bit_length() + 7) // 8, byteorder='big'))
h = (int.from_bytes(h_.digest(), byteorder='big')) % n
print("h: ", h)
s = (sL * h + k) % n
print("s: ", s)
# print("sL: ", sL)
print("LKey.x: ", lkey.x)
print("LKey.y: ", lkey.y)
# print("LKey: ", lkey)
V = (s * P) - (h * lkey)
print("V: ", V)
v = V.x % n
print("v: ", v)
h_2 = SHA3_256.new(b'25097' + v.to_bytes((v.bit_length() + 7) // 8, byteorder='big'))
h_new = (int.from_bytes(h_2.digest(), byteorder='big')) % n
if h == h_new:
print("true")
else:
print("false")
n = curve.order
# HERE GENERATE A EPHEMERAL KEY
e_sL = randint(1, random_prime(256) - 1)
print("e_sL: ", e_sL)
# base point P is the generator
ekey = e_sL * P
print("e_Lkey: ", ekey)
print("e_Lkey.x: ", ekey.x)
print("e_Lkey.y: ", ekey.y)
# server's long term key
QSer_long = Point(0xc1bc6c9063b6985fe4b93be9b8f9d9149c353ae83c34a434ac91c85f61ddd1e9,
0x931bd623cf52ee6009ed3f50f6b4f92c564431306d284be7e97af8e443e69a8c, curve)
try:
# REGISTRATION
mes = {'ID': stuID, 'h': h, 's': s, 'LKEY.X': lkey.x, 'LKEY.Y': lkey.y}
response = requests.put('{}/{}'.format(API_URL, "RegStep1"), json=mes)
if ((response.ok) == False): raise Exception(response.json())
print(response.json())
print("Enter verification code which is sent to you: ")
code = int(input())
mes = {'ID': stuID, 'CODE': code}
response = requests.put('{}/{}'.format(API_URL, "RegStep3"), json=mes)
if ((response.ok) == False): raise Exception(response.json())
print(response.json())
# STS PROTOCOL
mes = {'ID': stuID, 'EKEY.X': ekey.x, 'EKEY.Y': ekey.y}
response = requests.put('{}/{}'.format(API_URL, "STSStep1&2"), json=mes)
if ((response.ok) == False): raise Exception(response.json())
res = response.json()
#calculate T,K,U
qB = Point(res['SKEY.X'], res['SKEY.Y'], curve)
T = e_sL * qB
print("x coordinate of T: ",T.x)
print("y coordinate of T: ",T.y)
a = "BeYourselfNoMatterWhatTheySay"
U = str(T.x) + str(T.y) + a
U = bytes(U, 'utf-8')
print("U:",U)
K = SHA3_256.new(U)
print("K: ", K)
W1 = str(ekey.x) + str(ekey.y) + str(qB.x) + str(qB.y)
print("W1: ", W1)
#Sign Message
sig_k = randint(1, n - 2)
new_R = sig_k * P
new_r = new_R.x % n
m = bytes(W1, 'utf-8')
byte_r = new_r.to_bytes((new_r.bit_length() + 7) // 8, byteorder='big')
h_3 = SHA3_256.new(m + byte_r)
sig_h = (int.from_bytes(h_3.digest(), byteorder='big'))
sig_h = sig_h % n
sig_s = (sL * sig_h + sig_k) % n
print("sig_s: ", sig_s)
print("sig_h: ", sig_h)
Y1 = 's' + str(sig_s) + 'h' + str(sig_h)
Y1 = bytes(Y1, 'utf-8')
print("plaintext: ", Y1)
#Encryption
crypto = AES.new(K.digest(), AES.MODE_CTR)
Y1 = crypto.encrypt(Y1)
nonce = crypto.nonce
print("Y1: ", Y1)
print("nonce: ", nonce)
final_message = nonce + Y1
print("nonce + y1", final_message)
ctext = int.from_bytes(final_message, byteorder='big')
print("ctext", ctext)
###Send encrypted-signed keys and retrive server's signed keys
mes = {'ID': stuID, 'FINAL MESSAGE': ctext}
response = requests.put('{}/{}'.format(API_URL, "STSStep4&5"), json=mes)
if ((response.ok) == False):
raise Exception(response.json())
ctext = response.json()
#Decrypt
W2 = ctext.to_bytes((ctext.bit_length() + 7) // 8, byteorder='big')
print("Received encrypted ciphertext: ", W2)
crypto = AES.new(K.digest(), AES.MODE_CTR, nonce=W2[0:8])
decrypted = crypto.decrypt(W2[8:])
decoded = decrypted.decode('UTF-8')
print("Decrypted text: ", decoded)
message = str(qB.x) + str(qB.y) + str(ekey.x) + str(ekey.y)
message = bytes(message, 'utf-8')
print("The message is:", message)
s_nw = decoded[1:decoded.index('h')]
h_nw = decoded[decoded.index('h') + 1:]
s_nw = int(s_nw)
h_nw = int(h_nw)
#verify
V = (s * P) - (h * lkey)
print("V: ", V)
v = V.x % n
print("v: ", v)
h_2 = SHA3_256.new(b'25097' + v.to_bytes((v.bit_length() + 7) // 8, byteorder='big'))
h_new = (int.from_bytes(h_2.digest(), byteorder='big')) % n
if h == h_new:
print("true")
else:
print("false")
# get a message from server for
mes = {'ID': stuID}
response = requests.get('{}/{}'.format(API_URL, "STSStep6"), json=mes)
ctext = response.json()
print(ctext)
#Decrypt
num = ctext.to_bytes((ctext.bit_length() + 7) // 8, byteorder='big')
crypto = AES.new(K.digest(), AES.MODE_CTR, nonce=num[0:8])
dtext = crypto.decrypt(num[8:])
decoded_dtext = dtext.decode('UTF-8')
print("Decrypted text: ", decoded_dtext)
#Add 1 to random to create the new message and encrypt it
random = decoded_dtext[decoded_dtext.index('.') + 2:]
text = decoded_dtext[:decoded_dtext.index('.') + 1]
#print("Text: ", text)
#print("Random: ", rand)
random = int(random) + 1
text = text + " " + str(random)
print(text)
text = bytes(text, 'utf-8')
crypto = AES.new(K.digest(), AES.MODE_CTR)
ctext = crypto.nonce + crypto.encrypt(text)
ct = int.from_bytes(ctext, byteorder='big')
print("Plaintext: ", text)
# send the message and get response of the server
mes = {'ID': stuID, 'ctext': ct}
response = requests.put('{}/{}'.format(API_URL, "STSStep7&8"), json=mes)
ctext = response.json()
print("Response: ", ctext)
num = ctext.to_bytes((ctext.bit_length() + 7) // 8, byteorder='big')
crypto = AES.new(K.digest(), AES.MODE_CTR, nonce=num[0:8])
dtext = crypto.decrypt(num[8:])
print("Decrypted text: ", dtext.decode('UTF-8'))
decoded_dtext = dtext.decode('UTF-8')
#print(decoded_dtext)
except Exception as e:
print(e)
| import math
import timeit
import random
import sympy
import warnings
from random import randint, seed
import sys
from ecpy.curves import Curve, Point
from Crypto.Hash import SHA3_256
import requests
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Util.Padding import pad
from Crypto.Util.Padding import unpad
import random
import re
import json
API_URL = 'http://cryptlygos.pythonanywhere.com'
stuID = 25308
def random_prime(bitsize):
    """Return a random prime with *bitsize* bits.

    Candidates are drawn from [2**(bitsize-1), 2**bitsize - 1) and
    tested with sympy.isprime until one passes.
    """
    # random.seed(42)
    # catch_warnings restores the caller's warning-filter state on exit.
    # The old code called simplefilter('ignore') then simplefilter('default'),
    # which wiped out whatever filters the application had installed.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        while True:
            p = random.randrange(2 ** (bitsize - 1), 2 ** bitsize - 1)
            if sympy.isprime(p):
                return p
# secp256k1: all protocol arithmetic below is on this curve.
curve = Curve.get_curve('secp256k1')
# TODO: HERE CREATE A LONG TERM KEY
# NOTE(review): seeding with a constant makes the "secret" long-term key
# fully reproducible -- acceptable for grading, never for real use.
random.seed(42)
sL = randint(1, random_prime(256) - 1)  # long-term private key
print("sL: ", sL)
# base point P is the generator
P = curve.generator
lkey = sL * P  # long-term public key: Q = sL * P
print("lkey: ", lkey)
n = curve.order
print("n: ", n)
# Schnorr-style signature over the ID: R = k*P, h = H(ID || r) mod n,
# s = sL*h + k (mod n).
k = randint(1, n - 2)  # per-signature ephemeral scalar
print("k: ", k)
R = k * P
print("R: ", R)
r = (R.x) % n
print("r: ", r)
m = "25097"  # NOTE(review): unused -- the hash below uses the literal b'25097'
# NOTE(review): b'25097' is hashed although stuID is 25308; confirm which
# ID the server actually verifies against.
h_ = SHA3_256.new(b'25097' + r.to_bytes((r.bit_length() + 7) // 8, byteorder='big'))
h = (int.from_bytes(h_.digest(), byteorder='big')) % n
print("h: ", h)
s = (sL * h + k) % n
print("s: ", s)
# print("sL: ", sL)
print("LKey.x: ", lkey.x)
print("LKey.y: ", lkey.y)
# print("LKey: ", lkey)
# Local self-check of the signature: V = s*P - h*Q should equal R, so
# hashing V.x must reproduce h.
V = (s * P) - (h * lkey)
print("V: ", V)
v = V.x % n
print("v: ", v)
h_2 = SHA3_256.new(b'25097' + v.to_bytes((v.bit_length() + 7) // 8, byteorder='big'))
h_new = (int.from_bytes(h_2.digest(), byteorder='big')) % n
if h == h_new:
    print("true")
else:
    print("false")
n = curve.order  # NOTE(review): redundant -- n was already set above
# HERE GENERATE A EPHEMERAL KEY
e_sL = randint(1, random_prime(256) - 1)  # ephemeral private key for STS
print("e_sL: ", e_sL)
# base point P is the generator
ekey = e_sL * P  # ephemeral public key
print("e_Lkey: ", ekey)
print("e_Lkey.x: ", ekey.x)
print("e_Lkey.y: ", ekey.y)
# server's long term key
# NOTE(review): QSer_long is never used below; the server key actually used
# (qB) is taken from the STSStep1&2 response instead.
QSer_long = Point(0xc1bc6c9063b6985fe4b93be9b8f9d9149c353ae83c34a434ac91c85f61ddd1e9,
                  0x931bd623cf52ee6009ed3f50f6b4f92c564431306d284be7e97af8e443e69a8c, curve)
try:
    # REGISTRATION
    # Step 1: send the signed student ID (h, s) plus the long-term public key.
    mes = {'ID': stuID, 'h': h, 's': s, 'LKEY.X': lkey.x, 'LKEY.Y': lkey.y}
    response = requests.put('{}/{}'.format(API_URL, "RegStep1"), json=mes)
    if ((response.ok) == False): raise Exception(response.json())
    print(response.json())
    # Step 3: echo back the verification code the server sent out-of-band.
    print("Enter verification code which is sent to you: ")
    code = int(input())
    mes = {'ID': stuID, 'CODE': code}
    response = requests.put('{}/{}'.format(API_URL, "RegStep3"), json=mes)
    if ((response.ok) == False): raise Exception(response.json())
    print(response.json())
    # STS PROTOCOL
    # Steps 1&2: exchange ephemeral public keys with the server.
    mes = {'ID': stuID, 'EKEY.X': ekey.x, 'EKEY.Y': ekey.y}
    response = requests.put('{}/{}'.format(API_URL, "STSStep1&2"), json=mes)
    if ((response.ok) == False): raise Exception(response.json())
    res = response.json()
    #calculate T,K,U
    # DH shared point T = e_sL * qB; session key K = SHA3-256(T.x || T.y || a).
    qB = Point(res['SKEY.X'], res['SKEY.Y'], curve)
    T = e_sL * qB
    print("x coordinate of T: ",T.x)
    print("y coordinate of T: ",T.y)
    a = "BeYourselfNoMatterWhatTheySay"
    U = str(T.x) + str(T.y) + a
    U = bytes(U, 'utf-8')
    print("U:",U)
    K = SHA3_256.new(U)
    print("K: ", K)
    # W1: concatenation of both public keys -- the message to be signed.
    W1 = str(ekey.x) + str(ekey.y) + str(qB.x) + str(qB.y)
    print("W1: ", W1)
    #Sign Message
    sig_k = randint(1, n - 2)
    new_R = sig_k * P
    new_r = new_R.x % n
    m = bytes(W1, 'utf-8')
    byte_r = new_r.to_bytes((new_r.bit_length() + 7) // 8, byteorder='big')
    h_3 = SHA3_256.new(m + byte_r)
    sig_h = (int.from_bytes(h_3.digest(), byteorder='big'))
    sig_h = sig_h % n
    sig_s = (sL * sig_h + sig_k) % n
    print("sig_s: ", sig_s)
    print("sig_h: ", sig_h)
    # Wire format "s<sig_s>h<sig_h>" -- mirrors the parsing done on receipt.
    Y1 = 's' + str(sig_s) + 'h' + str(sig_h)
    Y1 = bytes(Y1, 'utf-8')
    print("plaintext: ", Y1)
    #Encryption
    # AES-CTR under the session key; the 8-byte nonce is prepended so the
    # receiver can reconstruct the counter.
    crypto = AES.new(K.digest(), AES.MODE_CTR)
    Y1 = crypto.encrypt(Y1)
    nonce = crypto.nonce
    print("Y1: ", Y1)
    print("nonce: ", nonce)
    final_message = nonce + Y1
    print("nonce + y1", final_message)
    ctext = int.from_bytes(final_message, byteorder='big')
    print("ctext", ctext)
    ###Send encrypted-signed keys and retrive server's signed keys
    mes = {'ID': stuID, 'FINAL MESSAGE': ctext}
    response = requests.put('{}/{}'.format(API_URL, "STSStep4&5"), json=mes)
    if ((response.ok) == False):
        raise Exception(response.json())
    ctext = response.json()
    #Decrypt
    W2 = ctext.to_bytes((ctext.bit_length() + 7) // 8, byteorder='big')
    print("Received encrypted ciphertext: ", W2)
    crypto = AES.new(K.digest(), AES.MODE_CTR, nonce=W2[0:8])
    decrypted = crypto.decrypt(W2[8:])
    decoded = decrypted.decode('UTF-8')
    print("Decrypted text: ", decoded)
    message = str(qB.x) + str(qB.y) + str(ekey.x) + str(ekey.y)
    message = bytes(message, 'utf-8')
    print("The message is:", message)
    # Parse the server's signature out of "s<...>h<...>".
    s_nw = decoded[1:decoded.index('h')]
    h_nw = decoded[decoded.index('h') + 1:]
    s_nw = int(s_nw)
    h_nw = int(h_nw)
    #verify
    # NOTE(review): this re-checks OUR OWN registration signature (s, h,
    # lkey); the server's (s_nw, h_nw) over `message` is never verified
    # against qB -- confirm whether that was intended.
    V = (s * P) - (h * lkey)
    print("V: ", V)
    v = V.x % n
    print("v: ", v)
    h_2 = SHA3_256.new(b'25097' + v.to_bytes((v.bit_length() + 7) // 8, byteorder='big'))
    h_new = (int.from_bytes(h_2.digest(), byteorder='big')) % n
    if h == h_new:
        print("true")
    else:
        print("false")
    # get a message from server for
    mes = {'ID': stuID}
    response = requests.get('{}/{}'.format(API_URL, "STSStep6"), json=mes)
    ctext = response.json()
    print(ctext)
    #Decrypt
    num = ctext.to_bytes((ctext.bit_length() + 7) // 8, byteorder='big')
    crypto = AES.new(K.digest(), AES.MODE_CTR, nonce=num[0:8])
    dtext = crypto.decrypt(num[8:])
    decoded_dtext = dtext.decode('UTF-8')
    print("Decrypted text: ", decoded_dtext)
    #Add 1 to random to create the new message and encrypt it
    # NOTE(review): `random` here shadows the imported random module for
    # the remainder of the script.
    random = decoded_dtext[decoded_dtext.index('.') + 2:]
    text = decoded_dtext[:decoded_dtext.index('.') + 1]
    #print("Text: ", text)
    #print("Random: ", rand)
    random = int(random) + 1
    text = text + " " + str(random)
    print(text)
    text = bytes(text, 'utf-8')
    crypto = AES.new(K.digest(), AES.MODE_CTR)
    ctext = crypto.nonce + crypto.encrypt(text)
    ct = int.from_bytes(ctext, byteorder='big')
    print("Plaintext: ", text)
    # send the message and get response of the server
    mes = {'ID': stuID, 'ctext': ct}
    response = requests.put('{}/{}'.format(API_URL, "STSStep7&8"), json=mes)
    ctext = response.json()
    print("Response: ", ctext)
    num = ctext.to_bytes((ctext.bit_length() + 7) // 8, byteorder='big')
    crypto = AES.new(K.digest(), AES.MODE_CTR, nonce=num[0:8])
    dtext = crypto.decrypt(num[8:])
    print("Decrypted text: ", dtext.decode('UTF-8'))
    decoded_dtext = dtext.decode('UTF-8')
    #print(decoded_dtext)
except Exception as e:
    print(e)
| en | 0.577451 | # random.seed(42) # TODO: HERE CREATE A LONG TERM KEY # base point P is the generator # print("sL: ", sL) # print("LKey: ", lkey) # HERE GENERATE A EPHEMERAL KEY # base point P is the generator # server's long term key # REGISTRATION # STS PROTOCOL #calculate T,K,U #Sign Message #Encryption ###Send encrypted-signed keys and retrive server's signed keys #Decrypt #verify # get a message from server for #Decrypt #Add 1 to random to create the new message and encrypt it #print("Text: ", text) #print("Random: ", rand) # send the message and get response of the server #print(decoded_dtext) | 2.839599 | 3 |
mxnet/wide_deep_criteo/inference.py | XiaobingSuper/optimized-models | 25 | 6614558 | """inference script to support accuracy and performance benchmark"""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
from datetime import datetime
import logging
import ctypes
import time
import os
import pickle
import mxnet as mx
from mxnet import nd
from mxnet.base import check_call, _LIB
def load_model(_symbol_file, _param_file, _logger=None):
    """Load a serialized MXNet symbol plus its parameters from disk.

    Paths are resolved relative to this script's directory.

    Parameters:
        _symbol_file: file name of the saved symbol JSON.
        _param_file: file name of the saved ``.params`` file.
        _logger: optional logger; progress is reported when given.

    Returns:
        (symbol, arg_params, aux_params) ready for ``Module.set_params``.
    """
    cur_path = os.path.dirname(os.path.realpath(__file__))
    symbol_file_path = os.path.join(cur_path, _symbol_file)
    if _logger is not None:
        # Lazy %-args: the logging framework formats only if the level is on
        # (the old code pre-formatted with '%' unconditionally).
        _logger.info('Loading symbol from file %s', symbol_file_path)
    symbol = mx.sym.load(symbol_file_path)
    param_file_path = os.path.join(cur_path, _param_file)
    if _logger is not None:
        _logger.info('Loading params from file %s', param_file_path)
    save_dict = nd.load(param_file_path)
    _arg_params = {}
    _aux_params = {}
    for k, v in save_dict.items():
        # Keys are stored as "arg:<name>" or "aux:<name>".
        tp, name = k.split(':', 1)
        if tp == 'arg':
            _arg_params[name] = v
        elif tp == 'aux':
            _aux_params[name] = v
    return symbol, _arg_params, _aux_params
def advance_data_iter(data_iter, n):
    """Skip the first *n* batches of *data_iter* (warm-up for benchmarks).

    Returns the advanced iterator, or None if it was exhausted before
    n batches could be consumed.
    """
    assert n >= 0
    if n == 0:
        return data_iter
    remaining = n
    while True:
        try:
            data_iter.next()
        except StopIteration:
            return None
        remaining -= 1
        if remaining == 0:
            return data_iter
# Static metadata for the Criteo wide&deep setup.
# NOTE(review): values appear to describe the preprocessed dataset
# (26 categorical + 13 continuous columns; 26 * 1000 hashed linear
# features) -- confirm against the training script.
CRITEO = {
    'train': 'train.csv',
    'test': 'eval.csv',
    'num_linear_features': 26000,
    'num_embed_features': 26,
    'num_cont_features': 13,
    'embed_input_dims': 1000,
    'hidden_units': [32, 1024, 512, 256],
}
def load_object(filename):
    """Unpickle and return the object stored in *filename*.

    NOTE: pickle.load can execute arbitrary code; only use on trusted,
    locally produced files.
    """
    with open(filename, 'rb') as f:  # was `as input` -- shadowed the builtin
        return pickle.load(f)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Score a model on a dataset')
    parser.add_argument('--symbol-file', type=str, default='checkpoint-symbol.json', help='symbol file path')
    parser.add_argument('--param-file', type=str, default='checkpoint-0000.params', help='param file path')
    parser.add_argument('--batch-size', type=int, default=1024)
    parser.add_argument('--label-name', type=str, default='softmax_label')
    parser.add_argument('--accuracy', action='store_true')
    parser.add_argument('--shuffle-dataset', action='store_true', default=True,
                        help='shuffle the calibration dataset')
    parser.add_argument('--num-omp-threads', type=int, default=28)
    parser.add_argument('--num-batches', type=int, default=100000)
    parser.add_argument('--num-warmup', type=int, default=5000)
    parser.add_argument('--cuda', action='store_true', help='Inference on GPU with CUDA')
    parser.add_argument('--gpu-id', type=int, default=0)
    args = parser.parse_args()
    ctx = mx.gpu(args.gpu_id) if args.cuda else mx.cpu()
    logging.basicConfig()
    logger = logging.getLogger('logger')
    logger.setLevel(logging.INFO)
    if args.accuracy is True:
        logger.info('Accuracy Mode')
    else:
        logger.info('Performance Mode')
    symbol_file = args.symbol_file
    param_file = args.param_file
    batch_size = args.batch_size
    logger.info('batch size = %d for inference', batch_size)
    label_name = args.label_name
    logger.info('label_name = %s', label_name)
    # Performance mode scores the (larger) train pickles; accuracy mode
    # scores the validation pickles.
    if args.accuracy is False:
        val_csr = load_object('train_csr.pkl')
        val_dns = load_object('train_dns.pkl')
        val_label = load_object('train_label.pkl')
    else:
        val_csr = load_object('val_csr.pkl')
        val_dns = load_object('val_dns.pkl')
        val_label = load_object('val_label.pkl')
    # creating data iterator
    data = mx.io.NDArrayIter({'csr_data': val_csr, 'dns_data': val_dns},
                             {'softmax_label': val_label}, batch_size,
                             shuffle=False, last_batch_handle='discard')
    # loading model
    sym, arg_params, aux_params = load_model(symbol_file, param_file, logger)
    # make sure that fp32 inference works on the same images as calibrated quantized model
    logger.info('Running model %s for inference', symbol_file)
    acc_m = mx.metric.create('acc')
    mod = mx.mod.Module(symbol=sym, context=ctx, data_names=['csr_data', 'dns_data'], label_names=[label_name, ])
    mod.bind(for_training=False,
             data_shapes=data.provide_data,
             label_shapes=data.provide_label)
    mod.set_params(arg_params, aux_params)
    check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))
    # Materialize up to num_batches batches so timing below excludes the
    # iterator overhead.
    # NOTE(review): if the iterator is exhausted before num_batches and does
    # not auto-reset, this while loop would spin -- confirm NDArrayIter
    # behavior for the configured dataset sizes.
    batch_data = []
    nbatch = 0
    while nbatch < args.num_batches:
        for batch in data:
            batch_data.append(batch)
            nbatch += 1
            if nbatch < args.num_batches:
                continue
            else:
                break
    data.hard_reset()
    #for data warmup
    wi = args.num_warmup
    i = 0
    for batch in batch_data:
        if i < wi:
            mod.forward(batch, is_train=False)
            i += 1
        else:
            break
    data.hard_reset()
    mx.nd.waitall()
    #real run
    if "DO_WIDE_DEEP_PROFILING" in os.environ:
        print("wide_deep profiling start !!!!!!!!!!!!!")
        mx.profiler.set_config(profile_symbolic=True, profile_imperative=True, profile_memory=False, profile_api=False)
        mx.profiler.set_state('run')
    nbatch = 0
    tic = time.time()
    logger.info('INFERENCING STARTED: %s', datetime.now().strftime("%m/%d/%Y %H:%M:%S.%f")[:-3])
    for batch in batch_data:
        nbatch += 1
        mod.forward(batch, is_train=False)
        if args.accuracy is True:
            # Block on outputs so the metric sees finished results.
            for output in mod.get_outputs():
                output.wait_to_read()
            mod.update_metric(acc_m, batch.label)
        else:
            mx.nd.waitall()
    logger.info('INFERENCING FINISHED: %s', datetime.now().strftime("%m/%d/%Y %H:%M:%S.%f")[:-3])
    speed = nbatch * batch_size / (time.time() - tic)
    logger.info("Run [%d] Batchs \tSpeed: %.2f samples/sec", nbatch, speed)
    if args.accuracy is True:
        logger.info(acc_m.get())
    if "DO_WIDE_DEEP_PROFILING" in os.environ:
        print("wide_deep profiling end !")
        mx.profiler.set_state('stop')
        profiler_info = mx.profiler.dumps()
        print(profiler_info)
| """inference script to support accuracy and performance benchmark"""
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
from datetime import datetime
import logging
import ctypes
import time
import os
import pickle
import mxnet as mx
from mxnet import nd
from mxnet.base import check_call, _LIB
def load_model(_symbol_file, _param_file, _logger=None):
    """Load a serialized MXNet symbol plus its parameters from disk.

    Paths are resolved relative to this script's directory.

    Parameters:
        _symbol_file: file name of the saved symbol JSON.
        _param_file: file name of the saved ``.params`` file.
        _logger: optional logger; progress is reported when given.

    Returns:
        (symbol, arg_params, aux_params) ready for ``Module.set_params``.
    """
    cur_path = os.path.dirname(os.path.realpath(__file__))
    symbol_file_path = os.path.join(cur_path, _symbol_file)
    if _logger is not None:
        # Lazy %-args: the logging framework formats only if the level is on
        # (the old code pre-formatted with '%' unconditionally).
        _logger.info('Loading symbol from file %s', symbol_file_path)
    symbol = mx.sym.load(symbol_file_path)
    param_file_path = os.path.join(cur_path, _param_file)
    if _logger is not None:
        _logger.info('Loading params from file %s', param_file_path)
    save_dict = nd.load(param_file_path)
    _arg_params = {}
    _aux_params = {}
    for k, v in save_dict.items():
        # Keys are stored as "arg:<name>" or "aux:<name>".
        tp, name = k.split(':', 1)
        if tp == 'arg':
            _arg_params[name] = v
        elif tp == 'aux':
            _aux_params[name] = v
    return symbol, _arg_params, _aux_params
def advance_data_iter(data_iter, n):
    """Skip the first *n* batches of *data_iter* (warm-up for benchmarks).

    Returns the advanced iterator, or None if it was exhausted before
    n batches could be consumed.
    """
    assert n >= 0
    if n == 0:
        return data_iter
    remaining = n
    while True:
        try:
            data_iter.next()
        except StopIteration:
            return None
        remaining -= 1
        if remaining == 0:
            return data_iter
# Static metadata for the Criteo wide&deep setup.
# NOTE(review): values appear to describe the preprocessed dataset
# (26 categorical + 13 continuous columns; 26 * 1000 hashed linear
# features) -- confirm against the training script.
CRITEO = {
    'train': 'train.csv',
    'test': 'eval.csv',
    'num_linear_features': 26000,
    'num_embed_features': 26,
    'num_cont_features': 13,
    'embed_input_dims': 1000,
    'hidden_units': [32, 1024, 512, 256],
}
def load_object(filename):
    """Unpickle and return the object stored in *filename*.

    NOTE: pickle.load can execute arbitrary code; only use on trusted,
    locally produced files.
    """
    with open(filename, 'rb') as f:  # was `as input` -- shadowed the builtin
        return pickle.load(f)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Score a model on a dataset')
parser.add_argument('--symbol-file', type=str, default='checkpoint-symbol.json', help='symbol file path')
parser.add_argument('--param-file', type=str, default='checkpoint-0000.params', help='param file path')
parser.add_argument('--batch-size', type=int, default=1024)
parser.add_argument('--label-name', type=str, default='softmax_label')
parser.add_argument('--accuracy', action='store_true')
parser.add_argument('--shuffle-dataset', action='store_true', default=True,
help='shuffle the calibration dataset')
parser.add_argument('--num-omp-threads', type=int, default=28)
parser.add_argument('--num-batches', type=int, default=100000)
parser.add_argument('--num-warmup', type=int, default=5000)
parser.add_argument('--cuda', action='store_true', help='Inference on GPU with CUDA')
parser.add_argument('--gpu-id', type=int, default=0)
args = parser.parse_args()
ctx = mx.gpu(args.gpu_id) if args.cuda else mx.cpu()
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
if args.accuracy is True:
logger.info('Accuracy Mode')
else:
logger.info('Performance Mode')
symbol_file = args.symbol_file
param_file = args.param_file
batch_size = args.batch_size
logger.info('batch size = %d for inference', batch_size)
label_name = args.label_name
logger.info('label_name = %s', label_name)
if args.accuracy is False:
val_csr = load_object('train_csr.pkl')
val_dns = load_object('train_dns.pkl')
val_label = load_object('train_label.pkl')
else:
val_csr = load_object('val_csr.pkl')
val_dns = load_object('val_dns.pkl')
val_label = load_object('val_label.pkl')
# creating data iterator
data = mx.io.NDArrayIter({'csr_data': val_csr, 'dns_data': val_dns},
{'softmax_label': val_label}, batch_size,
shuffle=False, last_batch_handle='discard')
# loading model
sym, arg_params, aux_params = load_model(symbol_file, param_file, logger)
# make sure that fp32 inference works on the same images as calibrated quantized model
logger.info('Running model %s for inference', symbol_file)
acc_m = mx.metric.create('acc')
mod = mx.mod.Module(symbol=sym, context=ctx, data_names=['csr_data', 'dns_data'], label_names=[label_name, ])
mod.bind(for_training=False,
data_shapes=data.provide_data,
label_shapes=data.provide_label)
mod.set_params(arg_params, aux_params)
check_call(_LIB.MXSetNumOMPThreads(ctypes.c_int(args.num_omp_threads)))
batch_data = []
nbatch = 0
while nbatch < args.num_batches:
for batch in data:
batch_data.append(batch)
nbatch += 1
if nbatch < args.num_batches:
continue
else:
break
data.hard_reset()
#for data warmup
wi = args.num_warmup
i = 0
for batch in batch_data:
if i < wi:
mod.forward(batch, is_train=False)
i += 1
else:
break
data.hard_reset()
mx.nd.waitall()
#real run
if "DO_WIDE_DEEP_PROFILING" in os.environ:
print("wide_deep profiling start !!!!!!!!!!!!!")
mx.profiler.set_config(profile_symbolic=True, profile_imperative=True, profile_memory=False, profile_api=False)
mx.profiler.set_state('run')
nbatch = 0
tic = time.time()
logger.info('INFERENCING STARTED: %s', datetime.now().strftime("%m/%d/%Y %H:%M:%S.%f")[:-3])
for batch in batch_data:
nbatch += 1
mod.forward(batch, is_train=False)
if args.accuracy is True:
for output in mod.get_outputs():
output.wait_to_read()
mod.update_metric(acc_m, batch.label)
else:
mx.nd.waitall()
logger.info('INFERENCING FINISHED: %s', datetime.now().strftime("%m/%d/%Y %H:%M:%S.%f")[:-3])
speed = nbatch * batch_size / (time.time() - tic)
logger.info("Run [%d] Batchs \tSpeed: %.2f samples/sec", nbatch, speed)
if args.accuracy is True:
logger.info(acc_m.get())
if "DO_WIDE_DEEP_PROFILING" in os.environ:
print("wide_deep profiling end !")
mx.profiler.set_state('stop')
profiler_info = mx.profiler.dumps()
print(profiler_info)
| en | 0.856345 | inference script to support accuracy and performance benchmark # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. load existing symbol model use to warm up data for performance benchmark # creating data iterator # loading model # make sure that fp32 inference works on the same images as calibrated quantized model #for data warmup #real run | 1.886186 | 2 |
tests/test_filter.py | mhubl/botrecon | 0 | 6614559 | from click.testing import CliRunner
from pathlib import Path
import re
from botrecon import botrecon
import warnings
runner = CliRunner()
# Sample capture file shared by every filter test below.
path = str(Path('tests', 'data', 'filter.csv'))
# Matches dotted-quad IPv4 addresses in the CLI output.
regex = r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}'
def make_args(ips, path):
    """Build a botrecon CLI argument list: one --ip flag per entry, then path."""
    args = []
    for addr in ips:
        args.extend(['--ip', addr])
    args.append(path)
    return args
def test_filter_ip(ip='172.16.31.10'):
    """A single --ip filter should yield exactly that one address."""
    with warnings.catch_warnings():
        warnings.filterwarnings(
            action='ignore',
            category=DeprecationWarning,
            module=r'.*patsy'
        )
        # For some reason this line causes a warning,
        # but it doesn't happen in any of the other tests
        result = runner.invoke(botrecon, ['--ip', ip, path])
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 1
    assert matches[0] == ip
def test_filter_ip_multiple():
    """Repeated --ip flags should all be applied (one output row each)."""
    ips = ['172.16.31.10', '10.0.0.7', '172.16.31.10', '172.16.58.3']
    args = make_args(ips, path)
    result = runner.invoke(botrecon, args)
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == len(ips)
def test_filter_range(ip='172.16.58.3/24'):
    """CIDR /24 filter matches the four fixture hosts in that subnet."""
    result = runner.invoke(botrecon, ['--ip', ip, path])
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 4
def test_filter_range2(ip='172.16.58.3/255.255.255.0'):
    """Dotted-quad netmask form of the same /24 filter."""
    result = runner.invoke(botrecon, ['--ip', ip, path])
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 4
def test_filter_range3(ip='172.16.58.3/8'):
    """/8 supernet matches all eight fixture hosts."""
    result = runner.invoke(botrecon, ['--ip', ip, path])
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 8
def test_filter_range4(ip='172.16.58.3/255.255.255.0'):
    # NOTE(review): identical to test_filter_range2 (same address and mask);
    # probably meant to exercise a different mask form.
    result = runner.invoke(botrecon, ['--ip', ip, path])
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 4
def test_filter_range_multiple():
    """Several CIDR ranges combine additively (4 + 2 + 4 hosts)."""
    ips = ['172.16.58.3/24', '10.0.0.0/16', '192.168.127.12/16']
    args = make_args(ips, path)
    result = runner.invoke(botrecon, args)
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 4 + 2 + 4
def test_filter_mixed():
    """Ranges and single addresses can be mixed on one command line."""
    ips = ['172.16.31.10/24', '10.1.0.0/16', '172.16.17.32', '192.168.127.12']
    args = make_args(ips, path)
    result = runner.invoke(botrecon, args)
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 4 + 2 + 2
def test_filter_from_file(tmp_path):
    """--ip also accepts a file path containing one filter per line."""
    ips = ['172.16.31.10/24', '10.1.0.0/16', '172.16.17.32', '192.168.127.12']
    ips = "\n".join(ips)
    tmp_path = tmp_path / 'ip_test'
    tmp_path.write_text(ips)
    result = runner.invoke(botrecon, ['--ip', str(tmp_path), path])
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 4 + 2 + 2
def test_filter_from_files(tmp_path):
    """Multiple filter files passed via repeated --ip are merged."""
    ips = ['172.16.31.10/24', '172.16.17.32', '10.1.0.0/16', '192.168.127.12']
    # Divide the list into two
    ips1 = "\n".join(ips[:2])
    ips2 = "\n".join(ips[2:])
    # Save it as two separate files
    tmp_path2 = tmp_path / 'ip_test2'
    tmp_path1 = tmp_path / 'ip_test1'
    tmp_path1.write_text(ips1)
    tmp_path2.write_text(ips2)
    args = make_args([str(tmp_path1), str(tmp_path2)], path)
    result = runner.invoke(botrecon, args)
    out = str(result.stdout_bytes)
    matches = re.findall(regex, out)
    assert len(matches) == 4 + 2 + 2
from click.testing import CliRunner
from pathlib import Path
import re
from botrecon import botrecon
import warnings
runner = CliRunner()
path = str(Path('tests', 'data', 'filter.csv'))
regex = r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}'
def make_args(ips, path):
    """Build a botrecon CLI argument list: one --ip flag per entry, then path."""
    args = []
    for addr in ips:
        args.extend(['--ip', addr])
    args.append(path)
    return args
def test_filter_ip(ip='172.16.31.10'):
with warnings.catch_warnings():
warnings.filterwarnings(
action='ignore',
category=DeprecationWarning,
module=r'.*patsy'
)
# For some reason this line causes a warning,
# but it doesn't happen in any of the other tests
result = runner.invoke(botrecon, ['--ip', ip, path])
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 1
assert matches[0] == ip
def test_filter_ip_multiple():
ips = ['172.16.31.10', '10.0.0.7', '172.16.31.10', '172.16.58.3']
args = make_args(ips, path)
result = runner.invoke(botrecon, args)
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == len(ips)
def test_filter_range(ip='172.16.58.3/24'):
result = runner.invoke(botrecon, ['--ip', ip, path])
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 4
def test_filter_range2(ip='172.16.58.3/255.255.255.0'):
result = runner.invoke(botrecon, ['--ip', ip, path])
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 4
def test_filter_range3(ip='172.16.58.3/8'):
result = runner.invoke(botrecon, ['--ip', ip, path])
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 8
def test_filter_range4(ip='172.16.58.3/255.255.255.0'):
result = runner.invoke(botrecon, ['--ip', ip, path])
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 4
def test_filter_range_multiple():
ips = ['172.16.58.3/24', '10.0.0.0/16', '192.168.127.12/16']
args = make_args(ips, path)
result = runner.invoke(botrecon, args)
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 4 + 2 + 4
def test_filter_mixed():
ips = ['172.16.31.10/24', '10.1.0.0/16', '172.16.17.32', '192.168.127.12']
args = make_args(ips, path)
result = runner.invoke(botrecon, args)
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 4 + 2 + 2
def test_filter_from_file(tmp_path):
ips = ['172.16.31.10/24', '10.1.0.0/16', '172.16.17.32', '192.168.127.12']
ips = "\n".join(ips)
tmp_path = tmp_path / 'ip_test'
tmp_path.write_text(ips)
result = runner.invoke(botrecon, ['--ip', str(tmp_path), path])
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 4 + 2 + 2
def test_filter_from_files(tmp_path):
ips = ['172.16.31.10/24', '172.16.17.32', '10.1.0.0/16', '192.168.127.12']
# Divide the list into two
ips1 = "\n".join(ips[:2])
ips2 = "\n".join(ips[2:])
# Save it as two separate files
tmp_path2 = tmp_path / 'ip_test2'
tmp_path1 = tmp_path / 'ip_test1'
tmp_path1.write_text(ips1)
tmp_path2.write_text(ips2)
args = make_args([str(tmp_path1), str(tmp_path2)], path)
result = runner.invoke(botrecon, args)
out = str(result.stdout_bytes)
matches = re.findall(regex, out)
assert len(matches) == 4 + 2 + 2
| en | 0.957959 | # For some reason this line causes a warning, # but it doesn't happen in any of the other tests # Divide the list into two # Save it as two separate files | 2.159149 | 2 |
CNN/run.py | MJC598/RJI_Quality_Analysis | 0 | 6614560 | <filename>CNN/run.py
import im_to_mat
def build_cnn():
    """Stub: will assemble the CNN estimator; the model fn below is unfinished.

    The original bodies contained only comments, which is a SyntaxError in
    Python -- concrete statements are required.
    """
    def cnn_model_fn(features, labels, mode):
        #input layer
        #convolutional layer
        #pooling layer
        raise NotImplementedError('cnn_model_fn is not implemented yet')
if __name__ == '__main__':
    # Nothing to run yet; `pass` keeps the bodyless guard syntactically valid.
    pass
| <filename>CNN/run.py
import im_to_mat
def build_cnn():
    """Stub: will assemble the CNN estimator; the model fn below is unfinished.

    The original bodies contained only comments, which is a SyntaxError in
    Python -- concrete statements are required.
    """
    def cnn_model_fn(features, labels, mode):
        #input layer
        #convolutional layer
        #pooling layer
        raise NotImplementedError('cnn_model_fn is not implemented yet')
if __name__ == '__main__':
    # Nothing to run yet; `pass` keeps the bodyless guard syntactically valid.
    pass
| uk | 0.135942 | #input layer #convolutional layer #pooling layer | 2.338152 | 2 |
experiments/src/train_doc2vec.py | aseifert/million-post-corpus | 13 | 6614561 | <filename>experiments/src/train_doc2vec.py
import logging
import multiprocessing
import os
import sqlite3
from gensim.models.doc2vec import TaggedDocument, Doc2Vec
from preprocessing import normalize, micro_tokenize
from customlogging import logger
import conf
def preprocess(row):
    """Normalize and tokenize the headline/body pair in *row*.

    Joins whichever of row[0] (headline) and row[1] (body) are non-empty
    with a single space, then runs the normalize + micro_tokenize pipeline.
    """
    parts = [field for field in (row[0], row[1]) if field]
    txt = ' '.join(parts)
    return micro_tokenize(normalize(txt))
def get_post_documents():
    """Yield TaggedDocument(words, [ID_Post]) for every unlabeled post.

    Streams rows from the corpus DB in chunks of 100k and normalizes /
    tokenizes them in parallel with a multiprocessing pool.

    NOTE(review): the pool is only closed when the generator runs to
    exhaustion; abandoning it early leaks worker processes.
    """
    logger.debug('Fetching unlabeled posts from database')
    con = sqlite3.connect(conf.CORPUSDB)
    sql = '''
        SELECT ID_Post, COALESCE(Headline, '') || ' ' || COALESCE(Body, '')
        FROM Posts
        WHERE ID_Post NOT IN (
            SELECT DISTINCT ID_Post
            FROM Annotations
        )
    '''
    r = con.execute(sql)
    pool = multiprocessing.Pool()
    while True:
        rows = r.fetchmany(100000)
        if len(rows) == 0:
            break
        logger.debug('Normalizing and tokenizing')
        # The comprehension's `r` is local to it (py3 scoping) and does not
        # clobber the cursor `r` above.
        wordlists = pool.map(micro_tokenize,
                             pool.map(normalize, [ r[1] for r in rows ]))
        for i, words in enumerate(wordlists):
            yield TaggedDocument(words, [ rows[i][0] ])
    pool.close()
    pool.join()
    logger.debug('End of generator')
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s [doc2vec] : %(message)s',
                        level=logging.INFO)
    # PV-DM (dm=1); iter=1 and workers=1 so each train() call below is a
    # single, reproducible pass over the corpus.
    d2v = Doc2Vec(dm=1, size=conf.D2V_DIMS, negative=5, iter=1,
                  alpha=conf.D2V_ALPHA, seed=conf.SEED, workers=1)
    logger.debug('Building doc2vec vocabulary...')
    d2v.build_vocab(get_post_documents())
    logger.debug('doc2vec training...')
    # Manual linear learning-rate decay from D2V_ALPHA to D2V_MINALPHA
    # across D2V_EPOCHS passes.
    alpha = conf.D2V_ALPHA
    alpha_delta = (conf.D2V_ALPHA - conf.D2V_MINALPHA) / conf.D2V_EPOCHS
    for i in range(conf.D2V_EPOCHS):
        logger.debug('Epoch %d of %d (alpha = %f)', i+1, conf.D2V_EPOCHS, alpha)
        d2v.alpha = alpha
        # NOTE(review): gensim >= 2.x requires total_examples and epochs
        # arguments to train(); this call targets the older API.
        d2v.train(get_post_documents(), report_delay=10.0)
        alpha -= alpha_delta
    if not os.path.exists(conf.D2V_DIR):
        os.mkdir(conf.D2V_DIR)
    outfile = os.path.join(conf.D2V_DIR, 'model')
    logger.debug('Storing doc2vec object to "%s"' % outfile)
    # Drop the per-document vector arrays before saving -- presumably to
    # shrink the saved model; confirm downstream code does not need them.
    del d2v.docvecs.doctag_syn0
    del d2v.docvecs.doctag_syn0_lockf
    d2v.save(outfile, pickle_protocol=3)
    logger.debug('Finished.')
| <filename>experiments/src/train_doc2vec.py
import logging
import multiprocessing
import os
import sqlite3
from gensim.models.doc2vec import TaggedDocument, Doc2Vec
from preprocessing import normalize, micro_tokenize
from customlogging import logger
import conf
def preprocess(row):
    """Normalize and tokenize the headline/body pair in *row*.

    Joins whichever of row[0] (headline) and row[1] (body) are non-empty
    with a single space, then runs the normalize + micro_tokenize pipeline.
    """
    parts = [field for field in (row[0], row[1]) if field]
    txt = ' '.join(parts)
    return micro_tokenize(normalize(txt))
def get_post_documents():
logger.debug('Fetching unlabeled posts from database')
con = sqlite3.connect(conf.CORPUSDB)
sql = '''
SELECT ID_Post, COALESCE(Headline, '') || ' ' || COALESCE(Body, '')
FROM Posts
WHERE ID_Post NOT IN (
SELECT DISTINCT ID_Post
FROM Annotations
)
'''
r = con.execute(sql)
pool = multiprocessing.Pool()
while True:
rows = r.fetchmany(100000)
if len(rows) == 0:
break
logger.debug('Normalizing and tokenizing')
wordlists = pool.map(micro_tokenize,
pool.map(normalize, [ r[1] for r in rows ]))
for i, words in enumerate(wordlists):
yield TaggedDocument(words, [ rows[i][0] ])
pool.close()
pool.join()
logger.debug('End of generator')
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s [doc2vec] : %(message)s',
level=logging.INFO)
d2v = Doc2Vec(dm=1, size=conf.D2V_DIMS, negative=5, iter=1,
alpha=conf.D2V_ALPHA, seed=conf.SEED, workers=1)
logger.debug('Building doc2vec vocabulary...')
d2v.build_vocab(get_post_documents())
logger.debug('doc2vec training...')
alpha = conf.D2V_ALPHA
alpha_delta = (conf.D2V_ALPHA - conf.D2V_MINALPHA) / conf.D2V_EPOCHS
for i in range(conf.D2V_EPOCHS):
logger.debug('Epoch %d of %d (alpha = %f)', i+1, conf.D2V_EPOCHS, alpha)
d2v.alpha = alpha
d2v.train(get_post_documents(), report_delay=10.0)
alpha -= alpha_delta
if not os.path.exists(conf.D2V_DIR):
os.mkdir(conf.D2V_DIR)
outfile = os.path.join(conf.D2V_DIR, 'model')
logger.debug('Storing doc2vec object to "%s"' % outfile)
del d2v.docvecs.doctag_syn0
del d2v.docvecs.doctag_syn0_lockf
d2v.save(outfile, pickle_protocol=3)
logger.debug('Finished.')
| en | 0.555386 | SELECT ID_Post, COALESCE(Headline, '') || ' ' || COALESCE(Body, '') FROM Posts WHERE ID_Post NOT IN ( SELECT DISTINCT ID_Post FROM Annotations ) | 2.538418 | 3 |
examples/sound_example.py | RiccardoTOTI/TikTok-Api-1 | 3 | 6614562 | from TikTokApi import TikTokApi
verify_fp = "verify_xxx"
api = TikTokApi(custom_verify_fp=verify_fp)
sound = api.sound(id="7016547803243022337")
for video in sound.videos():
print(video.id)
from TikTokApi import TikTokApi
# NOTE(review): 'verify_xxx' is a placeholder -- supply a real verifyFp
# token for authenticated requests.
verify_fp = "verify_xxx"
api = TikTokApi(custom_verify_fp=verify_fp)
# Look up a sound by its TikTok id and list every video that uses it.
sound = api.sound(id="7016547803243022337")
for video in sound.videos():
    print(video.id)
| none | 1 | 2.100198 | 2 | |
module1-introduction-to-sql/rpg_queries.py | hughjafro/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 6614563 | <gh_stars>0
#!/usr/bin/python
# Import packages
import sqlite3
import pandas as pd

# Create a connection to the data file and grab a cursor.
conn = sqlite3.connect('rpg_db.sqlite3')
curs = conn.cursor()  # was `conn.cursor` (missing call -- bound method, not a cursor)
def select_all_tasks(conn):
'''
Query all rows in the table
'''
# Assignment Questions
# How many total Characters are there?
SELECT COUNT(*) AS total_characters
FROM charactercreator_character;
=302
# How many of each specific subclass?
# CLERICS = 75
SELECT COUNT(*) AS total_clerics
FROM charactercreator_cleric;
# FIGHERS = 68
SELECT COUNT(*) AS total_fighters
FROM charactercreator_fighter;
# MAGES = 108
SELECT COUNT(*) AS total_mages
FROM charactercreator_mage;
# NECROMANCERS = 11
SELECT COUNT(*) AS total_necros
FROM charactercreator_necromancer;
# THIEVES = 51
SELECT COUNT(*) AS total_thieves
FROM charactercreator_thief;
# How many total Items?
SELECT DISTINCT item_id AS item_count
FROM armory_item;
=174
# How many of the Items are weapons? How many are not?
SELECT DISTINCT item_ptr_id AS weapon_count
FROM armory_item;
=37
=174-37
# How many Items does each character have? (Return first 20 rows)
SELECT character_id, COUNT(item_id) as item_num
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20;
# How many Weapons does each character have? (Return first 20 rowdef s)
# On average, how many Items does each Character have?
SELECT AVG(items) AS avg_items
FROM (charactercreator_character_inventory
GROUP BY character_id
# On average, how many Weapons does each character have? | #!/usr/bin/python
# Import packages
import sqlite3
import pandas as pd
# Create a connection to data file and set cursor
conn = sqlite3.connect('rpg_db.sqlite3')
curs = conn.cursor
def select_all_tasks(conn):
'''
Query all rows in the table
'''
# Assignment Questions
# How many total Characters are there?
SELECT COUNT(*) AS total_characters
FROM charactercreator_character;
=302
# How many of each specific subclass?
# CLERICS = 75
SELECT COUNT(*) AS total_clerics
FROM charactercreator_cleric;
# FIGHERS = 68
SELECT COUNT(*) AS total_fighters
FROM charactercreator_fighter;
# MAGES = 108
SELECT COUNT(*) AS total_mages
FROM charactercreator_mage;
# NECROMANCERS = 11
SELECT COUNT(*) AS total_necros
FROM charactercreator_necromancer;
# THIEVES = 51
SELECT COUNT(*) AS total_thieves
FROM charactercreator_thief;
# How many total Items?
SELECT DISTINCT item_id AS item_count
FROM armory_item;
=174
# How many of the Items are weapons? How many are not?
SELECT DISTINCT item_ptr_id AS weapon_count
FROM armory_item;
=37
=174-37
# How many Items does each character have? (Return first 20 rows)
SELECT character_id, COUNT(item_id) as item_num
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20;
# How many Weapons does each character have? (Return first 20 rowdef s)
# On average, how many Items does each Character have?
SELECT AVG(items) AS avg_items
FROM (charactercreator_character_inventory
GROUP BY character_id
# On average, how many Weapons does each character have? | en | 0.943248 | #!/usr/bin/python # Import packages # Create a connection to data file and set cursor Query all rows in the table # Assignment Questions # How many total Characters are there? # How many of each specific subclass? # CLERICS = 75 # FIGHERS = 68 # MAGES = 108 # NECROMANCERS = 11 # THIEVES = 51 # How many total Items? # How many of the Items are weapons? How many are not? # How many Items does each character have? (Return first 20 rows) # How many Weapons does each character have? (Return first 20 rowdef s) # On average, how many Items does each Character have? # On average, how many Weapons does each character have? | 3.748915 | 4 |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/middlewares.py | juniperus/aiohttp-rest-yat | 0 | 6614564 | <reponame>juniperus/aiohttp-rest-yat
from aiohttp import web
import uuid
@web.middleware
async def render_json(request, handler):
    """Middleware: serialize whatever the downstream handler returns
    into an aiohttp JSON response."""
    return web.json_response(await handler(request))
@web.middleware
async def correlation_id(request, handler):
    """Middleware: propagate the client's X-Request-ID header onto the
    response, minting a fresh UUID4 when the client did not send one."""
    response = await handler(request)
    fallback = str(uuid.uuid4())
    response.headers['X-Request-ID'] = request.headers.get('X-Request-ID', fallback)
    return response
import uuid
@web.middleware
async def render_json(request, handler):
response = await handler(request)
return web.json_response(response)
@web.middleware
async def correlation_id(request, handler):
response = await handler(request)
response.headers['X-Request-ID'] = request.headers.get('X-Request-ID', str(uuid.uuid4()))
return response | none | 1 | 2.319277 | 2 | |
plugin/LLDB.framework/Versions/A/Resources/Python/lldb/formatters/objc/CFBitVector.py | filcab/SublimeLLDB | 12 | 6614565 | """
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for CF(Mutable)BitVector
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
# first define some utility functions
def byte_index(abs_pos):
    """Map an absolute bit position to the index of the byte containing it.

    Uses floor division so the result is an int under both Python 2 and 3
    (the original ``abs_pos/8`` would yield a float on Python 3).  The
    unused per-call Logger construction was dropped: this helper runs once
    per bit in the summary hot path.
    """
    return abs_pos // 8
def bit_index(abs_pos):
    """Return the position of *abs_pos* within its byte (0..7, MSB-first).

    The unused per-call Logger construction was dropped: this helper runs
    once per bit in the summary hot path.
    """
    return abs_pos & 7
def get_bit(byte, index):
    """Return bit *index* of *byte*, MSB-first (index 0 = most significant).

    Returns None for an out-of-range index instead of raising — the summary
    provider treats a bad position as "no bit".  The unused per-call Logger
    construction was dropped: this helper runs once per bit in the summary
    hot path.
    """
    if index < 0 or index > 7:
        return None
    return (byte >> (7 - index)) & 1
def grab_array_item_data(pointer,index):
    # Fetch one item of the array *pointer* (an lldb SBValue) points at:
    # GetPointeeData(index, 1) reads a single element at offset *index*.
    logger = lldb.formatters.Logger.Logger()
    return pointer.GetPointeeData(index,1)
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# despite the similary to synthetic children providers, these classes are not
# trying to provide anything but a summary for a CF*BitVector, so they need not
# obey the interface specification for synthetic children providers
class CFBitVectorKnown_SummaryProvider:
    """Summary provider for __CFBitVector / __CFMutableBitVector values
    whose in-memory layout is known.

    Assumed layout (see contents()): a CFRuntimeBase header, then a
    CFIndex bit count, one more CFIndex, then a pointer to the packed
    byte array holding the bits MSB-first — TODO confirm against the
    CoreFoundation sources for the targeted OS version.
    """

    def adjust_for_architecture(self):
        logger = lldb.formatters.Logger.Logger()
        # Cache NSUInteger's byte size for the target architecture; used
        # below to compute the offset of the byte-array pointer.
        self.uiint_size = self.sys_params.types_cache.NSUInteger.GetByteSize()
        pass

    def __init__(self, valobj, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj;
        self.sys_params = params
        # Lazily populate the shared type cache: NSUInteger (width depends
        # on whether the target is 64-bit) and char*.
        if not(self.sys_params.types_cache.NSUInteger):
            if self.sys_params.is_64_bit:
                self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
            else:
                self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
        if not(self.sys_params.types_cache.charptr):
            self.sys_params.types_cache.charptr = self.valobj.GetType().GetBasicType(lldb.eBasicTypeChar).GetPointerType()
        self.update();

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture();

    # we skip the CFRuntimeBase
    # then the next CFIndex is the count
    # then we skip another CFIndex and then we get at a byte array
    # that wraps the individual bits
    def contents(self):
        """Return the bits as a string, space-separated in groups of four,
        or '(empty)' when the vector holds no bits."""
        logger = lldb.formatters.Logger.Logger()
        # The bit count lives immediately after the CFRuntimeBase header.
        count_vo = self.valobj.CreateChildAtOffset("count",self.sys_params.cfruntime_size,
            self.sys_params.types_cache.NSUInteger)
        count = count_vo.GetValueAsUnsigned(0)
        if count == 0:
            return '(empty)'
        # Pointer to the packed bytes: header plus two NSUInteger fields.
        array_vo = self.valobj.CreateChildAtOffset("data",
            self.sys_params.cfruntime_size+2*self.uiint_size,
            self.sys_params.types_cache.charptr)
        data_list = []
        cur_byte_pos = None
        for i in range(0,count):
            if cur_byte_pos == None:
                # First iteration: fetch the byte containing bit 0.
                cur_byte_pos = byte_index(i)
                cur_byte = grab_array_item_data(array_vo,cur_byte_pos)
                cur_byte_val = cur_byte.uint8[0]
            else:
                byte_pos = byte_index(i)
                # do not fetch the pointee data every single time through
                if byte_pos != cur_byte_pos:
                    cur_byte_pos = byte_pos
                    cur_byte = grab_array_item_data(array_vo,cur_byte_pos)
                    cur_byte_val = cur_byte.uint8[0]
            bit = get_bit(cur_byte_val,bit_index(i))
            # Insert a space every 4 bits for readability.
            if (i % 4) == 0:
                data_list.append(' ')
            if bit == 1:
                data_list.append('1')
            else:
                data_list.append('0')
        return ''.join(data_list)
class CFBitVectorUnknown_SummaryProvider:
    """Fallback provider used when the value's layout cannot be determined;
    always reports that no summary is available."""

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj;
        self.sys_params = params
        self.update();

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture();

    def contents(self):
        logger = lldb.formatters.Logger.Logger()
        return '<unable to summarize this CFBitVector>'
def GetSummary_Impl(valobj):
    """Inspect *valobj*'s runtime class and return the matching summary
    provider: the known-layout provider for __CF(Mutable)BitVector, the
    unknown fallback otherwise, or a special-situation wrapper produced by
    the ObjC runtime detection."""
    logger = lldb.formatters.Logger.Logger()
    global statistics
    class_data,wrapper =lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(valobj,statistics)
    if wrapper:
        # Runtime detection already produced a provider/description.
        return wrapper
    name_string = class_data.class_name()
    actual_name = name_string
    # The formatter Logger supports streaming via an overloaded >>.
    logger >> "name string got was " + str(name_string) + " but actual name is " + str(actual_name)
    if class_data.is_cftype():
        # CFBitVectorRef does not expose an actual NSWrapper type, so we have to check that this is
        # an NSCFType and then check we are a pointer-to CFBitVectorRef
        valobj_type = valobj.GetType()
        if valobj_type.IsValid() and valobj_type.IsPointerType():
            valobj_type = valobj_type.GetPointeeType()
            if valobj_type.IsValid():
                actual_name = valobj_type.GetName()
        if actual_name == '__CFBitVector' or actual_name == '__CFMutableBitVector':
            wrapper = CFBitVectorKnown_SummaryProvider(valobj, class_data.sys_params)
            statistics.metric_hit('code_notrun',valobj)
        else:
            wrapper = CFBitVectorUnknown_SummaryProvider(valobj, class_data.sys_params)
            # Python 2 print statement — NOTE(review): looks like leftover
            # debug output to stdout; confirm whether it should be removed.
            print actual_name
    else:
        wrapper = CFBitVectorUnknown_SummaryProvider(valobj, class_data.sys_params)
        # Python 2 print statement — NOTE(review): leftover debug output.
        print name_string
        statistics.metric_hit('unknown_class',valobj.GetName() + " seen as " + name_string)
    return wrapper;
def CFBitVector_SummaryProvider (valobj,dict):
    """Entry point registered with LLDB: produce the one-line summary for a
    CFBitVectorRef / CFMutableBitVectorRef value (or a fallback string)."""
    logger = lldb.formatters.Logger.Logger()
    provider = GetSummary_Impl(valobj);
    if provider != None:
        # Special situations carry a ready-made message instead of contents.
        if isinstance(provider,lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
            return provider.message()
        try:
            summary = provider.contents();
        except:
            # Any failure while reading target memory degrades to "no summary".
            summary = None
        logger >> "summary got from provider: " + str(summary)
        if summary == None or summary == '':
            summary = '<variable is not CFBitVector>'
        return summary
    return 'Summary Unavailable'
def __lldb_init_module(debugger,dict):
    # Called by LLDB when this script is imported; registers the summary
    # function for the two CFBitVector typedefs.
    debugger.HandleCommand("type summary add -F CFBitVector.CFBitVector_SummaryProvider CFBitVectorRef CFMutableBitVectorRef")
| """
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for CF(Mutable)BitVector
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
# first define some utility functions
def byte_index(abs_pos):
logger = lldb.formatters.Logger.Logger()
return abs_pos/8
def bit_index(abs_pos):
logger = lldb.formatters.Logger.Logger()
return abs_pos & 7
def get_bit(byte,index):
logger = lldb.formatters.Logger.Logger()
if index < 0 or index > 7:
return None
return (byte >> (7-index)) & 1
def grab_array_item_data(pointer,index):
logger = lldb.formatters.Logger.Logger()
return pointer.GetPointeeData(index,1)
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# despite the similary to synthetic children providers, these classes are not
# trying to provide anything but a summary for a CF*BitVector, so they need not
# obey the interface specification for synthetic children providers
class CFBitVectorKnown_SummaryProvider:
def adjust_for_architecture(self):
logger = lldb.formatters.Logger.Logger()
self.uiint_size = self.sys_params.types_cache.NSUInteger.GetByteSize()
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
if not(self.sys_params.types_cache.NSUInteger):
if self.sys_params.is_64_bit:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedLong)
else:
self.sys_params.types_cache.NSUInteger = self.valobj.GetType().GetBasicType(lldb.eBasicTypeUnsignedInt)
if not(self.sys_params.types_cache.charptr):
self.sys_params.types_cache.charptr = self.valobj.GetType().GetBasicType(lldb.eBasicTypeChar).GetPointerType()
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
# we skip the CFRuntimeBase
# then the next CFIndex is the count
# then we skip another CFIndex and then we get at a byte array
# that wraps the individual bits
def contents(self):
logger = lldb.formatters.Logger.Logger()
count_vo = self.valobj.CreateChildAtOffset("count",self.sys_params.cfruntime_size,
self.sys_params.types_cache.NSUInteger)
count = count_vo.GetValueAsUnsigned(0)
if count == 0:
return '(empty)'
array_vo = self.valobj.CreateChildAtOffset("data",
self.sys_params.cfruntime_size+2*self.uiint_size,
self.sys_params.types_cache.charptr)
data_list = []
cur_byte_pos = None
for i in range(0,count):
if cur_byte_pos == None:
cur_byte_pos = byte_index(i)
cur_byte = grab_array_item_data(array_vo,cur_byte_pos)
cur_byte_val = cur_byte.uint8[0]
else:
byte_pos = byte_index(i)
# do not fetch the pointee data every single time through
if byte_pos != cur_byte_pos:
cur_byte_pos = byte_pos
cur_byte = grab_array_item_data(array_vo,cur_byte_pos)
cur_byte_val = cur_byte.uint8[0]
bit = get_bit(cur_byte_val,bit_index(i))
if (i % 4) == 0:
data_list.append(' ')
if bit == 1:
data_list.append('1')
else:
data_list.append('0')
return ''.join(data_list)
class CFBitVectorUnknown_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj;
self.sys_params = params
self.update();
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture();
def contents(self):
logger = lldb.formatters.Logger.Logger()
return '<unable to summarize this CFBitVector>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data,wrapper =lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(valobj,statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
actual_name = name_string
logger >> "name string got was " + str(name_string) + " but actual name is " + str(actual_name)
if class_data.is_cftype():
# CFBitVectorRef does not expose an actual NSWrapper type, so we have to check that this is
# an NSCFType and then check we are a pointer-to CFBitVectorRef
valobj_type = valobj.GetType()
if valobj_type.IsValid() and valobj_type.IsPointerType():
valobj_type = valobj_type.GetPointeeType()
if valobj_type.IsValid():
actual_name = valobj_type.GetName()
if actual_name == '__CFBitVector' or actual_name == '__CFMutableBitVector':
wrapper = CFBitVectorKnown_SummaryProvider(valobj, class_data.sys_params)
statistics.metric_hit('code_notrun',valobj)
else:
wrapper = CFBitVectorUnknown_SummaryProvider(valobj, class_data.sys_params)
print actual_name
else:
wrapper = CFBitVectorUnknown_SummaryProvider(valobj, class_data.sys_params)
print name_string
statistics.metric_hit('unknown_class',valobj.GetName() + " seen as " + name_string)
return wrapper;
def CFBitVector_SummaryProvider (valobj,dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj);
if provider != None:
if isinstance(provider,lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.contents();
except:
summary = None
logger >> "summary got from provider: " + str(summary)
if summary == None or summary == '':
summary = '<variable is not CFBitVector>'
return summary
return 'Summary Unavailable'
def __lldb_init_module(debugger,dict):
debugger.HandleCommand("type summary add -F CFBitVector.CFBitVector_SummaryProvider CFBitVectorRef CFMutableBitVectorRef")
| en | 0.81266 | LLDB AppKit formatters part of The LLVM Compiler Infrastructure This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details. # summary provider for CF(Mutable)BitVector # first define some utility functions # despite the similary to synthetic children providers, these classes are not # trying to provide anything but a summary for a CF*BitVector, so they need not # obey the interface specification for synthetic children providers # we skip the CFRuntimeBase # then the next CFIndex is the count # then we skip another CFIndex and then we get at a byte array # that wraps the individual bits # do not fetch the pointee data every single time through # CFBitVectorRef does not expose an actual NSWrapper type, so we have to check that this is # an NSCFType and then check we are a pointer-to CFBitVectorRef | 1.861302 | 2 |
code/train.py | lylinsh/Edge-loss-for-image-inpainting | 4 | 6614566 | import torch
import torch.nn as nn
import torchvision as tv
import numpy as np
import os
from torchvision.models import vgg16
from utils import *
from model import *
from losses import *
from edgeModel import *
from Vgg_models import Vgg16
def model_init(m):
    """Weight-initialization hook for ``Module.apply``.

    Conv layers get Kaiming-normal weights.  The original code chained
    ``normal_ -> xavier_normal_ -> kaiming_normal_`` in place, so only the
    final Kaiming draw survived; the two redundant draws (wasted work and
    wasted RNG consumption) are removed here without changing the effective
    initialization distribution.

    Linear layers get normal weights and zero bias; BatchNorm2d is reset to
    the identity affine transform (weight=1, bias=0).  Other module types
    are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        # Only the last of the three original chained init calls took effect.
        nn.init.kaiming_normal_(m.weight.data)
        # NOTE(review): conv bias (when present) is deliberately left at its
        # default init, matching the original (constant_ was commented out).
    elif isinstance(m, nn.Linear):
        nn.init.normal_(m.weight.data)
        nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
if __name__ == "__main__":
    opt = Config()
    # Select which GPU(s) are visible to this process.
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_num
    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True
    # Checkpoint paths used to resume from a previous run.
    gen1_weights_path = os.path.join(opt.model_pth_pre, "generator1.pth")
    gen2_weights_path = os.path.join(opt.model_pth_pre, "generator2.pth")
    dis_weights_path = os.path.join(opt.model_pth_pre, "discriminator.pth")
    # Build the two-stage generator (coarse G1, refinement G2) and the
    # discriminator.
    netG1 = Generator1()
    netG2 = Generator2()
    netD = Discriminator(opt)
    # If pretrained checkpoints exist, load them and resume from the saved
    # epoch (checkpoint/resume training); otherwise initialize from scratch.
    if os.path.exists(gen1_weights_path):
        gen1_pretrain = torch.load(gen1_weights_path)
        e1 = gen1_pretrain["iteration"]
        gen2_pretrain = torch.load(gen2_weights_path)
        e2 = gen2_pretrain["iteration"]
        dis_pretrain = torch.load(dis_weights_path)
        netG1.load_state_dict(gen1_pretrain["generator1"])
        netG2.load_state_dict(gen2_pretrain["generator2"])
        netD.load_state_dict(dis_pretrain["discriminator"])
        netG1.train()
        netG2.train()
        netD.train()
        # Resume from the later of the two generators' recorded epochs.
        e = max(e1, e2)
        print(e)
        # e = 0
    else:
        e = 0
        b = 0
        netG1.apply(model_init)
        netG2.apply(model_init)
        netD.apply(model_init)
    data_dir = opt.data_dir
    # Optimizers: Adam with the customary GAN betas (0.5, 0.999).
    optimizer_G1 = torch.optim.Adam(netG1.parameters(), opt.lr, betas=(0.5, 0.999))
    optimizer_G2 = torch.optim.Adam(netG2.parameters(), opt.lr, betas=(0.5, 0.999))
    optimizer_D = torch.optim.Adam(netD.parameters(), opt.lr, betas=(0.5, 0.999))
    # Integer targets consumed by loss_gan.
    true_labels = 1
    false_labels = 0
    losses_weight = opt.losses_weight
    losses = {}
    # Load VGG16 as a frozen feature extractor for the perceptual and style
    # losses; copy matching torchvision pretrained weights into the local
    # Vgg16 wrapper.
    vgg = Vgg16().eval()
    vgg_16_pretrained = vgg16(pretrained=True)
    pretrained_dict = vgg_16_pretrained.state_dict()
    vgg_dict = vgg.state_dict()
    pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in vgg_dict.keys()}
    vgg_dict.update(pretrained_dict)
    vgg.load_state_dict(vgg_dict)
    # Pretrained (frozen) edge generator used by the edge loss.
    edgeGenerator = EdgeGenerator().eval()
    if opt.use_gpu:
        edgeGenerator_pretrained = torch.load("./../checkpoints/place2/EdgeModel_gen.pth")
    else:
        edgeGenerator_pretrained = torch.load("./../checkpoints/place2/EdgeModel_gen.pth",
                                              map_location='cpu')
    # Copy only the keys the local EdgeGenerator declares from the
    # checkpoint's "generator" state dict.
    edgeGenerator_dict = edgeGenerator.state_dict()
    value_set = {}
    for k in edgeGenerator_dict.keys():
        value_set[k] = edgeGenerator_pretrained["generator"][k]
    edgeGenerator_dict.update(value_set)
    edgeGenerator.load_state_dict(edgeGenerator_dict)
    # Output directories for sample images and checkpoints.
    if not os.path.isdir(opt.result_pth):
        os.mkdir(opt.result_pth)
    if not os.path.isdir(opt.model_pth):
        os.mkdir(opt.model_pth)
    if opt.use_gpu:
        netG1.cuda()
        netG2.cuda()
        netD.cuda()
        # true_labels, false_labels = true_labels.cuda(), false_labels.cuda()
        vgg = vgg.cuda()
        edgeGenerator = edgeGenerator.cuda()
    # ---- Training loop ----
    for i in range(e, opt.epoches):
        # Load the training set (reshuffled every epoch).
        data = dataSet(data_dir, batchsize=opt.batchsize, shuffle=True)
        # Iterate over mini-batches, updating the network weights.
        for ii, (img, _) in enumerate(data):
            if opt.use_gpu:
                img = img.cuda()
            # Each training image is three tiles concatenated along width:
            # ground truth | mask | masked input.
            img_raw, msk_in, img_in = torch.chunk(img, 3, 3)
            # Map the mask from [-1, 1] to {0, 1}; keep a single channel.
            # Convention below: msk_in == 1 on known pixels, 0 in the hole.
            msk_in = (msk_in + 1) // 2
            msk_in = msk_in[:, 0:1, :, :]
            if i < opt.section1:
                # Phase 1 (epoch < section1): train only the coarse
                # generator G1 with reconstruction-style losses — no
                # adversarial term yet.
                data_in = torch.cat((img_in, 1 - msk_in), 1)
                optimizer_G1.zero_grad()
                # Forward pass.
                img_gen1 = netG1(data_in)
                # Loss terms.
                losses["l1"] = loss_l1(opt, img_raw, img_gen1)
                losses["l2"] = loss_l2(opt, img_raw, img_gen1)
                losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen1*(1-msk_in))
                losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen1, vgg)
                losses["style"] = loss_style(opt, img_raw, img_gen1, vgg)
                losses["edge"] = loss_edge(opt, img_raw, img_gen1, edgeGenerator)
                losses["tv"] = loss_tv(opt, img_gen1)
                # NOTE(review): "l1_hole" is computed but never added to the
                # weighted sum — confirm whether that is intentional.
                loss_G1 = losses_weight["l1"] * losses["l1"] + \
                    losses_weight["l2"] * losses["l2"] + \
                    losses_weight["perpectual"] * losses["perpectual"] + \
                    losses_weight["style"] * losses["style"] + \
                    losses_weight["tv"] * losses["tv"] + \
                    losses_weight["edge"] * losses["edge"]
                print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G1.data))
                # Backpropagation.
                loss_G1.backward()
                optimizer_G1.step()
            elif i < opt.section2:
                # Phase 2 (section1 <= epoch < section2): G1 is frozen via
                # detach(); train the discriminator and the refinement
                # generator G2 adversarially.
                data_in1 = torch.cat((img_in, 1 - msk_in), 1)
                # -- discriminator update --
                optimizer_D.zero_grad()
                img_gen1 = netG1(data_in1).detach()
                # Composite: keep known pixels, fill the hole with G1 output.
                data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
                img_gen2 = netG2(data_in2).detach()
                dis_real, dis_real_s = netD(img_raw)
                dis_gen, dis_gen_s = netD(img_gen2)
                losses["dloss_gen"] = loss_gan(opt, false_labels, dis_gen_s)
                losses["dloss_img"] = loss_gan(opt, true_labels, dis_real_s)
                loss_D = (losses["dloss_gen"] + losses["dloss_img"])/2
                loss_D.backward()
                optimizer_D.step()
                # -- G2 update (reconstruction + adversarial terms) --
                optimizer_G2.zero_grad()
                data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
                img_gen2 = netG2(data_in2)
                dis_gen, dis_gen_s = netD(img_gen2)
                losses["l1"] = loss_l1(opt, img_raw, img_gen2)
                losses["l2"] = loss_l2(opt, img_raw, img_gen2)
                losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen2*(1-msk_in))
                losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen2, vgg)
                losses["style"] = loss_style(opt, img_raw, img_gen2, vgg)
                losses["edge"] = loss_edge(opt, img_raw, img_gen2, edgeGenerator)
                losses["tv"] = loss_tv(opt, img_gen2)
                losses["gloss"] = loss_gan(opt, true_labels, dis_gen_s)
                loss_G2 = losses_weight["l1"] * losses["l1"] + \
                    losses_weight["l2"] * losses["l2"] + \
                    losses_weight["perpectual"] * losses["perpectual"] + \
                    losses_weight["style"] * losses["style"] + \
                    losses_weight["tv"] * losses["tv"] + \
                    losses_weight["edge"] * losses["edge"] + \
                    losses_weight["dcgan"] * losses["gloss"]
                print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G2.data))
                loss_G2.backward()
                optimizer_G2.step()
            else:
                # Phase 3 (epoch >= section2): joint fine-tuning — update
                # the discriminator, then G1, then G2.
                data_in1 = torch.cat((img_in, 1 - msk_in), 1)
                # -- discriminator update --
                optimizer_D.zero_grad()
                img_gen1 = netG1(data_in1).detach()
                data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
                img_gen2 = netG2(data_in2).detach()
                dis_real, dis_real_s = netD(img_raw)
                dis_gen, dis_gen_s = netD(img_gen2)
                losses["dloss_gen"] = loss_gan(opt, false_labels, dis_gen_s)
                losses["dloss_img"] = loss_gan(opt, true_labels, dis_real_s)
                loss_D = (losses["dloss_gen"] + losses["dloss_img"])/2
                loss_D.backward()
                optimizer_D.step()
                # -- G1 update (reconstruction losses only in the sum; the
                # discriminator pass below is not added to loss_G1) --
                optimizer_G1.zero_grad()
                img_gen1 = netG1(data_in1)
                dis_gen, dis_gen_s = netD(img_gen1)
                losses["l1"] = loss_l1(opt, img_raw, img_gen1)
                losses["l2"] = loss_l2(opt, img_raw, img_gen1)
                losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen1*(1-msk_in))
                losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen1, vgg)
                losses["style"] = loss_style(opt, img_raw, img_gen1, vgg)
                losses["edge"] = loss_edge(opt, img_raw, img_gen1, edgeGenerator)
                losses["tv"] = loss_tv(opt, img_gen1)
                loss_G1 = losses_weight["l1"] * losses["l1"] + \
                    losses_weight["l2"] * losses["l2"] + \
                    losses_weight["perpectual"] * losses["perpectual"] + \
                    losses_weight["style"] * losses["style"] + \
                    losses_weight["tv"] * losses["tv"] + \
                    losses_weight["edge"] * losses["edge"]
                print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G1.data))
                loss_G1.backward()
                optimizer_G1.step()
                # -- G2 update (adds the adversarial term) --
                optimizer_G2.zero_grad()
                img_gen1 = netG1(data_in1).detach()
                data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
                img_gen2 = netG2(data_in2)
                dis_gen, dis_gen_s = netD(img_gen2)
                losses["l1"] = loss_l1(opt, img_raw, img_gen2)
                losses["l2"] = loss_l2(opt, img_raw, img_gen2)
                losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen2*(1-msk_in))
                losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen2, vgg)
                losses["style"] = loss_style(opt, img_raw, img_gen2, vgg)
                losses["edge"] = loss_edge(opt, img_raw, img_gen2, edgeGenerator)
                losses["tv"] = loss_tv(opt, img_gen2)
                losses["gloss"] = loss_gan(opt, true_labels, dis_gen_s)
                loss_G2 = losses_weight["l1"] * losses["l1"] + \
                    losses_weight["l2"] * losses["l2"] + \
                    losses_weight["perpectual"] * losses["perpectual"] + \
                    losses_weight["style"] * losses["style"] + \
                    losses_weight["tv"] * losses["tv"] + \
                    losses_weight["edge"] * losses["edge"] + \
                    losses_weight["dcgan"] * losses["gloss"]
                print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G2.data))
                loss_G2.backward()
                optimizer_G2.step()
            # Periodically dump sample images (every 2nd epoch, every 25th
            # batch) and print the individual loss terms.
            if (i+1) % 2 == 0 and (ii+1) % 25 == 0:
                tv.utils.save_image(img_in[0], '%s/%s_img.png' %(opt.result_pth, ii+1), normalize=True)
                tv.utils.save_image(img_gen1[0], '%s/%s_rst1.png' %(opt.result_pth, ii+1), normalize=True)
                if i >= opt.section1:
                    tv.utils.save_image(img_gen2[0], '%s/%s_rst2.png' %(opt.result_pth, ii+1), normalize=True)
                if i < opt.section1:
                    print("l1:{:.5f} l2:{:.5f} p:{:.5f} s:{:.5f} e:{:.5f} tv:{:.5f}".format(
                        losses["l1"].data, losses["l2"].data, losses["perpectual"].data,
                        losses["style"].data, losses["edge"].data, losses["tv"].data))
                else:
                    print("l1:{:.5f} l2:{:.5f} p:{:.5f} s:{:.5f} e:{:.5f} tv:{:.5f} dc_g:{:.5f}".format(
                        losses["l1"].data, losses["l2"].data, losses["perpectual"].data,
                        losses["style"].data, losses["edge"].data, losses["tv"].data, losses["gloss"].data))
        # Save checkpoints: a numbered snapshot every 10 epochs, otherwise
        # overwrite the rolling "latest" files.
        # NOTE(review): indentation in the source dump is ambiguous; saving
        # is placed inside the epoch loop (checkpoint every epoch), which
        # matches the per-epoch numbered filenames — confirm against the
        # original file.
        if (i+1)%10 == 0:
            gen1_weights_path = os.path.join(opt.model_pth, "generator1_"+str(i+1)+".pth")
            gen2_weights_path = os.path.join(opt.model_pth, "generator2_"+str(i+1)+".pth")
            dis_weights_path = os.path.join(opt.model_pth, "discriminator_"+str(i+1)+".pth")
        else:
            gen1_weights_path = os.path.join(opt.model_pth, "generator1.pth")
            gen2_weights_path = os.path.join(opt.model_pth, "generator2.pth")
            dis_weights_path = os.path.join(opt.model_pth, "discriminator.pth")
        if not os.path.isdir(opt.model_pth):
            os.mkdir(opt.model_pth)
        torch.save({
            'iteration': i+1,
            'generator1': netG1.state_dict()
        }, gen1_weights_path)
        torch.save({
            'iteration': i+1,
            'generator2': netG2.state_dict()
        }, gen2_weights_path)
        torch.save({
            'discriminator': netD.state_dict()
        }, dis_weights_path)
| import torch
import torch.nn as nn
import torchvision as tv
import numpy as np
import os
from torchvision.models import vgg16
from utils import *
from model import *
from losses import *
from edgeModel import *
from Vgg_models import Vgg16
def model_init(m):
'''模型初始化'''
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight.data)
nn.init.xavier_normal_(m.weight.data)
nn.init.kaiming_normal_(m.weight.data)
# nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight.data)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if __name__ == "__main__":
opt = Config()
# 指定GPU
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_num
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
# 初始化权重保存路径
gen1_weights_path = os.path.join(opt.model_pth_pre, "generator1.pth")
gen2_weights_path = os.path.join(opt.model_pth_pre, "generator2.pth")
dis_weights_path = os.path.join(opt.model_pth_pre, "discriminator.pth")
# 初始化模型
netG1 = Generator1()
netG2 = Generator2()
netD = Discriminator(opt)
# 判断是否存在预训练模型,若存在,则先加载模型,否则进行初始化
# 常用于断点训练
if os.path.exists(gen1_weights_path):
gen1_pretrain = torch.load(gen1_weights_path)
e1 = gen1_pretrain["iteration"]
gen2_pretrain = torch.load(gen2_weights_path)
e2 = gen2_pretrain["iteration"]
dis_pretrain = torch.load(dis_weights_path)
netG1.load_state_dict(gen1_pretrain["generator1"])
netG2.load_state_dict(gen2_pretrain["generator2"])
netD.load_state_dict(dis_pretrain["discriminator"])
netG1.train()
netG2.train()
netD.train()
e = max(e1, e2)
print(e)
# e = 0
else:
e = 0
b = 0
netG1.apply(model_init)
netG2.apply(model_init)
netD.apply(model_init)
data_dir = opt.data_dir
# 初始化优化函数
optimizer_G1 = torch.optim.Adam(netG1.parameters(), opt.lr, betas=(0.5, 0.999))
optimizer_G2 = torch.optim.Adam(netG2.parameters(), opt.lr, betas=(0.5, 0.999))
optimizer_D = torch.optim.Adam(netD.parameters(), opt.lr, betas=(0.5, 0.999))
true_labels = 1
false_labels = 0
losses_weight = opt.losses_weight
losses = {}
# 加载VGG16,用于计算损失函数
vgg = Vgg16().eval()
vgg_16_pretrained = vgg16(pretrained=True)
pretrained_dict = vgg_16_pretrained.state_dict()
vgg_dict = vgg.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in vgg_dict.keys()}
vgg_dict.update(pretrained_dict)
vgg.load_state_dict(vgg_dict)
edgeGenerator = EdgeGenerator().eval()
if opt.use_gpu:
edgeGenerator_pretrained = torch.load("./../checkpoints/place2/EdgeModel_gen.pth")
else:
edgeGenerator_pretrained = torch.load("./../checkpoints/place2/EdgeModel_gen.pth",
map_location='cpu')
edgeGenerator_dict = edgeGenerator.state_dict()
value_set = {}
for k in edgeGenerator_dict.keys():
value_set[k] = edgeGenerator_pretrained["generator"][k]
edgeGenerator_dict.update(value_set)
edgeGenerator.load_state_dict(edgeGenerator_dict)
if not os.path.isdir(opt.result_pth):
os.mkdir(opt.result_pth)
if not os.path.isdir(opt.model_pth):
os.mkdir(opt.model_pth)
if opt.use_gpu:
netG1.cuda()
netG2.cuda()
netD.cuda()
# true_labels, false_labels = true_labels.cuda(), false_labels.cuda()
vgg = vgg.cuda()
edgeGenerator = edgeGenerator.cuda()
# 开始训练
for i in range(e, opt.epoches):
# 调用数据加载函数,加载训练集
data = dataSet(data_dir, batchsize=opt.batchsize, shuffle=True)
# 根据训练集,对网络模型的权重进行迭代更新
for ii, (img, _) in enumerate(data):
if opt.use_gpu:
img = img.cuda()
img_raw, msk_in, img_in = torch.chunk(img, 3, 3)
msk_in = (msk_in + 1) // 2
msk_in = msk_in[:, 0:1, :, :]
if i < opt.section1:
data_in = torch.cat((img_in, 1 - msk_in), 1)
optimizer_G1.zero_grad()
# 前向
img_gen1 = netG1(data_in)
# 计算损失函数
losses["l1"] = loss_l1(opt, img_raw, img_gen1)
losses["l2"] = loss_l2(opt, img_raw, img_gen1)
losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen1*(1-msk_in))
losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen1, vgg)
losses["style"] = loss_style(opt, img_raw, img_gen1, vgg)
losses["edge"] = loss_edge(opt, img_raw, img_gen1, edgeGenerator)
losses["tv"] = loss_tv(opt, img_gen1)
loss_G1 = losses_weight["l1"] * losses["l1"] + \
losses_weight["l2"] * losses["l2"] + \
losses_weight["perpectual"] * losses["perpectual"] + \
losses_weight["style"] * losses["style"] + \
losses_weight["tv"] * losses["tv"] + \
losses_weight["edge"] * losses["edge"]
print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G1.data))
# 反向传播
loss_G1.backward()
optimizer_G1.step()
elif i < opt.section2:
# 第二阶段的训练
data_in1 = torch.cat((img_in, 1 - msk_in), 1)
optimizer_D.zero_grad()
img_gen1 = netG1(data_in1).detach()
data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
img_gen2 = netG2(data_in2).detach()
dis_real, dis_real_s = netD(img_raw)
dis_gen, dis_gen_s = netD(img_gen2)
losses["dloss_gen"] = loss_gan(opt, false_labels, dis_gen_s)
losses["dloss_img"] = loss_gan(opt, true_labels, dis_real_s)
loss_D = (losses["dloss_gen"] + losses["dloss_img"])/2
loss_D.backward()
optimizer_D.step()
optimizer_G2.zero_grad()
data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
img_gen2 = netG2(data_in2)
dis_gen, dis_gen_s = netD(img_gen2)
losses["l1"] = loss_l1(opt, img_raw, img_gen2)
losses["l2"] = loss_l2(opt, img_raw, img_gen2)
losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen2*(1-msk_in))
losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen2, vgg)
losses["style"] = loss_style(opt, img_raw, img_gen2, vgg)
losses["edge"] = loss_edge(opt, img_raw, img_gen2, edgeGenerator)
losses["tv"] = loss_tv(opt, img_gen2)
losses["gloss"] = loss_gan(opt, true_labels, dis_gen_s)
loss_G2 = losses_weight["l1"] * losses["l1"] + \
losses_weight["l2"] * losses["l2"] + \
losses_weight["perpectual"] * losses["perpectual"] + \
losses_weight["style"] * losses["style"] + \
losses_weight["tv"] * losses["tv"] + \
losses_weight["edge"] * losses["edge"] + \
losses_weight["dcgan"] * losses["gloss"]
print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G2.data))
loss_G2.backward()
optimizer_G2.step()
else:
data_in1 = torch.cat((img_in, 1 - msk_in), 1)
optimizer_D.zero_grad()
img_gen1 = netG1(data_in1).detach()
data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
img_gen2 = netG2(data_in2).detach()
dis_real, dis_real_s = netD(img_raw)
dis_gen, dis_gen_s = netD(img_gen2)
losses["dloss_gen"] = loss_gan(opt, false_labels, dis_gen_s)
losses["dloss_img"] = loss_gan(opt, true_labels, dis_real_s)
loss_D = (losses["dloss_gen"] + losses["dloss_img"])/2
loss_D.backward()
optimizer_D.step()
optimizer_G1.zero_grad()
img_gen1 = netG1(data_in1)
dis_gen, dis_gen_s = netD(img_gen1)
losses["l1"] = loss_l1(opt, img_raw, img_gen1)
losses["l2"] = loss_l2(opt, img_raw, img_gen1)
losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen1*(1-msk_in))
losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen1, vgg)
losses["style"] = loss_style(opt, img_raw, img_gen1, vgg)
losses["edge"] = loss_edge(opt, img_raw, img_gen1, edgeGenerator)
losses["tv"] = loss_tv(opt, img_gen1)
loss_G1 = losses_weight["l1"] * losses["l1"] + \
losses_weight["l2"] * losses["l2"] + \
losses_weight["perpectual"] * losses["perpectual"] + \
losses_weight["style"] * losses["style"] + \
losses_weight["tv"] * losses["tv"] + \
losses_weight["edge"] * losses["edge"]
print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G1.data))
loss_G1.backward()
optimizer_G1.step()
optimizer_G2.zero_grad()
img_gen1 = netG1(data_in1).detach()
data_in2 = img_in * msk_in + img_gen1 * (1 - msk_in)
img_gen2 = netG2(data_in2)
dis_gen, dis_gen_s = netD(img_gen2)
losses["l1"] = loss_l1(opt, img_raw, img_gen2)
losses["l2"] = loss_l2(opt, img_raw, img_gen2)
losses["l1_hole"] = loss_l1(opt, img_raw*(1-msk_in), img_gen2*(1-msk_in))
losses["perpectual"] = loss_perpectual(opt, img_raw, img_gen2, vgg)
losses["style"] = loss_style(opt, img_raw, img_gen2, vgg)
losses["edge"] = loss_edge(opt, img_raw, img_gen2, edgeGenerator)
losses["tv"] = loss_tv(opt, img_gen2)
losses["gloss"] = loss_gan(opt, true_labels, dis_gen_s)
loss_G2 = losses_weight["l1"] * losses["l1"] + \
losses_weight["l2"] * losses["l2"] + \
losses_weight["perpectual"] * losses["perpectual"] + \
losses_weight["style"] * losses["style"] + \
losses_weight["tv"] * losses["tv"] + \
losses_weight["edge"] * losses["edge"] + \
losses_weight["dcgan"] * losses["gloss"]
print("epochs:{:d} batches:{:d} gloss:{:.3f}".format(i, ii, loss_G2.data))
loss_G2.backward()
optimizer_G2.step()
if (i+1) % 2 == 0 and (ii+1) % 25 == 0:
tv.utils.save_image(img_in[0], '%s/%s_img.png' %(opt.result_pth, ii+1), normalize=True)
tv.utils.save_image(img_gen1[0], '%s/%s_rst1.png' %(opt.result_pth, ii+1), normalize=True)
if i >= opt.section1:
tv.utils.save_image(img_gen2[0], '%s/%s_rst2.png' %(opt.result_pth, ii+1), normalize=True)
if i < opt.section1:
print("l1:{:.5f} l2:{:.5f} p:{:.5f} s:{:.5f} e:{:.5f} tv:{:.5f}".format(
losses["l1"].data, losses["l2"].data, losses["perpectual"].data,
losses["style"].data, losses["edge"].data, losses["tv"].data))
else:
print("l1:{:.5f} l2:{:.5f} p:{:.5f} s:{:.5f} e:{:.5f} tv:{:.5f} dc_g:{:.5f}".format(
losses["l1"].data, losses["l2"].data, losses["perpectual"].data,
losses["style"].data, losses["edge"].data, losses["tv"].data, losses["gloss"].data))
# 保存模型
if (i+1)%10 == 0:
gen1_weights_path = os.path.join(opt.model_pth, "generator1_"+str(i+1)+".pth")
gen2_weights_path = os.path.join(opt.model_pth, "generator2_"+str(i+1)+".pth")
dis_weights_path = os.path.join(opt.model_pth, "discriminator_"+str(i+1)+".pth")
else:
gen1_weights_path = os.path.join(opt.model_pth, "generator1.pth")
gen2_weights_path = os.path.join(opt.model_pth, "generator2.pth")
dis_weights_path = os.path.join(opt.model_pth, "discriminator.pth")
if not os.path.isdir(opt.model_pth):
os.mkdir(opt.model_pth)
torch.save({
'iteration': i+1,
'generator1': netG1.state_dict()
}, gen1_weights_path)
torch.save({
'iteration': i+1,
'generator2': netG2.state_dict()
}, gen2_weights_path)
torch.save({
'discriminator': netD.state_dict()
}, dis_weights_path)
| zh | 0.884724 | 模型初始化 # nn.init.constant_(m.bias, 0) # 指定GPU # 初始化权重保存路径 # 初始化模型 # 判断是否存在预训练模型,若存在,则先加载模型,否则进行初始化 # 常用于断点训练 # e = 0 # 初始化优化函数 # 加载VGG16,用于计算损失函数 # true_labels, false_labels = true_labels.cuda(), false_labels.cuda() # 开始训练 # 调用数据加载函数,加载训练集 # 根据训练集,对网络模型的权重进行迭代更新 # 前向 # 计算损失函数 # 反向传播 # 第二阶段的训练 # 保存模型 | 2.355589 | 2 |
howto/2.py | wannaphongcom/numfa_server | 0 | 6614567 | # -*- coding: utf-8 -*-
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
chatbot = ChatBot(
'Charlie', # ชื่อแชตบ็อต
storage_adapter='chatterbot.storage.SQLStorageAdapter', # กำหนดการจัดเก็บ ในที่นี้เลือก chatterbot.storage.SQLStorageAdapter เก็บเป็น Sqllite
database='Charlie.sqlite3' # ที่ตั้งฐานข้อมูล
)
chatbot.set_trainer(ChatterBotCorpusTrainer) # กำหนดให้ Train จากชุดข้อมูลของ Chatterbot
chatbot.train(
"chatterbot.corpus.english"
) # เรียกใช้ชุดข้อมูล chatterbot.corpus.english
text=""
while True:
text=input("Text : ")
if text=="exit":
break
response = chatbot.get_response(text)
print(response) | # -*- coding: utf-8 -*-
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
chatbot = ChatBot(
'Charlie', # ชื่อแชตบ็อต
storage_adapter='chatterbot.storage.SQLStorageAdapter', # กำหนดการจัดเก็บ ในที่นี้เลือก chatterbot.storage.SQLStorageAdapter เก็บเป็น Sqllite
database='Charlie.sqlite3' # ที่ตั้งฐานข้อมูล
)
chatbot.set_trainer(ChatterBotCorpusTrainer) # กำหนดให้ Train จากชุดข้อมูลของ Chatterbot
chatbot.train(
"chatterbot.corpus.english"
) # เรียกใช้ชุดข้อมูล chatterbot.corpus.english
text=""
while True:
text=input("Text : ")
if text=="exit":
break
response = chatbot.get_response(text)
print(response) | th | 0.99475 | # -*- coding: utf-8 -*- # ชื่อแชตบ็อต # กำหนดการจัดเก็บ ในที่นี้เลือก chatterbot.storage.SQLStorageAdapter เก็บเป็น Sqllite # ที่ตั้งฐานข้อมูล # กำหนดให้ Train จากชุดข้อมูลของ Chatterbot # เรียกใช้ชุดข้อมูล chatterbot.corpus.english | 3.181229 | 3 |
pytorch/discriminator.py | skoc/julia-project | 0 | 6614568 | <reponame>skoc/julia-project
def FC3DDiscriminator():
pass | def FC3DDiscriminator():
pass | none | 1 | 0.905682 | 1 | |
xu/src/res/Manage.py | sonnts996/XuCompa-Request | 0 | 6614569 | import os
from xu.src.res import resources, resource2qrc
def initResource():
    """Register the compiled Qt resource data with the application."""
    resources.qInitResources()
def runQrc(build=False):
    """Regenerate the .qrc resource file for the resource package.

    When ``build`` is true the compiled resource module is rebuilt as well;
    otherwise only the .qrc file is regenerated.
    """
    package_dir = os.path.dirname(resource2qrc.__file__)
    if build:
        resource2qrc.run(package_dir)
    else:
        resource2qrc.runWithoutBuildRes(package_dir)
| import os
from xu.src.res import resources, resource2qrc
def initResource():
    """Register the compiled Qt resource data with the application."""
    resources.qInitResources()
def runQrc(build=False):
    """Regenerate the .qrc resource file for the resource package.

    When ``build`` is true the compiled resource module is rebuilt as well;
    otherwise only the .qrc file is regenerated.
    """
    package_dir = os.path.dirname(resource2qrc.__file__)
    if build:
        resource2qrc.run(package_dir)
    else:
        resource2qrc.runWithoutBuildRes(package_dir)
| none | 1 | 2.11016 | 2 | |
Server/urls.py | AkashSasank/Covid-19-X-ray-scanner | 1 | 6614570 | host_url = 'http://127.0.0.1:5000'
# host_url = "https://covid-xray-scanner.herokuapp.com"
# Route map for the app: symbolic page name -> absolute URL on `host_url`.
urls = {
    'home': host_url,
    "test_url": host_url + "/x-ray-test",
    "results_url": host_url + "/test-result",
    "error": host_url + "/error500"
}
| host_url = 'http://127.0.0.1:5000'
# host_url = "https://covid-xray-scanner.herokuapp.com"
# Route map for the app: symbolic page name -> absolute URL on `host_url`.
urls = {
    'home': host_url,
    "test_url": host_url + "/x-ray-test",
    "results_url": host_url + "/test-result",
    "error": host_url + "/error500"
}
| en | 0.716147 | # host_url = "https://covid-xray-scanner.herokuapp.com" | 1.622049 | 2 |
aslam_offline_calibration/kalibr/python/get_local_time.py | JzHuai0108/kalibr | 10 | 6614571 | """
Load data with local and remote time, get the corrected local time synced to the remote time, and save the timestamps.
"""
import os
import numpy as np
import sm
import argparse
def parseArgs():
    """Parse command-line arguments for the timestamp-correction script.

    Returns the argparse namespace with the input log path, the 1-based
    column indices of the local and remote timestamps, and an optional
    output path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('inlog',
                        help="data log file in csv format. Each row has local host time and remote device time in seconds")
    parser.add_argument(
        '--localtimeindex',
        type=int, default=1,
        help="1-based column index of the local host time in the log. (default: %(default)s)"
    )
    parser.add_argument(
        '--remotetimeindex',
        type=int, default=2,
        # Fixed copy-paste error: this column holds the *remote device* time.
        help="1-based column index of the remote device time in the log. (default: %(default)s)"
    )
    parser.add_argument(
        '--outlog',
        help="output log file. (default: %(default)s)")
    return parser.parse_args()
def main():
    """Correct the local timestamps against the remote clock and save them."""
    args = parseArgs()
    # Column indices are given 1-based on the command line.
    table = np.loadtxt(args.inlog, delimiter=',')
    remote_col = table[:, args.remotetimeindex - 1]
    local_col = table[:, args.localtimeindex - 1]
    if args.outlog:
        out_path = args.outlog
    else:
        out_path = os.path.splitext(args.inlog)[0] + "-syncedlocaltimes.log"
    corrector = sm.DoubleTimestampCorrector()
    # First pass: feed every (remote, local) pair into the estimator.
    for remote, local in zip(remote_col, local_col):
        corrector.correctTimestamp(remote, local)
    # Second pass: query the fitted model for corrected local times.
    corrected = [corrector.getLocalTime(remote) for remote in remote_col]
    np.savetxt(out_path, corrected, fmt="%.9f", delimiter=",")
    print('Saved corrected local time in {}'.format(out_path))
if __name__ == '__main__':
main()
| """
Load data with local and remote time, get the corrected local time synced to the remote time, and save the timestamps.
"""
import os
import numpy as np
import sm
import argparse
def parseArgs():
    """Parse command-line arguments for the timestamp-correction script.

    Returns the argparse namespace with the input log path, the 1-based
    column indices of the local and remote timestamps, and an optional
    output path.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('inlog',
                        help="data log file in csv format. Each row has local host time and remote device time in seconds")
    parser.add_argument(
        '--localtimeindex',
        type=int, default=1,
        help="1-based column index of the local host time in the log. (default: %(default)s)"
    )
    parser.add_argument(
        '--remotetimeindex',
        type=int, default=2,
        # Fixed copy-paste error: this column holds the *remote device* time.
        help="1-based column index of the remote device time in the log. (default: %(default)s)"
    )
    parser.add_argument(
        '--outlog',
        help="output log file. (default: %(default)s)")
    return parser.parse_args()
def main():
    """Correct the local timestamps against the remote clock and save them."""
    args = parseArgs()
    # Column indices are given 1-based on the command line.
    table = np.loadtxt(args.inlog, delimiter=',')
    remote_col = table[:, args.remotetimeindex - 1]
    local_col = table[:, args.localtimeindex - 1]
    if args.outlog:
        out_path = args.outlog
    else:
        out_path = os.path.splitext(args.inlog)[0] + "-syncedlocaltimes.log"
    corrector = sm.DoubleTimestampCorrector()
    # First pass: feed every (remote, local) pair into the estimator.
    for remote, local in zip(remote_col, local_col):
        corrector.correctTimestamp(remote, local)
    # Second pass: query the fitted model for corrected local times.
    corrected = [corrector.getLocalTime(remote) for remote in remote_col]
    np.savetxt(out_path, corrected, fmt="%.9f", delimiter=",")
    print('Saved corrected local time in {}'.format(out_path))
main()
| en | 0.887118 | Load data with local and remote time, get the corrected local time synced to the remote time, and save the timestamps. | 2.965813 | 3 |
app/db/models/dues_and_cope_track_2yrs.py | johnebehr/tseu_sandbox | 0 | 6614572 | from sqlalchemy import Table
from app.db.database import Base, metadata
class Dues_And_Cope_Track_2yrs(Base):
"""Map the existing dues_and_cope_track_2yrs table"""
__table__ = Table("dues_and_cope_track_2yrs", metadata, autoload=True) | from sqlalchemy import Table
from app.db.database import Base, metadata
class Dues_And_Cope_Track_2yrs(Base):
"""Map the existing dues_and_cope_track_2yrs table"""
__table__ = Table("dues_and_cope_track_2yrs", metadata, autoload=True) | en | 0.197918 | Map the existing dues_and_cope_track_2yrs table | 2.239404 | 2 |
test.py | bricdu/img_recognition | 1 | 6614573 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import tensorflow as tf
import numpy as np
import os,cv2,glob
import sys,argparse
import matplotlib.pyplot as plt
import time
# In[4]:
# Input geometry expected by the trained network.
image_size=128
num_channels=3
# Accumulators filled while scanning the test directory.
images=[]           # resized image arrays
result_label=[]     # predicted class index per image
dir_label=[]        # ground-truth label per image (1 = dog, 0 otherwise)
dir_name=[]         # file name per image
fault_file_name=[]  # names of misclassified files
path="./data/12/"               # directory of images to classify
train_path = './data/dc_data/'  # training data root (one subdirectory per class)
def get_cls(train_path):
    """Return the sorted class names found under ``train_path``.

    Each immediate subdirectory of ``train_path`` is one class; its basename
    is the class label.
    """
    glob_path = train_path + '*'
    full_files = sorted(glob.glob(glob_path))
    # Use os.path.basename instead of split(sep='\\') so the code works with
    # both POSIX ('/') and Windows ('\\') path separators.
    classes = [os.path.basename(p.rstrip('/\\')) for p in full_files]
    print('类别: ' + str(classes))
    return classes
classes=get_cls(train_path)
#print('类别: ' + str(classes))
def show_pic(img,name,top):
    """Display an image with its top-1 class name and probability.

    img  -- image array to show
    name -- predicted class label
    top  -- probability of the predicted class (shown rounded to 4 digits)
    """
    plt.figure('show_img')
    #img=np.multiply(img,255.0)
    # print(img)
    plt.imshow(img)
    plt.axis('off')
    plt.title('top1 cls: '+name + ' PR: ' + str(round(top,4))) # figure title
    plt.text(30, 70, name,
             bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10}) # overlay the label on the image
    plt.show()
def get_files(file_name):
    """Record one file name and its binary label in the module-level lists.

    File names follow the pattern 'dog.1.jpg' / 'cat.2.jpg', so the species
    is the token before the first dot; 'dog' maps to label 1, anything else
    to label 0.
    """
    dir_name.append(file_name)
    species = file_name.split(sep='.')[0]
    dir_label.append(1 if species == 'dog' else 0)
def totla_file():
    """Print how many dog and cat samples were recorded in ``dir_label``.

    ``get_files`` stores label 1 for 'dog' files and 0 otherwise, so label 1
    must be counted as a dog.  (The original version incremented the cat
    counter for label 1, which swapped the two printed totals.)
    """
    cat_num = 0
    dog_num = 0
    for label in dir_label:
        if label == 1:
            dog_num += 1
        else:
            cat_num += 1
    print("狗有 %d 个,猫有 %d 个"%(dog_num,cat_num))
def find_fault(result,original):
    """Compare predicted labels with the ground truth.

    Returns ``(accuracy, flags)`` where ``flags[i]`` is 1 when ``result[i]``
    differs from ``original[i]`` and 0 otherwise.
    """
    fault_label = [int(result[i] != truth) for i, truth in enumerate(original)]
    fault_num = sum(fault_label)
    correct_num = len(original) - fault_num
    pricent = correct_num / len(original)
    return pricent , fault_label
# --- Load every image in `path`, record its name/label, and resize it. ---
direct=os.listdir(path)
for file in direct:
    image=cv2.imread(path+file)
    #print("adress:",path+file)
    get_files(file)
    image=cv2.resize(image,(image_size,image_size),0,0,cv2.INTER_LINEAR)
    images.append(image)
totla_file()
# Normalize pixel values to [0, 1] for the network.
images=np.array(images,dtype=np.float32)
check_img = images
images=images.astype('float32')
images=np.multiply(images,1.0/255.0)
# Restore the trained graph and its weights from the checkpoint.
sess=tf.Session()
saver=tf.train.import_meta_graph('./model_25k/dog-cat.ckpt-118849.meta')
saver.restore(sess,'./model_25k/dog-cat.ckpt-118849')
check_num = 0
start_time = time.time()
# Classify the images one at a time.
for img in images:
    x_batch=img.reshape(1,image_size,image_size,num_channels)
    #sess=tf.Session()
    #step1: network structure graph
    #saver=tf.train.import_meta_graph('./dogs-cats-model/dog-cat.ckpt-7496.meta')
    #step2: load the weight parameters
    #saver.restore(sess,'./dogs-cats-model/dog-cat.ckpt-7496')
    # Fetch the default graph and the tensors needed for inference.
    graph=tf.get_default_graph()
    y_pred=graph.get_tensor_by_name("y_pred:0")
    x=graph.get_tensor_by_name("x:0")
    y_true=graph.get_tensor_by_name("y_true:0")
    y_test_images=np.zeros((1,2))
    feed_dict_testing={x:x_batch,y_true:y_test_images}
    result=sess.run(y_pred,feed_dict_testing)
    # res_label=['dog','cat']
    res_label=classes
    result_num=result.argmax()
    ######################################################
    show_pic(img,res_label[result_num],result[0][result_num])
    #print(res_label[result_num])
    # print(img)
    #######################################################
    #check_num+=1
    #time.sleep(1)
    result_label.append (result_num )
end_time = time.time()
# '耗时' = elapsed time.
print('耗时: ' + str(end_time - start_time) + 's')
# Compare predictions with the ground truth and report accuracy.
pre_corr , fault = find_fault(result_label,dir_label)
#print(pre_corr,fault)
for i in range(len(result_label)):
    if fault[i]==1:
        fault_file_name.append(dir_name[i])
# '正确率为' = accuracy; '错误的文件名为' = misclassified file names.
print("正确率为: " + str(round(pre_corr,5)*100) + '%')
print("错误的文件名为: ",fault_file_name)
# In[ ]:
# In[ ]:
| #!/usr/bin/env python
# coding: utf-8
# In[1]:
import tensorflow as tf
import numpy as np
import os,cv2,glob
import sys,argparse
import matplotlib.pyplot as plt
import time
# In[4]:
# Input geometry expected by the trained network.
image_size=128
num_channels=3
# Accumulators filled while scanning the test directory.
images=[]           # resized image arrays
result_label=[]     # predicted class index per image
dir_label=[]        # ground-truth label per image (1 = dog, 0 otherwise)
dir_name=[]         # file name per image
fault_file_name=[]  # names of misclassified files
path="./data/12/"               # directory of images to classify
train_path = './data/dc_data/'  # training data root (one subdirectory per class)
def get_cls(train_path):
    """Return the sorted class names found under ``train_path``.

    Each immediate subdirectory of ``train_path`` is one class; its basename
    is the class label.
    """
    glob_path = train_path + '*'
    full_files = sorted(glob.glob(glob_path))
    # Use os.path.basename instead of split(sep='\\') so the code works with
    # both POSIX ('/') and Windows ('\\') path separators.
    classes = [os.path.basename(p.rstrip('/\\')) for p in full_files]
    print('类别: ' + str(classes))
    return classes
classes=get_cls(train_path)
#print('类别: ' + str(classes))
def show_pic(img,name,top):
    """Display an image with its top-1 class name and probability.

    img  -- image array to show
    name -- predicted class label
    top  -- probability of the predicted class (shown rounded to 4 digits)
    """
    plt.figure('show_img')
    #img=np.multiply(img,255.0)
    # print(img)
    plt.imshow(img)
    plt.axis('off')
    plt.title('top1 cls: '+name + ' PR: ' + str(round(top,4))) # figure title
    plt.text(30, 70, name,
             bbox={'facecolor': 'white', 'alpha': 0.8, 'pad': 10}) # overlay the label on the image
    plt.show()
def get_files(file_name):
    """Record one file name and its binary label in the module-level lists.

    File names follow the pattern 'dog.1.jpg' / 'cat.2.jpg', so the species
    is the token before the first dot; 'dog' maps to label 1, anything else
    to label 0.
    """
    dir_name.append(file_name)
    species = file_name.split(sep='.')[0]
    dir_label.append(1 if species == 'dog' else 0)
def totla_file():
    """Print how many dog and cat samples were recorded in ``dir_label``.

    ``get_files`` stores label 1 for 'dog' files and 0 otherwise, so label 1
    must be counted as a dog.  (The original version incremented the cat
    counter for label 1, which swapped the two printed totals.)
    """
    cat_num = 0
    dog_num = 0
    for label in dir_label:
        if label == 1:
            dog_num += 1
        else:
            cat_num += 1
    print("狗有 %d 个,猫有 %d 个"%(dog_num,cat_num))
def find_fault(result,original):
    """Compare predicted labels with the ground truth.

    Returns ``(accuracy, flags)`` where ``flags[i]`` is 1 when ``result[i]``
    differs from ``original[i]`` and 0 otherwise.
    """
    fault_label = [int(result[i] != truth) for i, truth in enumerate(original)]
    fault_num = sum(fault_label)
    correct_num = len(original) - fault_num
    pricent = correct_num / len(original)
    return pricent , fault_label
# --- Load every image in `path`, record its name/label, and resize it. ---
direct=os.listdir(path)
for file in direct:
    image=cv2.imread(path+file)
    #print("adress:",path+file)
    get_files(file)
    image=cv2.resize(image,(image_size,image_size),0,0,cv2.INTER_LINEAR)
    images.append(image)
totla_file()
# Normalize pixel values to [0, 1] for the network.
images=np.array(images,dtype=np.float32)
check_img = images
images=images.astype('float32')
images=np.multiply(images,1.0/255.0)
# Restore the trained graph and its weights from the checkpoint.
sess=tf.Session()
saver=tf.train.import_meta_graph('./model_25k/dog-cat.ckpt-118849.meta')
saver.restore(sess,'./model_25k/dog-cat.ckpt-118849')
check_num = 0
start_time = time.time()
# Classify the images one at a time.
for img in images:
    x_batch=img.reshape(1,image_size,image_size,num_channels)
    #sess=tf.Session()
    #step1: network structure graph
    #saver=tf.train.import_meta_graph('./dogs-cats-model/dog-cat.ckpt-7496.meta')
    #step2: load the weight parameters
    #saver.restore(sess,'./dogs-cats-model/dog-cat.ckpt-7496')
    # Fetch the default graph and the tensors needed for inference.
    graph=tf.get_default_graph()
    y_pred=graph.get_tensor_by_name("y_pred:0")
    x=graph.get_tensor_by_name("x:0")
    y_true=graph.get_tensor_by_name("y_true:0")
    y_test_images=np.zeros((1,2))
    feed_dict_testing={x:x_batch,y_true:y_test_images}
    result=sess.run(y_pred,feed_dict_testing)
    # res_label=['dog','cat']
    res_label=classes
    result_num=result.argmax()
    ######################################################
    show_pic(img,res_label[result_num],result[0][result_num])
    #print(res_label[result_num])
    # print(img)
    #######################################################
    #check_num+=1
    #time.sleep(1)
    result_label.append (result_num )
end_time = time.time()
# '耗时' = elapsed time.
print('耗时: ' + str(end_time - start_time) + 's')
# Compare predictions with the ground truth and report accuracy.
pre_corr , fault = find_fault(result_label,dir_label)
#print(pre_corr,fault)
for i in range(len(result_label)):
    if fault[i]==1:
        fault_file_name.append(dir_name[i])
# '正确率为' = accuracy; '错误的文件名为' = misclassified file names.
print("正确率为: " + str(round(pre_corr,5)*100) + '%')
print("错误的文件名为: ",fault_file_name)
# In[ ]:
# In[ ]:
| zh | 0.112646 | #!/usr/bin/env python # coding: utf-8 # In[1]: # In[4]: #print('类别: ' + str(classes)) #img=np.multiply(img,255.0) # print(img) # 图像题目 # 显示在图片上 #print(name[0])# 因为照片的格式是cat.1.jpg/cat.2.jpg # 所以只用读取 . 前面这个字符串 #print(dir_label) #print("adress:",path+file) #sess=tf.Session() #step1网络结构图 #saver=tf.train.import_meta_graph('./dogs-cats-model/dog-cat.ckpt-7496.meta') #step2加载权重参数 #saver.restore(sess,'./dogs-cats-model/dog-cat.ckpt-7496') #获取默认的图 # res_label=['dog','cat'] ###################################################### #print(res_label[result_num]) # print(img) ####################################################### #check_num+=1 #time.sleep(1) #print(pre_corr,fault) # In[ ]: # In[ ]: | 2.535539 | 3 |
test/layers_test.py | timgates42/theanets | 314 | 6614574 | import numpy as np
import pytest
import theanets
import theano.tensor as TT
import util as u
# Shorthand for the layer sizes shared across these tests (defined in util).
NI = u.NUM_INPUTS
NH = u.NUM_HID1
class TestFeedforward:
    """Tests for feedforward-style layers (dense, classifier, shape/merge layers)."""
    @pytest.mark.parametrize('form, name, params, count, outputs', [
        ('feedforward', 'feedforward', 'w b', 1 + NI, 'out pre'),
        ('ff', 'feedforward', 'w b', 1 + NI, 'out pre'),
        ('classifier', 'classifier', 'w b', 1 + NI, 'out pre'),
        ('flatten', 'flatten', '', 0, 'out'),
        ('flat', 'flatten', '', 0, 'out'),
        ('concatenate', 'concatenate', '', 0, 'out'),
        ('concat', 'concatenate', '', 0, 'out'),
        ('product', 'product', '', 0, 'out'),
        ('prod', 'product', '', 0, 'out'),
    ])
    def test_build(self, form, name, params, count, outputs):
        """Building a layer by form name yields the expected class,
        parameter names, total parameter count, and output expressions."""
        layer = theanets.Layer.build(form, size=NI, name='l', inputs='in')
        layer.bind(theanets.Network([NI]))
        assert layer.__class__.__name__.lower() == name
        assert sorted(p.name for p in layer.params) == \
            sorted('l.' + p for p in params.split())
        # `count` is expressed per output unit, hence the `* NI` here.
        assert sum(np.prod(p.get_value().shape) for p in layer.params) == count * NI
        out, upd = layer.connect({'in:out': TT.matrix('x')})
        assert sorted(out) == sorted('l:' + o for o in outputs.split())
        assert sorted(upd) == []
        assert layer.to_spec() == dict(
            form=name, name='l', size=NI, inputs='in',
            activation=layer.kwargs.get('activation', 'relu'))
    @pytest.mark.parametrize('layer', [
        NH,
        dict(form='ff', inputs=('hid1', 'hid2'), size=NH),
        dict(form='tied', partner='hid1'),
        dict(form='prod', inputs=('hid1', 'hid2'), size=NH),
        dict(form='concat', inputs=('hid1', 'hid2'), size=2 * NH),
        ('flat', NH),
    ])
    def test_predict(self, layer):
        """An autoencoder built around each layer spec predicts the input shape."""
        net = theanets.Autoencoder([NI, NH, NH, layer, NI])
        assert net.predict(u.INPUTS).shape == (u.NUM_EXAMPLES, NI)
    def test_multiple_inputs(self):
        """A feedforward layer with two inputs gets one weight matrix per input."""
        layer = theanets.layers.Feedforward(inputs=('in', 'hid1'), size=NH, name='l')
        layer.bind(theanets.Network([NH, NH, NH]))
        total = sum(np.prod(p.get_value().shape) for p in layer.params)
        assert total == (1 + 2 * NH) * NH
        assert sorted(p.name for p in layer.params) == \
            ['l.b', 'l.w_hid1:out', 'l.w_in:out']
        assert layer.to_spec() == dict(
            form='feedforward', name='l', size=NH, activation='relu',
            inputs=('in', 'hid1'))
    def test_reshape(self):
        """A reshape layer has no trainable parameters and records its shape."""
        layer = theanets.layers.Reshape(inputs='in', shape=(4, 2), name='l')
        layer.bind(theanets.Network([8]))
        assert sum(np.prod(p.get_value().shape) for p in layer.params) == 0
        assert sorted(p.name for p in layer.params) == []
        assert layer.to_spec() == dict(
            form='reshape', name='l', shape=(4, 2), inputs='in',
            activation='relu')
class TestRecurrent:
    """Tests for the recurrent layer zoo (RNN variants, LSTM/GRU, conv1, bidirectional)."""
    @pytest.mark.parametrize('form, kwargs, count, params, outputs', [
        ('rnn', {}, 1 + NI + NH, 'xh hh b', 'out pre'),
        ('clockwork', {'periods': (1, 2, 4, 8)}, 1 + NI + NH, 'xh hh b', 'out pre'),
        ('rrnn', {'rate': 'uniform'}, 1 + NI + NH, 'xh hh b', 'out pre rate hid'),
        ('rrnn', {'rate': 'log'}, 1 + NI + NH, 'xh hh b', 'out pre rate hid'),
        ('rrnn', {'rate': 'vector'}, 2 + NI + NH, 'xh hh b r', 'out pre rate hid'),
        ('rrnn', {'rate': 'matrix'}, 2 + NH + 2 * NI, 'xh hh b r xr', 'out pre rate hid'),
        ('gru', {}, 3 * (1 + NI + NH), 'b w hh hr hz', 'hid out pre rate'),
        ('mut1', {}, 3 + 3 * NI + 2 * NH, 'bh br bz hh hr xh xr xz', 'hid out pre rate'),
        ('scrn', {}, 2 * (1 + NI + 2 * NH), 'w ho so hh sh b r', 'out hid rate state'),
        ('lstm', {}, 7 + 4 * NH + 4 * NI, 'xh hh b cf ci co', 'out cell'),
        ('conv1', {'filter_size': 13}, 1 + 13 * NI, 'w b', 'pre out'),
        ('mrnn', {'factors': 3}, (7 + NI) * NH + 3 * NI, 'xh xf hf fh b',
         'out pre factors'),
        ('bidirectional', {}, 1 + NI + NH // 2,
         'l_bw.b l_bw.hh l_bw.xh l_fw.b l_fw.xh l_fw.hh',
         'bw_out bw_pre fw_out fw_pre out pre'),
    ])
    def test_build(self, form, kwargs, count, params, outputs):
        """Each recurrent form builds with the expected parameters, outputs,
        and serialized spec."""
        layer = theanets.Layer.build(form, size=NH, name='l', inputs='in', **kwargs)
        layer.bind(theanets.Network([dict(size=NI, ndim=3)]))
        assert layer.__class__.__name__.lower() == form
        expected = sorted('l.' + p for p in params.split())
        if form == 'bidirectional':
            # Bidirectional wraps two worker layers; their param names are
            # already fully qualified, so no 'l.' prefix is added.
            expected = sorted(params.split())
        assert sorted(p.name for p in layer.params) == expected
        expected = count * NH
        if form == 'mrnn':
            # For mrnn the table's `count` is already the absolute total.
            expected = count
        assert sum(np.prod(p.get_value().shape) for p in layer.params) == expected
        out, upd = layer.connect({'in:out': TT.tensor3('x')})
        assert sorted(out) == sorted('l:' + o for o in outputs.split())
        assert sorted(upd) == []
        # Extra spec entries that only some layer forms serialize.
        spec = {}
        if form == 'mrnn':
            spec['factors'] = 3
        if form == 'bidirectional':
            spec['worker'] = 'rnn'
        if form == 'clockwork':
            spec['periods'] = (1, 2, 4, 8)
        if form == 'scrn':
            spec['s_0'] = None
            spec['context_size'] = int(1 + np.sqrt(NH))
        if form == 'lstm':
            spec['c_0'] = None
        if form not in ('bidirectional', 'conv1'):
            spec['h_0'] = None
        assert layer.to_spec() == dict(
            form=form, name='l', size=NH, inputs='in',
            activation=layer.kwargs.get('activation', 'relu'), **spec)
    @pytest.mark.parametrize('layer', [
        (NH, 'rnn'),
        dict(size=NH, form='conv1', filter_size=13),
    ])
    def test_predict(self, layer):
        """Recurrent autoencoders predict (examples, time, features);
        conv1 shortens the time axis by filter_size - 1."""
        T = u.RNN.NUM_TIMES
        if isinstance(layer, dict) and layer.get('form') == 'conv1':
            T -= layer['filter_size'] - 1
        net = theanets.recurrent.Autoencoder([NI, NH, NH, layer, NI])
        assert net.predict(u.RNN.INPUTS).shape == (u.NUM_EXAMPLES, T, NI)
class TestConvolution:
    """Tests for the 2-d convolution layer."""
    @pytest.mark.parametrize('form, kwargs, count, params, outputs', [
        ('conv2', {'filter_size': u.CNN.FILTER_SIZE},
         1 + NI * u.CNN.FILTER_HEIGHT * u.CNN.FILTER_WIDTH, 'w b', 'out pre'),
    ])
    def test_build(self, form, kwargs, count, params, outputs):
        """Building a conv layer yields the expected class, parameters,
        parameter count, outputs, and spec."""
        layer = theanets.Layer.build(form, size=NH, name='l', inputs='in', **kwargs)
        layer.bind(theanets.Network([dict(size=NI, ndim=4)]))
        # The registered form name matches the implementing class name.
        assert layer.__class__.__name__.lower() == form
        want_names = sorted('l.' + p for p in params.split())
        have_names = sorted(p.name for p in layer.params)
        assert have_names == want_names
        n_weights = sum(np.prod(p.get_value().shape) for p in layer.params)
        assert n_weights == count * NH
        outs, updates = layer.connect({'in:out': TT.tensor4('x')})
        assert sorted(outs) == sorted('l:' + o for o in outputs.split())
        assert sorted(updates) == []
        assert layer.to_spec() == dict(
            form=form, name='l', size=NH, inputs='in', activation='relu')
    @pytest.mark.parametrize('layer', [
        dict(size=NH, form='conv2', filter_size=u.CNN.FILTER_SIZE),
    ])
    def test_predict(self, layer):
        """A convolutional regressor predicts one output row per example."""
        model = theanets.convolution.Regressor([
            (u.CNN.NUM_WIDTH, u.CNN.NUM_HEIGHT, NI),
            NH, layer, 'flat', u.NUM_OUTPUTS])
        assert model.predict(u.CNN.INPUTS).shape == (u.NUM_EXAMPLES, u.NUM_OUTPUTS)
| import numpy as np
import pytest
import theanets
import theano.tensor as TT
import util as u
# Shorthand for the layer sizes shared across these tests (defined in util).
NI = u.NUM_INPUTS
NH = u.NUM_HID1
class TestFeedforward:
    """Tests for feedforward-style layers (dense, classifier, shape/merge layers)."""
    @pytest.mark.parametrize('form, name, params, count, outputs', [
        ('feedforward', 'feedforward', 'w b', 1 + NI, 'out pre'),
        ('ff', 'feedforward', 'w b', 1 + NI, 'out pre'),
        ('classifier', 'classifier', 'w b', 1 + NI, 'out pre'),
        ('flatten', 'flatten', '', 0, 'out'),
        ('flat', 'flatten', '', 0, 'out'),
        ('concatenate', 'concatenate', '', 0, 'out'),
        ('concat', 'concatenate', '', 0, 'out'),
        ('product', 'product', '', 0, 'out'),
        ('prod', 'product', '', 0, 'out'),
    ])
    def test_build(self, form, name, params, count, outputs):
        """Building a layer by form name yields the expected class,
        parameter names, total parameter count, and output expressions."""
        layer = theanets.Layer.build(form, size=NI, name='l', inputs='in')
        layer.bind(theanets.Network([NI]))
        assert layer.__class__.__name__.lower() == name
        assert sorted(p.name for p in layer.params) == \
            sorted('l.' + p for p in params.split())
        # `count` is expressed per output unit, hence the `* NI` here.
        assert sum(np.prod(p.get_value().shape) for p in layer.params) == count * NI
        out, upd = layer.connect({'in:out': TT.matrix('x')})
        assert sorted(out) == sorted('l:' + o for o in outputs.split())
        assert sorted(upd) == []
        assert layer.to_spec() == dict(
            form=name, name='l', size=NI, inputs='in',
            activation=layer.kwargs.get('activation', 'relu'))
    @pytest.mark.parametrize('layer', [
        NH,
        dict(form='ff', inputs=('hid1', 'hid2'), size=NH),
        dict(form='tied', partner='hid1'),
        dict(form='prod', inputs=('hid1', 'hid2'), size=NH),
        dict(form='concat', inputs=('hid1', 'hid2'), size=2 * NH),
        ('flat', NH),
    ])
    def test_predict(self, layer):
        """An autoencoder built around each layer spec predicts the input shape."""
        net = theanets.Autoencoder([NI, NH, NH, layer, NI])
        assert net.predict(u.INPUTS).shape == (u.NUM_EXAMPLES, NI)
    def test_multiple_inputs(self):
        """A feedforward layer with two inputs gets one weight matrix per input."""
        layer = theanets.layers.Feedforward(inputs=('in', 'hid1'), size=NH, name='l')
        layer.bind(theanets.Network([NH, NH, NH]))
        total = sum(np.prod(p.get_value().shape) for p in layer.params)
        assert total == (1 + 2 * NH) * NH
        assert sorted(p.name for p in layer.params) == \
            ['l.b', 'l.w_hid1:out', 'l.w_in:out']
        assert layer.to_spec() == dict(
            form='feedforward', name='l', size=NH, activation='relu',
            inputs=('in', 'hid1'))
    def test_reshape(self):
        """A reshape layer has no trainable parameters and records its shape."""
        layer = theanets.layers.Reshape(inputs='in', shape=(4, 2), name='l')
        layer.bind(theanets.Network([8]))
        assert sum(np.prod(p.get_value().shape) for p in layer.params) == 0
        assert sorted(p.name for p in layer.params) == []
        assert layer.to_spec() == dict(
            form='reshape', name='l', shape=(4, 2), inputs='in',
            activation='relu')
class TestRecurrent:
    """Tests for the recurrent layer zoo (RNN variants, LSTM/GRU, conv1, bidirectional)."""
    @pytest.mark.parametrize('form, kwargs, count, params, outputs', [
        ('rnn', {}, 1 + NI + NH, 'xh hh b', 'out pre'),
        ('clockwork', {'periods': (1, 2, 4, 8)}, 1 + NI + NH, 'xh hh b', 'out pre'),
        ('rrnn', {'rate': 'uniform'}, 1 + NI + NH, 'xh hh b', 'out pre rate hid'),
        ('rrnn', {'rate': 'log'}, 1 + NI + NH, 'xh hh b', 'out pre rate hid'),
        ('rrnn', {'rate': 'vector'}, 2 + NI + NH, 'xh hh b r', 'out pre rate hid'),
        ('rrnn', {'rate': 'matrix'}, 2 + NH + 2 * NI, 'xh hh b r xr', 'out pre rate hid'),
        ('gru', {}, 3 * (1 + NI + NH), 'b w hh hr hz', 'hid out pre rate'),
        ('mut1', {}, 3 + 3 * NI + 2 * NH, 'bh br bz hh hr xh xr xz', 'hid out pre rate'),
        ('scrn', {}, 2 * (1 + NI + 2 * NH), 'w ho so hh sh b r', 'out hid rate state'),
        ('lstm', {}, 7 + 4 * NH + 4 * NI, 'xh hh b cf ci co', 'out cell'),
        ('conv1', {'filter_size': 13}, 1 + 13 * NI, 'w b', 'pre out'),
        ('mrnn', {'factors': 3}, (7 + NI) * NH + 3 * NI, 'xh xf hf fh b',
         'out pre factors'),
        ('bidirectional', {}, 1 + NI + NH // 2,
         'l_bw.b l_bw.hh l_bw.xh l_fw.b l_fw.xh l_fw.hh',
         'bw_out bw_pre fw_out fw_pre out pre'),
    ])
    def test_build(self, form, kwargs, count, params, outputs):
        """Each recurrent form builds with the expected parameters, outputs,
        and serialized spec."""
        layer = theanets.Layer.build(form, size=NH, name='l', inputs='in', **kwargs)
        layer.bind(theanets.Network([dict(size=NI, ndim=3)]))
        assert layer.__class__.__name__.lower() == form
        expected = sorted('l.' + p for p in params.split())
        if form == 'bidirectional':
            # Bidirectional wraps two worker layers; their param names are
            # already fully qualified, so no 'l.' prefix is added.
            expected = sorted(params.split())
        assert sorted(p.name for p in layer.params) == expected
        expected = count * NH
        if form == 'mrnn':
            # For mrnn the table's `count` is already the absolute total.
            expected = count
        assert sum(np.prod(p.get_value().shape) for p in layer.params) == expected
        out, upd = layer.connect({'in:out': TT.tensor3('x')})
        assert sorted(out) == sorted('l:' + o for o in outputs.split())
        assert sorted(upd) == []
        # Extra spec entries that only some layer forms serialize.
        spec = {}
        if form == 'mrnn':
            spec['factors'] = 3
        if form == 'bidirectional':
            spec['worker'] = 'rnn'
        if form == 'clockwork':
            spec['periods'] = (1, 2, 4, 8)
        if form == 'scrn':
            spec['s_0'] = None
            spec['context_size'] = int(1 + np.sqrt(NH))
        if form == 'lstm':
            spec['c_0'] = None
        if form not in ('bidirectional', 'conv1'):
            spec['h_0'] = None
        assert layer.to_spec() == dict(
            form=form, name='l', size=NH, inputs='in',
            activation=layer.kwargs.get('activation', 'relu'), **spec)
    @pytest.mark.parametrize('layer', [
        (NH, 'rnn'),
        dict(size=NH, form='conv1', filter_size=13),
    ])
    def test_predict(self, layer):
        """Recurrent autoencoders predict (examples, time, features);
        conv1 shortens the time axis by filter_size - 1."""
        T = u.RNN.NUM_TIMES
        if isinstance(layer, dict) and layer.get('form') == 'conv1':
            T -= layer['filter_size'] - 1
        net = theanets.recurrent.Autoencoder([NI, NH, NH, layer, NI])
        assert net.predict(u.RNN.INPUTS).shape == (u.NUM_EXAMPLES, T, NI)
class TestConvolution:
    """Tests for the 2-d convolution layer."""
    @pytest.mark.parametrize('form, kwargs, count, params, outputs', [
        ('conv2', {'filter_size': u.CNN.FILTER_SIZE},
         1 + NI * u.CNN.FILTER_HEIGHT * u.CNN.FILTER_WIDTH, 'w b', 'out pre'),
    ])
    def test_build(self, form, kwargs, count, params, outputs):
        """Building a conv layer yields the expected class, parameters,
        parameter count, outputs, and spec."""
        layer = theanets.Layer.build(form, size=NH, name='l', inputs='in', **kwargs)
        layer.bind(theanets.Network([dict(size=NI, ndim=4)]))
        # The registered form name matches the implementing class name.
        assert layer.__class__.__name__.lower() == form
        want_names = sorted('l.' + p for p in params.split())
        have_names = sorted(p.name for p in layer.params)
        assert have_names == want_names
        n_weights = sum(np.prod(p.get_value().shape) for p in layer.params)
        assert n_weights == count * NH
        outs, updates = layer.connect({'in:out': TT.tensor4('x')})
        assert sorted(outs) == sorted('l:' + o for o in outputs.split())
        assert sorted(updates) == []
        assert layer.to_spec() == dict(
            form=form, name='l', size=NH, inputs='in', activation='relu')
    @pytest.mark.parametrize('layer', [
        dict(size=NH, form='conv2', filter_size=u.CNN.FILTER_SIZE),
    ])
    def test_predict(self, layer):
        """A convolutional regressor predicts one output row per example."""
        model = theanets.convolution.Regressor([
            (u.CNN.NUM_WIDTH, u.CNN.NUM_HEIGHT, NI),
            NH, layer, 'flat', u.NUM_OUTPUTS])
        assert model.predict(u.CNN.INPUTS).shape == (u.NUM_EXAMPLES, u.NUM_OUTPUTS)
| none | 1 | 2.097162 | 2 | |
src/correction_network/dataset.py | Stanford-NavLab/deep_gnss | 11 | 6614575 | ########################################################################
# Author(s): <NAME>, <NAME>
# Date: 21 September 2021
# Desc: Create PyTorch DataLoader for simulated measurements
########################################################################
import sys, os, csv
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import random
from numpy.random import default_rng
from gnss_lib.sim_gnss import expected_measures
from gnss_lib.utils import datetime_to_tow
from gnss_lib import coordinates as coord
def load_datasets(config, transforms=None):
    """Lazily yield one Sim_GNSS_Dataset per subdirectory of config['root'],
    visiting the subdirectories in random order."""
    root = config['root']
    subdirs = [os.path.join(root, entry) for entry in os.listdir(root)]
    random.shuffle(subdirs)
    for subdir in subdirs:
        sub_config = config.copy()
        sub_config['root'] = subdir
        yield Sim_GNSS_Dataset(sub_config)
def list_datasets(config, transforms=None):
    """Return a list with one Sim_GNSS_Dataset per subdirectory of
    config['root'] (directory-listing order, no shuffling)."""
    root = config['root']
    datasets = []
    for entry in os.listdir(root):
        sub_config = config.copy()
        sub_config['root'] = os.path.join(root, entry)
        datasets.append(Sim_GNSS_Dataset(sub_config))
    return datasets
class Sim_GNSS_Dataset_Snap(Dataset):
    """Map-style dataset of single-epoch ("snapshot") simulated GNSS samples.

    Each item corresponds to one (trajectory, seed, timestep) triple.
    Measurement CSVs are expected under ``<root>/<measurement_dir>/`` with
    names of the form ``<traj_id>_<seed_id>.csv``, one file per
    trajectory/seed pair holding all of its timesteps.
    """

    def __init__(self, config, transforms=None):
        # config: dict-like with paths, cache size, and noise parameters.
        self.root = config['root']
        data_dir = config['measurement_dir']
        # init_dir = config['initialization_dir']
        # info_path = config['info_path']
        self.max_open_files = config['max_open_files'] # cache size (parsed CSVs kept in memory)
        self.guess_range = config['guess_range']  # per-state uniform noise ranges for the initial guess
        self.transform = transforms
        # Number of timesteps per trajectory, filled while scanning files below.
        # self.info = pd.read_csv(os.path.join(self.root, info_path))
        # self.timestep_counts = {row['id'] : row['len'] for row in self.info.iterrows()}
        self.timestep_counts = {}
        self.use_biases = bool(config['use_biases'])
        # Scan the measurement directory and index every "<traj>_<seed>.csv".
        file_paths = {}
        seed_values = {}
        for file_path in os.listdir(os.path.join(self.root, data_dir)):
            tmp_name = os.path.split(file_path)[1].split(".")[0]
            traj_id, seed_id = tmp_name.split("_")
            traj_id = int(traj_id)
            if traj_id not in file_paths.keys():
                file_paths[traj_id] = []
                seed_values[traj_id] = []
            file_paths[traj_id].append(os.path.join(self.root, data_dir, file_path)) # Done this way to add paths from multiple directories later
            seed_values[traj_id].append(int(seed_id))
            # NOTE(review): each CSV is fully parsed here just to count its
            # unique timestamps, so construction cost is O(total data size).
            data = pd.read_csv(os.path.join(self.root, data_dir, file_path))
            self.timestep_counts[traj_id] = len(data['t_idx'].unique())
        self.meas_file_paths = file_paths
        self.seed_values = seed_values
        # file_paths = {key : [] for key in self.meas_file_paths.keys()}
        # for file_path in os.listdir(os.path.join(self.root, init_dir)):
        #     tmp_idx = os.path.split(file_path).split(".")[0]
        #     traj_id, seed_id = tmp_idx.split("_")
        #     traj_id = int(traj_id)
        #     file_paths[traj_id].append(file_path)
        # self.init_file_paths = file_paths
        # Save number of seeds for each trajectory.
        self.seed_counts = {key : len(value) for (key, value) in self.meas_file_paths.items()}
        self.full_counts = {key: self.seed_counts[key]*self.timestep_counts[key] for key in self.seed_counts.keys()}
        self.N_total = sum(self.full_counts.values())
        # Precompute the flat-index -> (trajectory key, seed value, timestep)
        # mapping so that __getitem__ can locate its data in O(1).
        indices = []
        keyList=sorted(self.full_counts.keys())
        traj_idx = 0
        seed_idx = 0
        timestep = 0
        for i in range(self.N_total):
            key = keyList[traj_idx]
            seed = self.seed_values[key][seed_idx]
            indices.append((key, seed, timestep))
            # Odometer-style increment: timestep, then seed, then trajectory.
            timestep += 1
            if timestep>=self.timestep_counts[key]:
                timestep = 0
                seed_idx += 1
                if seed_idx >= self.seed_counts[key]:
                    seed_idx = 0
                    traj_idx += 1
        self.indices = indices
        # Per-item pseudorange fault vectors, generated lazily in __getitem__.
        if self.use_biases:
            self.biases = {}

    def get_files(self, key, seed):
        """Return the measurement DataFrame and its unique timestamps for
        trajectory ``key`` / seed value ``seed``.

        Keeps a small FIFO cache of at most ``max_open_files`` parsed CSVs.
        """
        if not hasattr(self, 'cache_traj'):
            self.cache_traj = dict()
            self.cache_times = dict()
        # Load trajectory file (from cache when possible).
        seed_hash = str(key)+"_"+str(seed)
        if seed_hash in self.cache_traj.keys():
            seed_file = self.cache_traj[seed_hash]
            times = self.cache_times[seed_hash]
        else:
            seed_file = pd.read_csv(self.meas_file_paths[key][self.seed_values[key].index(seed)])
            times = seed_file['t_idx'].unique()
            if len(self.cache_traj) >= self.max_open_files:
                # Evict the oldest entry (dicts preserve insertion order).
                pop_key = list(self.cache_traj.keys())[0]
                self.cache_traj.pop(pop_key)
                self.cache_times.pop(pop_key)
            self.cache_traj[seed_hash] = seed_file
            self.cache_times[seed_hash] = times
        # # Repeat for Seed file
        # seed_hash = str(key)+"_"+str(seed_idx)
        # if seed_hash in self.cache_seed.keys():
        #     seed_file = self.cache_seed[seed_hash]
        # else:
        #     seed_file = pd.read_csv(self.init_file_paths[key][seed_idx])
        #     if len(self.cache_traj) + len(self.cache_seed) >= self.max_open_files:
        #         self.cache_seed.pop(list(self.cache_seed.keys())[0])
        #     self.cache_seed[seed_hash] = seed_file
        return seed_file, times

    def add_guess_noise(self, true_XYZb):
        """Return ``true_XYZb`` perturbed by uniform noise.

        Components 0-2 (position), 4-6 (velocity), and 7 (clock drift) get
        symmetric noise in +/-guess_range[i]; the clock bias (index 3) only
        gets non-negative noise.
        """
        rng = default_rng()
        guess_noise = np.array([rng.uniform(-self.guess_range[0], self.guess_range[0]),
                                rng.uniform(-self.guess_range[1], self.guess_range[1]),
                                rng.uniform(-self.guess_range[2], self.guess_range[2]), # x, y, z
                                rng.uniform(0, self.guess_range[3]), # cdt
                                rng.uniform(-self.guess_range[4], self.guess_range[4]),
                                rng.uniform(-self.guess_range[5], self.guess_range[5]),
                                rng.uniform(-self.guess_range[6], self.guess_range[6]), # vx, vy, vz
                                rng.uniform(-self.guess_range[7], self.guess_range[7]) # cdt_dot
                                ])
        return true_XYZb + guess_noise

    def __getitem__(self, idx):
        """Build one training sample for flat index ``idx``.

        Returns a dict with the per-satellite feature matrix (residual +
        NED line-of-sight), the true NED position correction for the noisy
        guess, and the ECEF guess itself.
        """
        # NOTE(review): despite the name, the unpacked ``seed_idx`` is the
        # seed *value* stored in self.indices, not a list index.
        key, seed_idx, timestep = self.indices[idx]
        seed_file, times = self.get_files(key, seed_idx)
        data = seed_file[seed_file['t_idx']==times[timestep]]
        gpsweek, tow = datetime_to_tow(pd.to_datetime(times[timestep]))
        ephem = data.set_index('sv')
        _data0 = data.iloc[0]
        # Ground-truth receiver state [x, y, z, b, vx, vy, vz, b_dot] (ECEF).
        true_XYZb = np.array([_data0['Rxx'], _data0['Rxy'], _data0['Rxz'], _data0['b'], _data0['Rxvx'], _data0['Rxvy'], _data0['Rxvz'], _data0['b_dot']])
        guess_XYZb = self.add_guess_noise(true_XYZb) # Generate guess by adding noise to groundtruth
        # guess_XYZb = np.copy(true_XYZb) # 0 noise for debugging
        # Transform position/velocity to a NED frame anchored at the guess.
        ref_local = coord.LocalCoord.from_ecef(guess_XYZb[:3])
        guess_NEDb = np.copy(guess_XYZb)
        guess_NEDb[:3] = ref_local.ecef2ned(guess_XYZb[:3, None])[:, 0] # position
        guess_NEDb[4:7] = ref_local.ecef2nedv(guess_XYZb[4:7, None])[:, 0] # velocity
        true_NEDb = np.copy(true_XYZb)
        true_NEDb[:3] = ref_local.ecef2ned(true_XYZb[:3, None])[:, 0] # position
        true_NEDb[4:7] = ref_local.ecef2nedv(true_XYZb[4:7, None])[:, 0] # velocity
        # Expected measurements and satellite states for the guessed state.
        measurements, satXYZV = expected_measures(gpsweek, tow, ephem, guess_XYZb[:3], guess_XYZb[3], guess_XYZb[7], guess_XYZb[4:7])
        # print(measurements, satXYZV, ephem)
        # Primary features: measurement residuals and unit line-of-sight
        # vectors (rotated into the local NED frame).
        residuals = (ephem[['prange', 'doppler']] - measurements).to_numpy()
        los_vector = (satXYZV[['x', 'y', 'z']] - guess_XYZb[:3])
        los_vector = los_vector.div(np.sqrt(np.square(los_vector).sum(axis=1)), axis='rows').to_numpy()
        los_vector = ref_local.ecef2nedv(los_vector)
        # vel_sat = (satXYZV[['vx', 'vy', 'vz']]).to_numpy()
        # vel_sat = ref_local.ecef2nedv(vel_sat)/2750.0 # Normalizing sat velocity
        # vel_veh = np.repeat(guess_XYZb[4:7][None, :], len(vel_sat), axis=0)
        # Optionally corrupt a Poisson(1)-distributed number of satellites
        # with a fixed pseudorange bias in [50, 200); the bias vector is
        # cached per item so repeated visits see the same fault.
        if self.use_biases:
            if idx not in self.biases.keys():
                num_sats = len(residuals)
                num_biased = min(np.random.poisson(1), num_sats)
                sat_indices = np.arange(num_sats)
                np.random.shuffle(sat_indices)
                bias_vec = np.zeros(num_sats)
                for sat_idx in sat_indices[:num_biased]:
                    bias_vec[sat_idx] = np.random.uniform(50, 200)
                self.biases[idx] = bias_vec
            _residuals = residuals[:, 0] + self.biases[idx]
        else:
            _residuals = residuals[:, 0]
        # Replace with some fancier feature extraction or input to permutation invariant layer
        features = np.concatenate((_residuals[:, None], los_vector), axis=1)
        sample = {
            'features': torch.Tensor(features),
            'true_correction': (true_NEDb-guess_NEDb)[:3],
            # 'satpos': satXYZV.to_numpy(),
            # 'measurements': measurements.to_numpy(),
            'guess': guess_XYZb
        }
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def __len__(self):
        """Total number of (trajectory, seed, timestep) samples."""
        return int(self.N_total)
# Author(s): <NAME>, <NAME>
# Date: 21 September 2021
# Desc: Create PyTorch DataLoader for simulated measurements
########################################################################
import sys, os, csv
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import random
from numpy.random import default_rng
from gnss_lib.sim_gnss import expected_measures
from gnss_lib.utils import datetime_to_tow
from gnss_lib import coordinates as coord
def load_datasets(config, transforms=None):
# Different kinds of simulated datasets each of which has its own folder
# Dataset loader to handle differnt folders. For a heirarchy where we have different files with different entries (different measurement and ephemeris files I think)
root = config['root']
dirs = [os.path.join(root, name) for name in os.listdir(root)]
random.shuffle(dirs)
for new_root in dirs:
_conf = config.copy()
_conf['root'] = new_root
yield Sim_GNSS_Dataset(_conf)
def list_datasets(config, transforms=None):
# Same as the previous but with files
root = config['root']
dirs = [os.path.join(root, name) for name in os.listdir(root)]
ret = []
for new_root in dirs:
_conf = config.copy()
_conf['root'] = new_root
ret.append(Sim_GNSS_Dataset(_conf))
return ret
class Sim_GNSS_Dataset_Snap(Dataset):
def __init__(self, config, transforms=None):
self.root = config['root']
data_dir = config['measurement_dir']
# init_dir = config['initialization_dir']
# info_path = config['info_path']
self.max_open_files = config['max_open_files'] #cache size
self.guess_range = config['guess_range']
self.transform = transforms
# Save number of entries in each file
# self.info = pd.read_csv(os.path.join(self.root, info_path))
# self.timestep_counts = {row['id'] : row['len'] for row in self.info.iterrows()}
self.timestep_counts = {}
self.use_biases = bool(config['use_biases'])
# Save file paths
file_paths = {}
seed_values = {}
for file_path in os.listdir(os.path.join(self.root, data_dir)):
tmp_name = os.path.split(file_path)[1].split(".")[0]
traj_id, seed_id = tmp_name.split("_")
traj_id = int(traj_id)
if traj_id not in file_paths.keys():
file_paths[traj_id] = []
seed_values[traj_id] = []
file_paths[traj_id].append(os.path.join(self.root, data_dir, file_path)) # Done this way to add paths from multiple directories later
seed_values[traj_id].append(int(seed_id))
data = pd.read_csv(os.path.join(self.root, data_dir, file_path))
self.timestep_counts[traj_id] = len(data['t_idx'].unique())
self.meas_file_paths = file_paths
self.seed_values = seed_values
# file_paths = {key : [] for key in self.meas_file_paths.keys()}
# for file_path in os.listdir(os.path.join(self.root, init_dir)):
# tmp_idx = os.path.split(file_path).split(".")[0]
# traj_id, seed_id = tmp_idx.split("_")
# traj_id = int(traj_id)
# file_paths[traj_id].append(file_path) # Done this way to add paths from multiple directories later
# self.init_file_paths = file_paths
# Save number of seeds for each trajectory
self.seed_counts = {key : len(value) for (key, value) in self.meas_file_paths.items()}
self.full_counts = {key: self.seed_counts[key]*self.timestep_counts[key] for key in self.seed_counts.keys()}
self.N_total = sum(self.full_counts.values())
# Precompute indices (mapping from index to where that information is stored. index 899 -> file identifiers)
indices = []
keyList=sorted(self.full_counts.keys())
traj_idx = 0
seed_idx = 0
timestep = 0
for i in range(self.N_total):
key = keyList[traj_idx]
seed = self.seed_values[key][seed_idx]
indices.append((key, seed, timestep))
timestep += 1
if timestep>=self.timestep_counts[key]:
timestep = 0
seed_idx += 1
if seed_idx >= self.seed_counts[key]:
seed_idx = 0
traj_idx += 1
self.indices = indices
# Initialize biases
if self.use_biases:
self.biases = {}
def get_files(self, key, seed):
# Cache based manager of data files
if not hasattr(self, 'cache_traj'):
self.cache_traj = dict()
self.cache_times = dict()
# Load Trajectory file
seed_hash = str(key)+"_"+str(seed)
if seed_hash in self.cache_traj.keys():
seed_file = self.cache_traj[seed_hash]
times = self.cache_times[seed_hash]
else:
seed_file = pd.read_csv(self.meas_file_paths[key][self.seed_values[key].index(seed)])
times = seed_file['t_idx'].unique()
if len(self.cache_traj) >= self.max_open_files:
pop_key = list(self.cache_traj.keys())[0]
self.cache_traj.pop(pop_key)
self.cache_times.pop(pop_key)
self.cache_traj[seed_hash] = seed_file
self.cache_times[seed_hash] = times
# # Repeat for Seed file
# seed_hash = str(key)+"_"+str(seed_idx)
# if seed_hash in self.cache_seed.keys():
# seed_file = self.cache_seed[seed_hash]
# else:
# seed_file = pd.read_csv(self.init_file_paths[key][seed_idx])
# if len(self.cache_traj) + len(self.cache_seed) >= self.max_open_files:
# self.cache_seed.pop(list(self.cache_seed.keys())[0])
# self.cache_seed[seed_hash] = seed_file
return seed_file, times
def add_guess_noise(self, true_XYZb):
rng = default_rng()
guess_noise = np.array([rng.uniform(-self.guess_range[0], self.guess_range[0]),
rng.uniform(-self.guess_range[1], self.guess_range[1]),
rng.uniform(-self.guess_range[2], self.guess_range[2]), # x, y, z
rng.uniform(0, self.guess_range[3]), # cdt
rng.uniform(-self.guess_range[4], self.guess_range[4]),
rng.uniform(-self.guess_range[5], self.guess_range[5]),
rng.uniform(-self.guess_range[6], self.guess_range[6]), # vx, vy, vz
rng.uniform(-self.guess_range[7], self.guess_range[7]) # cdt_dot
])
return true_XYZb + guess_noise
def __getitem__(self, idx):
key, seed_idx, timestep = self.indices[idx]
seed_file, times = self.get_files(key, seed_idx)
data = seed_file[seed_file['t_idx']==times[timestep]]
gpsweek, tow = datetime_to_tow(pd.to_datetime(times[timestep]))
ephem = data.set_index('sv')
_data0 = data.iloc[0]
# Select random initialization
true_XYZb = np.array([_data0['Rxx'], _data0['Rxy'], _data0['Rxz'], _data0['b'], _data0['Rxvx'], _data0['Rxvy'], _data0['Rxvz'], _data0['b_dot']])
guess_XYZb = self.add_guess_noise(true_XYZb) # Generate guess by adding noise to groundtruth
# guess_XYZb = np.copy(true_XYZb) # 0 noise for debugging
# Transform to NED frame
ref_local = coord.LocalCoord.from_ecef(guess_XYZb[:3])
guess_NEDb = np.copy(guess_XYZb)
guess_NEDb[:3] = ref_local.ecef2ned(guess_XYZb[:3, None])[:, 0] # position
guess_NEDb[4:7] = ref_local.ecef2nedv(guess_XYZb[4:7, None])[:, 0] # velocity
true_NEDb = np.copy(true_XYZb)
true_NEDb[:3] = ref_local.ecef2ned(true_XYZb[:3, None])[:, 0] # position
true_NEDb[4:7] = ref_local.ecef2nedv(true_XYZb[4:7, None])[:, 0] # velocity
# Generate expected measures and satellite positions/velocities
measurements, satXYZV = expected_measures(gpsweek, tow, ephem, guess_XYZb[:3], guess_XYZb[3], guess_XYZb[7], guess_XYZb[4:7])
# print(measurements, satXYZV, ephem)
# Primary feature extraction
residuals = (ephem[['prange', 'doppler']] - measurements).to_numpy()
los_vector = (satXYZV[['x', 'y', 'z']] - guess_XYZb[:3])
los_vector = los_vector.div(np.sqrt(np.square(los_vector).sum(axis=1)), axis='rows').to_numpy()
los_vector = ref_local.ecef2nedv(los_vector)
# vel_sat = (satXYZV[['vx', 'vy', 'vz']]).to_numpy()
# vel_sat = ref_local.ecef2nedv(vel_sat)/2750.0 # Normalizing sat velocity
# vel_veh = np.repeat(guess_XYZb[4:7][None, :], len(vel_sat), axis=0)
# Add biases
if self.use_biases:
if idx not in self.biases.keys():
num_sats = len(residuals)
num_biased = min(np.random.poisson(1), num_sats)
sat_indices = np.arange(num_sats)
np.random.shuffle(sat_indices)
bias_vec = np.zeros(num_sats)
for sat_idx in sat_indices[:num_biased]:
bias_vec[sat_idx] = np.random.uniform(50, 200)
self.biases[idx] = bias_vec
_residuals = residuals[:, 0] + self.biases[idx]
else:
_residuals = residuals[:, 0]
# Replace with some fancier feature extraction or input to permutation invariant layer
features = np.concatenate((_residuals[:, None], los_vector), axis=1)
sample = {
'features': torch.Tensor(features),
'true_correction': (true_NEDb-guess_NEDb)[:3],
# 'satpos': satXYZV.to_numpy(),
# 'measurements': measurements.to_numpy(),
'guess': guess_XYZb
}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return int(self.N_total) | en | 0.630869 | ######################################################################## # Author(s): <NAME>, <NAME> # Date: 21 September 2021 # Desc: Create PyTorch DataLoader for simulated measurements ######################################################################## # plotting # linear algebra # data processing, CSV file I/O (e.g. pd.read_csv) # Different kinds of simulated datasets each of which has its own folder # Dataset loader to handle differnt folders. For a heirarchy where we have different files with different entries (different measurement and ephemeris files I think) # Same as the previous but with files # init_dir = config['initialization_dir'] # info_path = config['info_path'] #cache size # Save number of entries in each file # self.info = pd.read_csv(os.path.join(self.root, info_path)) # self.timestep_counts = {row['id'] : row['len'] for row in self.info.iterrows()} # Save file paths # Done this way to add paths from multiple directories later # file_paths = {key : [] for key in self.meas_file_paths.keys()} # for file_path in os.listdir(os.path.join(self.root, init_dir)): # tmp_idx = os.path.split(file_path).split(".")[0] # traj_id, seed_id = tmp_idx.split("_") # traj_id = int(traj_id) # file_paths[traj_id].append(file_path) # Done this way to add paths from multiple directories later # self.init_file_paths = file_paths # Save number of seeds for each trajectory # Precompute indices (mapping from index to where that information is stored. 
index 899 -> file identifiers) # Initialize biases # Cache based manager of data files # Load Trajectory file # # Repeat for Seed file # seed_hash = str(key)+"_"+str(seed_idx) # if seed_hash in self.cache_seed.keys(): # seed_file = self.cache_seed[seed_hash] # else: # seed_file = pd.read_csv(self.init_file_paths[key][seed_idx]) # if len(self.cache_traj) + len(self.cache_seed) >= self.max_open_files: # self.cache_seed.pop(list(self.cache_seed.keys())[0]) # self.cache_seed[seed_hash] = seed_file # x, y, z # cdt # vx, vy, vz # cdt_dot # Select random initialization # Generate guess by adding noise to groundtruth # guess_XYZb = np.copy(true_XYZb) # 0 noise for debugging # Transform to NED frame # position # velocity # position # velocity # Generate expected measures and satellite positions/velocities # print(measurements, satXYZV, ephem) # Primary feature extraction # vel_sat = (satXYZV[['vx', 'vy', 'vz']]).to_numpy() # vel_sat = ref_local.ecef2nedv(vel_sat)/2750.0 # Normalizing sat velocity # vel_veh = np.repeat(guess_XYZb[4:7][None, :], len(vel_sat), axis=0) # Add biases # Replace with some fancier feature extraction or input to permutation invariant layer # 'satpos': satXYZV.to_numpy(), # 'measurements': measurements.to_numpy(), | 2.550211 | 3 |
python/resistor-color/resistor_color.py | parkerbxyz/exercism | 0 | 6614576 | from typing import Dict, List
# Resistor band colors in digit order: index == significant digit.
COLOR_CODE: Dict[str, int] = {
    color: digit
    for digit, color in enumerate([
        'black', 'brown', 'red', 'orange', 'yellow',
        'green', 'blue', 'violet', 'grey', 'white',
    ])
}


def color_code(color: str) -> int:
    """Look up the significant digit encoded by *color*."""
    return COLOR_CODE[color]


def colors() -> List[str]:
    """List every band color, ordered by its significant digit."""
    return [*COLOR_CODE]
| from typing import Dict, List
COLOR_CODE: Dict[str, int] = {
'black': 0,
'brown': 1,
'red': 2,
'orange': 3,
'yellow': 4,
'green': 5,
'blue': 6,
'violet': 7,
'grey': 8,
'white': 9
}
def color_code(color: str) -> int:
"""Return the significant digit of a given color."""
return COLOR_CODE[color]
def colors() -> List[str]:
"""Return the colors with corresponding significant digits."""
return list(COLOR_CODE.keys())
| en | 0.611104 | Return the significant digit of a given color. Return the colors with corresponding significant digits. | 3.825202 | 4 |
demucs/wdemucs.py | sparshpriyadarshi/demucs | 1 | 6614577 | <filename>demucs/wdemucs.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# For compat
from .hdemucs import HDemucs
WDemucs = HDemucs
| <filename>demucs/wdemucs.py
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# For compat
from .hdemucs import HDemucs
WDemucs = HDemucs
| en | 0.931304 | # Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # For compat | 1.13008 | 1 |
infoxlm/src-infoxlm/infoxlm/tasks/tlm.py | Sanster/unilm | 5,129 | 6614578 | <reponame>Sanster/unilm<gh_stars>1000+
import os
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.tasks.mlm import Mlm
@register_task("tlm")
class Tlm(Mlm):
@staticmethod
def add_args(parser):
Mlm.add_args(parser)
parser.add_argument('--tlm_data', type=str, default="")
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
# tlm step
loss, sample_size, logging_output = criterion(model, sample["tlm"])
if ignore_grad: loss *= 0
tlm_loss = loss
optimizer.backward(tlm_loss)
agg_loss += tlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# mlm_step
loss, sample_size, logging_output = criterion(model, sample["mlm"])
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
for key, value in logging_output.items():
agg_logging_output[key] += value
return agg_loss, agg_sample_size, agg_logging_output
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
tlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset = DictDataset({
"tlm": tlm_dataset,
"mlm": mlm_dataset,
})
self.datasets[split] = dataset
| import os
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.tasks.mlm import Mlm
@register_task("tlm")
class Tlm(Mlm):
@staticmethod
def add_args(parser):
Mlm.add_args(parser)
parser.add_argument('--tlm_data', type=str, default="")
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
# tlm step
loss, sample_size, logging_output = criterion(model, sample["tlm"])
if ignore_grad: loss *= 0
tlm_loss = loss
optimizer.backward(tlm_loss)
agg_loss += tlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# mlm_step
loss, sample_size, logging_output = criterion(model, sample["mlm"])
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
for key, value in logging_output.items():
agg_logging_output[key] += value
return agg_loss, agg_sample_size, agg_logging_output
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
tlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset = DictDataset({
"tlm": tlm_dataset,
"mlm": mlm_dataset,
})
self.datasets[split] = dataset | it | 0.351718 | # tlm step # mlm_step | 2.046148 | 2 |
SourceCode/Python/ChalmersGU/Other/ComparatorExample.py | ChalmersGU-data-structure-courses/OpenDSA | 0 | 6614579 | <filename>SourceCode/Python/ChalmersGU/Other/ComparatorExample.py
#/* *** ODSATag: ComparatorDemo *** */
from BaseAPI import Comparable
import functools
import operator
#/* *** ODSATag: PersonCompareTo *** */
#/* *** ODSATag: Person *** */
class Person(Comparable):
    """A person with a given name, family name, and birth year.

    The rich comparisons below define Person's *natural ordering*, which
    compares by family name only (given name and birth year are ignored
    by ==, <, etc.).
    """
#/* *** ODSAendTag: PersonCompareTo *** */
    def __init__(self, given, family, birth):
        self.givenName = given      # first name
        self.familyName = family    # surname
        self.birthYear = birth      # year of birth
    def __str__(self):
        return f"{self.givenName} {self.familyName} ({self.birthYear})"
#/* *** ODSAendTag: Person *** */
#/* *** ODSATag: PersonCompareTo *** */
    # ...as above...
    # Natural ordering: compare by family name only.
    # NOTE(review): defining __eq__ normally sets __hash__ to None, leaving
    # instances unhashable unless the Comparable base supplies __hash__ --
    # confirm before using Person in sets or as dict keys.
    def __eq__(self, other): return self.familyName == other.familyName
    def __ne__(self, other): return self.familyName != other.familyName
    def __lt__(self, other): return self.familyName < other.familyName
    def __le__(self, other): return self.familyName <= other.familyName
    def __gt__(self, other): return self.familyName > other.familyName
    def __ge__(self, other): return self.familyName >= other.familyName
#/* *** ODSAendTag: PersonCompareTo *** */
#/* *** ODSATag: BirthYearComparator *** */
# Note: Python doesn't have comparators like Java does.
# The most similar is to define a comparator-like function:
def birthYearComparator(one, other):
    """Three-way compare two people by birth year: -1, 0, or 1."""
    a, b = one.birthYear, other.birthYear
    return (a > b) - (a < b)
#/* *** ODSAendTag: BirthYearComparator *** */
#/* *** ODSATag: GivenNameComparator *** */
def givenNameComparator(one, other):
    """Three-way compare two people by given name: -1, 0, or 1."""
    a, b = one.givenName, other.givenName
    return (a > b) - (a < b)
#/* *** ODSAendTag: GivenNameComparator *** */
#/* *** ODSATag: FullNameComparator *** */
def fullNameComparator(one, other):
    """Three-way compare by family name, breaking ties on given name."""
    a = (one.familyName, one.givenName)
    b = (other.familyName, other.givenName)
    return (a > b) - (a < b)
#/* *** ODSAendTag: FullNameComparator *** */
#/* *** ODSATag: GetPeople *** */
def getPeople():
    """Return a fresh list of sample people as (given, family, birth) records."""
    records = [
        ("Unsuk", "Chin", 1961),
        ("Anna", "Thorvaldsdóttir", 1977),
        ("Andrea", "Tarrodi", 1981),
        ("Diana", "Čemerytė", 1974),
        ("Elfrida", "Andrée", 1841),
        ("Guy", "d’Hardelot", 1858),
        ("Nadia", "Boulanger", 1887),
        ("Lili", "Boulanger", 1893),
    ]
    return [Person(given, family, born) for given, family, born in records]
print("\n### No order");
#/* *** ODSATag: PrintPeople *** */
people = getPeople()
for p in people: print(p)
#/* *** ODSAendTag: PrintPeople *** */
print("\n### Natural ordering")
#/* *** ODSATag: SortNatural *** */
people = getPeople() # reset the people list
people.sort()
for p in people: print(p)
#/* *** ODSAendTag: SortNatural *** */
print("\n### Ordered by birth year (pre-Java-8 solution)")
#/* *** ODSATag: SortByBirthYear *** */
byBirthYear = functools.cmp_to_key(birthYearComparator)
people = getPeople() # reset the people list
people.sort(key=byBirthYear)
for p in people: print(p)
#/* *** ODSAendTag: SortByBirthYear *** */
print("\n### Ordered by birth year (functional solution)")
#/* *** ODSATag: ByBirthYearFunctional *** */
byBirthYear = lambda person: person.birthYear
#/* *** ODSAendTag: ByBirthYearFunctional *** */
people = getPeople() # reset the people list
people.sort(key=byBirthYear)
for p in people: print(p)
print("\n### Ordered by birth year (using a key extractor)")
#/* *** ODSATag: ByBirthYearKeyExtractor *** */
byBirthYear = operator.attrgetter('birthYear')
#/* *** ODSAendTag: ByBirthYearKeyExtractor *** */
people = getPeople() # reset the people list
people.sort(key=byBirthYear)
for p in people: print(p)
print("\n### Ordered by given name (pre-Java-8 solution)")
#/* *** ODSATag: SortByGivenName *** */
byGivenName = functools.cmp_to_key(givenNameComparator)
people = getPeople() # reset the people list
people.sort(key=byGivenName)
for p in people: print(p)
#/* *** ODSAendTag: SortByGivenName *** */
print("\n### Ordered by given name (functional solution)")
#/* *** ODSATag: ByGivenNameFunctional *** */
byGivenName = lambda person: person.givenName
#/* *** ODSAendTag: ByGivenNameFunctional *** */
people = getPeople() # reset the people list
people.sort(key=byGivenName)
for p in people: print(p)
print("\n### Ordered by given name (using a key extractor)")
#/* *** ODSATag: ByGivenNameKeyExtractor *** */
byGivenName = operator.attrgetter('givenName')
#/* *** ODSAendTag: ByGivenNameKeyExtractor *** */
people = getPeople() # reset the people list
people.sort(key=byGivenName)
for p in people: print(p)
print("\n### Ordered by full name: family name + given name (pre-Java-8 solution)")
#/* *** ODSATag: ByFullName *** */
byFullName = functools.cmp_to_key(fullNameComparator)
#/* *** ODSAendTag: ByFullName *** */
people = getPeople() # reset the people list
people.sort(key=byFullName)
for p in people: print(p)
print("\n### Ordered by full name: family name + given name (functional solution and tuples)")
#/* *** ODSATag: ByFullNameThenComparing *** */
# In Python we can simply create a tuple, and it will sort the way we want
byFullName = lambda person: (person.familyName, person.givenName)
#/* *** ODSAendTag: ByFullNameThenComparing *** */
#/* *** ODSATag: SortByFullName *** */
people = getPeople() # reset the people list
people.sort(key=byFullName)
for p in people: print(p)
#/* *** ODSAendTag: SortByFullName *** */
print("\n### Ordered by Swedish locale, case-insensitive")
print("# Note: There's a bug in Python's Swedish locale, so Č comes after all other letters")
#/* *** ODSATag: BySwedishLocale *** */
import locale
locale.setlocale(locale.LC_COLLATE, 'sv_SE')
bySwedishLocale = lambda person: (locale.strxfrm(person.familyName.casefold()),
locale.strxfrm(person.givenName.casefold()))
# Note: There's a bug in Python's Swedish locale, so Č comes after all other letters
#/* *** ODSAendTag: BySwedishLocale *** */
#/* *** ODSATag: SortBySwedishLocale *** */
people = getPeople() # reset the people list
people.sort(key=bySwedishLocale)
for p in people: print(p)
# Note: Because of a bug in Python's Swedish locale, Diana Čemerytė is still printed last
#/* *** ODSAendTag: SortBySwedishLocale *** */
print("\n### Ordered by Swedish locale, given name first")
bySwedishLocale = lambda person: (locale.strxfrm(person.givenName.casefold()),
locale.strxfrm(person.familyName.casefold()))
people = getPeople() # reset the people list
people.sort(key=bySwedishLocale)
for p in people: print(p)
#/* *** ODSAendTag: ComparatorDemo *** */
| <filename>SourceCode/Python/ChalmersGU/Other/ComparatorExample.py
#/* *** ODSATag: ComparatorDemo *** */
from BaseAPI import Comparable
import functools
import operator
#/* *** ODSATag: PersonCompareTo *** */
#/* *** ODSATag: Person *** */
class Person(Comparable):
#/* *** ODSAendTag: PersonCompareTo *** */
def __init__(self, given, family, birth):
self.givenName = given
self.familyName = family
self.birthYear = birth
def __str__(self):
return f"{self.givenName} {self.familyName} ({self.birthYear})"
#/* *** ODSAendTag: Person *** */
#/* *** ODSATag: PersonCompareTo *** */
# ...as above...
def __eq__(self, other): return self.familyName == other.familyName
def __ne__(self, other): return self.familyName != other.familyName
def __lt__(self, other): return self.familyName < other.familyName
def __le__(self, other): return self.familyName <= other.familyName
def __gt__(self, other): return self.familyName > other.familyName
def __ge__(self, other): return self.familyName >= other.familyName
#/* *** ODSAendTag: PersonCompareTo *** */
#/* *** ODSATag: BirthYearComparator *** */
# Note: Python doesn't have comparators like Java does.
# The most similar is to define a comparator-like function:
def birthYearComparator(one, other):
return (-1 if one.birthYear < other.birthYear else
1 if one.birthYear > other.birthYear else 0)
#/* *** ODSAendTag: BirthYearComparator *** */
#/* *** ODSATag: GivenNameComparator *** */
def givenNameComparator(one, other):
return (-1 if one.givenName < other.givenName else
1 if one.givenName > other.givenName else 0)
#/* *** ODSAendTag: GivenNameComparator *** */
#/* *** ODSATag: FullNameComparator *** */
def fullNameComparator(one, other):
return (-1 if one.familyName < other.familyName else
1 if one.familyName > other.familyName else
-1 if one.givenName < other.givenName else
1 if one.givenName > other.givenName else 0)
#/* *** ODSAendTag: FullNameComparator *** */
#/* *** ODSATag: GetPeople *** */
def getPeople():
return [Person("Unsuk", "Chin", 1961),
Person("Anna", "Thorvaldsdóttir", 1977),
Person("Andrea", "Tarrodi", 1981),
Person("Diana", "Čemerytė", 1974),
Person("Elfrida", "Andrée", 1841),
Person("Guy", "d’Hardelot", 1858),
Person("Nadia", "Boulanger", 1887),
Person("Lili", "Boulanger", 1893),
]
#/* *** ODSAendTag: GetPeople *** */
print("\n### No order");
#/* *** ODSATag: PrintPeople *** */
people = getPeople()
for p in people: print(p)
#/* *** ODSAendTag: PrintPeople *** */
print("\n### Natural ordering")
#/* *** ODSATag: SortNatural *** */
people = getPeople() # reset the people list
people.sort()
for p in people: print(p)
#/* *** ODSAendTag: SortNatural *** */
print("\n### Ordered by birth year (pre-Java-8 solution)")
#/* *** ODSATag: SortByBirthYear *** */
byBirthYear = functools.cmp_to_key(birthYearComparator)
people = getPeople() # reset the people list
people.sort(key=byBirthYear)
for p in people: print(p)
#/* *** ODSAendTag: SortByBirthYear *** */
print("\n### Ordered by birth year (functional solution)")
#/* *** ODSATag: ByBirthYearFunctional *** */
byBirthYear = lambda person: person.birthYear
#/* *** ODSAendTag: ByBirthYearFunctional *** */
people = getPeople() # reset the people list
people.sort(key=byBirthYear)
for p in people: print(p)
print("\n### Ordered by birth year (using a key extractor)")
#/* *** ODSATag: ByBirthYearKeyExtractor *** */
byBirthYear = operator.attrgetter('birthYear')
#/* *** ODSAendTag: ByBirthYearKeyExtractor *** */
people = getPeople() # reset the people list
people.sort(key=byBirthYear)
for p in people: print(p)
print("\n### Ordered by given name (pre-Java-8 solution)")
#/* *** ODSATag: SortByGivenName *** */
byGivenName = functools.cmp_to_key(givenNameComparator)
people = getPeople() # reset the people list
people.sort(key=byGivenName)
for p in people: print(p)
#/* *** ODSAendTag: SortByGivenName *** */
print("\n### Ordered by given name (functional solution)")
#/* *** ODSATag: ByGivenNameFunctional *** */
byGivenName = lambda person: person.givenName
#/* *** ODSAendTag: ByGivenNameFunctional *** */
people = getPeople() # reset the people list
people.sort(key=byGivenName)
for p in people: print(p)
print("\n### Ordered by given name (using a key extractor)")
#/* *** ODSATag: ByGivenNameKeyExtractor *** */
byGivenName = operator.attrgetter('givenName')
#/* *** ODSAendTag: ByGivenNameKeyExtractor *** */
people = getPeople() # reset the people list
people.sort(key=byGivenName)
for p in people: print(p)
print("\n### Ordered by full name: family name + given name (pre-Java-8 solution)")
#/* *** ODSATag: ByFullName *** */
byFullName = functools.cmp_to_key(fullNameComparator)
#/* *** ODSAendTag: ByFullName *** */
people = getPeople() # reset the people list
people.sort(key=byFullName)
for p in people: print(p)
print("\n### Ordered by full name: family name + given name (functional solution and tuples)")
#/* *** ODSATag: ByFullNameThenComparing *** */
# In Python we can simply create a tuple, and it will sort the way we want
byFullName = lambda person: (person.familyName, person.givenName)
#/* *** ODSAendTag: ByFullNameThenComparing *** */
#/* *** ODSATag: SortByFullName *** */
people = getPeople() # reset the people list
people.sort(key=byFullName)
for p in people: print(p)
#/* *** ODSAendTag: SortByFullName *** */
print("\n### Ordered by Swedish locale, case-insensitive")
print("# Note: There's a bug in Python's Swedish locale, so Č comes after all other letters")
#/* *** ODSATag: BySwedishLocale *** */
import locale
locale.setlocale(locale.LC_COLLATE, 'sv_SE')
bySwedishLocale = lambda person: (locale.strxfrm(person.familyName.casefold()),
locale.strxfrm(person.givenName.casefold()))
# Note: There's a bug in Python's Swedish locale, so Č comes after all other letters
#/* *** ODSAendTag: BySwedishLocale *** */
#/* *** ODSATag: SortBySwedishLocale *** */
people = getPeople() # reset the people list
people.sort(key=bySwedishLocale)
for p in people: print(p)
# Note: Because of a bug in Python's Swedish locale, Diana Čemerytė is still printed last
#/* *** ODSAendTag: SortBySwedishLocale *** */
print("\n### Ordered by Swedish locale, given name first")
bySwedishLocale = lambda person: (locale.strxfrm(person.givenName.casefold()),
locale.strxfrm(person.familyName.casefold()))
people = getPeople() # reset the people list
people.sort(key=bySwedishLocale)
for p in people: print(p)
#/* *** ODSAendTag: ComparatorDemo *** */
| en | 0.434655 | #/* *** ODSATag: ComparatorDemo *** */ #/* *** ODSATag: PersonCompareTo *** */ #/* *** ODSATag: Person *** */ #/* *** ODSAendTag: PersonCompareTo *** */ #/* *** ODSAendTag: Person *** */ #/* *** ODSATag: PersonCompareTo *** */ # ...as above... #/* *** ODSAendTag: PersonCompareTo *** */ #/* *** ODSATag: BirthYearComparator *** */ # Note: Python doesn't have comparators like Java does. # The most similar is to define a comparator-like function: #/* *** ODSAendTag: BirthYearComparator *** */ #/* *** ODSATag: GivenNameComparator *** */ #/* *** ODSAendTag: GivenNameComparator *** */ #/* *** ODSATag: FullNameComparator *** */ #/* *** ODSAendTag: FullNameComparator *** */ #/* *** ODSATag: GetPeople *** */ #/* *** ODSAendTag: GetPeople *** */ ### No order"); #/* *** ODSATag: PrintPeople *** */ #/* *** ODSAendTag: PrintPeople *** */ ### Natural ordering") #/* *** ODSATag: SortNatural *** */ # reset the people list #/* *** ODSAendTag: SortNatural *** */ ### Ordered by birth year (pre-Java-8 solution)") #/* *** ODSATag: SortByBirthYear *** */ # reset the people list #/* *** ODSAendTag: SortByBirthYear *** */ ### Ordered by birth year (functional solution)") #/* *** ODSATag: ByBirthYearFunctional *** */ #/* *** ODSAendTag: ByBirthYearFunctional *** */ # reset the people list ### Ordered by birth year (using a key extractor)") #/* *** ODSATag: ByBirthYearKeyExtractor *** */ #/* *** ODSAendTag: ByBirthYearKeyExtractor *** */ # reset the people list ### Ordered by given name (pre-Java-8 solution)") #/* *** ODSATag: SortByGivenName *** */ # reset the people list #/* *** ODSAendTag: SortByGivenName *** */ ### Ordered by given name (functional solution)") #/* *** ODSATag: ByGivenNameFunctional *** */ #/* *** ODSAendTag: ByGivenNameFunctional *** */ # reset the people list ### Ordered by given name (using a key extractor)") #/* *** ODSATag: ByGivenNameKeyExtractor *** */ #/* *** ODSAendTag: ByGivenNameKeyExtractor *** */ # reset the people list ### Ordered by full 
name: family name + given name (pre-Java-8 solution)") #/* *** ODSATag: ByFullName *** */ #/* *** ODSAendTag: ByFullName *** */ # reset the people list ### Ordered by full name: family name + given name (functional solution and tuples)") #/* *** ODSATag: ByFullNameThenComparing *** */ # In Python we can simply create a tuple, and it will sort the way we want #/* *** ODSAendTag: ByFullNameThenComparing *** */ #/* *** ODSATag: SortByFullName *** */ # reset the people list #/* *** ODSAendTag: SortByFullName *** */ ### Ordered by Swedish locale, case-insensitive") #/* *** ODSATag: BySwedishLocale *** */ # Note: There's a bug in Python's Swedish locale, so Č comes after all other letters #/* *** ODSAendTag: BySwedishLocale *** */ #/* *** ODSATag: SortBySwedishLocale *** */ # reset the people list # Note: Because of a bug in Python's Swedish locale, Diana Čemerytė is still printed last #/* *** ODSAendTag: SortBySwedishLocale *** */ ### Ordered by Swedish locale, given name first") # reset the people list #/* *** ODSAendTag: ComparatorDemo *** */ | 3.49067 | 3 |
kite-go/lang/python/pythonparser/epytext/testdata/literal-blank-line.py | kiteco/kiteco-public | 17 | 6614580 | <gh_stars>10-100
# Blank lines are preserved, but the whitespace on a blank
# line isn't. Tab-indented line is replaced by spaces.
def example():
"""
This introduces a literal::
First line, followed by blank...
With a 5-space indented line;
And a 4-tab indented line;
Then followed by 2 blank lines with indent...
Then followed by other paragraph.
Other paragraph.
"""
return 1
| # Blank lines are preserved, but the whitespace on a blank
# line isn't. Tab-indented line is replaced by spaces.
def example():
"""
This introduces a literal::
First line, followed by blank...
With a 5-space indented line;
And a 4-tab indented line;
Then followed by 2 blank lines with indent...
Then followed by other paragraph.
Other paragraph.
"""
return 1 | en | 0.895048 | # Blank lines are preserved, but the whitespace on a blank # line isn't. Tab-indented line is replaced by spaces. This introduces a literal:: First line, followed by blank... With a 5-space indented line; And a 4-tab indented line; Then followed by 2 blank lines with indent... Then followed by other paragraph. Other paragraph. | 2.94228 | 3 |
day13.py | kdrag0n/aoc2021 | 2 | 6614581 | #!/usr/bin/env python3
import sys
def ints(itr):
return [int(i) for i in itr]
with open(sys.argv[1], 'r') as f:
file_lines = [l for l in f.read().strip().split('\n')]
in_nums = []
total = 0
result = 0
other = 0
grid = [[False] * 2000 for i in range(2000)]
folds = []
while True:
for l in file_lines:
if not l:
continue
elif l.startswith('fold'):
_, _, ins = l.split()
axis, pos = ins.split('=')
pos = int(pos)
folds += [(axis, pos)]
else:
x, y = map(int, l.split(','))
grid[y][x] = True
if False:
total += 1
break
def print_grid():
for r in grid:
print(''.join(('#' if c else '.') for c in r))
# print_grid()
print(folds)
for axis, pos in folds:
print(axis, pos)
if axis == 'x':
for x2 in range(pos + 1, 2000):
for y in range(2000):
if not grid[y][x2]:
continue
refp = pos - (x2 - (pos))
if refp < 0:
continue
grid[y][refp] = True
grid[y][x2] = False
elif axis == 'y':
for y2 in range(pos + 1, 2000):
for x in range(2000):
if not grid[y2][x]:
continue
refp = pos - (y2 - (pos))
# print(x, y2)
# print('refp', refp)
if refp < 0:
continue
grid[refp][x] = True
grid[y2][x] = False
# print_grid()
break
x = 0
for r in grid:
for c in r:
if c:
x += 1
print(x)
print(f'Total: {total}')
print(f'Result: {result}')
print(f'Other: {other}')
| #!/usr/bin/env python3
import sys
def ints(itr):
return [int(i) for i in itr]
with open(sys.argv[1], 'r') as f:
file_lines = [l for l in f.read().strip().split('\n')]
in_nums = []
total = 0
result = 0
other = 0
grid = [[False] * 2000 for i in range(2000)]
folds = []
while True:
for l in file_lines:
if not l:
continue
elif l.startswith('fold'):
_, _, ins = l.split()
axis, pos = ins.split('=')
pos = int(pos)
folds += [(axis, pos)]
else:
x, y = map(int, l.split(','))
grid[y][x] = True
if False:
total += 1
break
def print_grid():
for r in grid:
print(''.join(('#' if c else '.') for c in r))
# print_grid()
print(folds)
for axis, pos in folds:
print(axis, pos)
if axis == 'x':
for x2 in range(pos + 1, 2000):
for y in range(2000):
if not grid[y][x2]:
continue
refp = pos - (x2 - (pos))
if refp < 0:
continue
grid[y][refp] = True
grid[y][x2] = False
elif axis == 'y':
for y2 in range(pos + 1, 2000):
for x in range(2000):
if not grid[y2][x]:
continue
refp = pos - (y2 - (pos))
# print(x, y2)
# print('refp', refp)
if refp < 0:
continue
grid[refp][x] = True
grid[y2][x] = False
# print_grid()
break
x = 0
for r in grid:
for c in r:
if c:
x += 1
print(x)
print(f'Total: {total}')
print(f'Result: {result}')
print(f'Other: {other}')
| en | 0.276065 | #!/usr/bin/env python3 # print_grid() # print(x, y2) # print('refp', refp) # print_grid() | 3.122555 | 3 |
Codes.python/P7/P7.py | hanzenglong/robot | 0 | 6614582 | <reponame>hanzenglong/robot
#-------by HYH -------#
import numpy as np
import matplotlib.pyplot as plt
p=[1,0,0,0,0]
u=1
step=100
pExact=0.8
pOvershoot=0.1
pUndershoot=0.1
entropy=np.zeros(step)
plt.figure(figsize=(10,10),dpi=80)
def move(p,u,pExact,pOvershoot,pUndershoot):
n=len(p)
q=np.zeros(n)
for i in range(n):
q[i]=pExact*p[(i-u)%n]
q[i]=q[i]+pOvershoot*p[(i-1-u)%n]
q[i]=q[i]+pUndershoot*p[(i+1-u)%n]
return q
for i in range(step):
p=move(p,u,pExact,pOvershoot,pUndershoot)
entropy[i]=-np.sum(p*np.log2(p))
print(i+1,'\n',p)
x=np.arange(0,step)
plt.plot(x,entropy,'g-',x,entropy,'r^')
plt.xlabel('Motion step')
plt.ylabel('Entropy')
plt.show()
| #-------by HYH -------#
import numpy as np
import matplotlib.pyplot as plt
p=[1,0,0,0,0]
u=1
step=100
pExact=0.8
pOvershoot=0.1
pUndershoot=0.1
entropy=np.zeros(step)
plt.figure(figsize=(10,10),dpi=80)
def move(p,u,pExact,pOvershoot,pUndershoot):
n=len(p)
q=np.zeros(n)
for i in range(n):
q[i]=pExact*p[(i-u)%n]
q[i]=q[i]+pOvershoot*p[(i-1-u)%n]
q[i]=q[i]+pUndershoot*p[(i+1-u)%n]
return q
for i in range(step):
p=move(p,u,pExact,pOvershoot,pUndershoot)
entropy[i]=-np.sum(p*np.log2(p))
print(i+1,'\n',p)
x=np.arange(0,step)
plt.plot(x,entropy,'g-',x,entropy,'r^')
plt.xlabel('Motion step')
plt.ylabel('Entropy')
plt.show() | pt | 0.114208 | #-------by HYH -------# | 3.160393 | 3 |
excel.py | fanfanadmin/tools | 0 | 6614583 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "ffadmin"
"""
ex:
将fandaguai文件夹下的所有以xls结尾的表格读取生成到当前目录下的新表格
并去重
$python excel_handle.py fandaguai
"""
import xlrd,xlwt,os,datetime,sys
def read_xls(excel_directory):
#获取文件夹下的xls文件
__excel_data = []
list = os.listdir(excel_directory)
for i in list:
if os.path.splitext(i)[1] == '.xls':
#print(i)
file_xls = xlrd.open_workbook("%s/%s"%(excel_directory,i))
#获取第0个索引的sheet
sheet_index = file_xls.sheet_by_index(0)
num_rows = sheet_index.nrows
#print(num_rows)
#获取说有表格内容,添加到list
for i in range(1,num_rows):
test = sheet_index.row_values(i)
__excel_data.append(test)
__new_excel_data = []
for cont in __excel_data:
if cont not in __new_excel_data:
__new_excel_data.append(cont)
return (__new_excel_data)
def write_xls(excel_directory):
__book = xlwt.Workbook() # 创建一个Excel
__sheet1 = __book.add_sheet('data') # 在其中创建一个名为data的sheet
__data = read_xls(excel_directory)
__l12 = list(range(len(__data)))
__dict1 = dict(zip(__l12,__data))
#print(dict1)
ldata = []
num = [a for a in __dict1]
# for循环指定取出key值存入num中
num.sort()
# 字典数据取出后无需,需要先排序
for x in num:
# for循环将data字典中的键和值分批的保存在ldata中
t = [int(x)]
for a in __dict1[x]:
t.append(a)
ldata.append(t)
for i, p in enumerate(ldata):
# 将数据写入文件,i是enumerate()函数返回的序号数
for j, q in enumerate(p):
#print(i,j,q)
__sheet1.write(i, j, q)
__book.save("demo.xls")
if __name__ == '__main__':
start_time = datetime.datetime.now()
#print(start_time)
write_xls(sys.argv[1])
end_time = datetime.datetime.now()
total_time = end_time - start_time
#print(end_time)
print("用时:%s"%total_time)
print("===================end=====================")
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "ffadmin"
"""
ex:
将fandaguai文件夹下的所有以xls结尾的表格读取生成到当前目录下的新表格
并去重
$python excel_handle.py fandaguai
"""
import xlrd,xlwt,os,datetime,sys
def read_xls(excel_directory):
#获取文件夹下的xls文件
__excel_data = []
list = os.listdir(excel_directory)
for i in list:
if os.path.splitext(i)[1] == '.xls':
#print(i)
file_xls = xlrd.open_workbook("%s/%s"%(excel_directory,i))
#获取第0个索引的sheet
sheet_index = file_xls.sheet_by_index(0)
num_rows = sheet_index.nrows
#print(num_rows)
#获取说有表格内容,添加到list
for i in range(1,num_rows):
test = sheet_index.row_values(i)
__excel_data.append(test)
__new_excel_data = []
for cont in __excel_data:
if cont not in __new_excel_data:
__new_excel_data.append(cont)
return (__new_excel_data)
def write_xls(excel_directory):
__book = xlwt.Workbook() # 创建一个Excel
__sheet1 = __book.add_sheet('data') # 在其中创建一个名为data的sheet
__data = read_xls(excel_directory)
__l12 = list(range(len(__data)))
__dict1 = dict(zip(__l12,__data))
#print(dict1)
ldata = []
num = [a for a in __dict1]
# for循环指定取出key值存入num中
num.sort()
# 字典数据取出后无需,需要先排序
for x in num:
# for循环将data字典中的键和值分批的保存在ldata中
t = [int(x)]
for a in __dict1[x]:
t.append(a)
ldata.append(t)
for i, p in enumerate(ldata):
# 将数据写入文件,i是enumerate()函数返回的序号数
for j, q in enumerate(p):
#print(i,j,q)
__sheet1.write(i, j, q)
__book.save("demo.xls")
if __name__ == '__main__':
start_time = datetime.datetime.now()
#print(start_time)
write_xls(sys.argv[1])
end_time = datetime.datetime.now()
total_time = end_time - start_time
#print(end_time)
print("用时:%s"%total_time)
print("===================end=====================")
| zh | 0.599617 | #! /usr/bin/env python # -*- coding: utf-8 -*- # __author__ = "ffadmin" ex: 将fandaguai文件夹下的所有以xls结尾的表格读取生成到当前目录下的新表格 并去重 $python excel_handle.py fandaguai #获取文件夹下的xls文件 #print(i) #获取第0个索引的sheet #print(num_rows) #获取说有表格内容,添加到list # 创建一个Excel # 在其中创建一个名为data的sheet #print(dict1) # for循环指定取出key值存入num中 # 字典数据取出后无需,需要先排序 # for循环将data字典中的键和值分批的保存在ldata中 # 将数据写入文件,i是enumerate()函数返回的序号数 #print(i,j,q) #print(start_time) #print(end_time) | 3.049735 | 3 |
mtlearn/evaluate.py | okuraoy/ml-app | 1 | 6614584 | <reponame>okuraoy/ml-app
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by guanlei on 2017/7/20
"""
from sklearn.metrics import explained_variance_score, mean_squared_error
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
import numpy as np
rcParams['figure.figsize'] = 15, 6
# 用来正常显示中文标签
plt.rcParams['font.sans-serif'] = ['SimHei']
# 用来正常显示负号
rcParams['axes.unicode_minus'] = False
def plot_predict_result(predict, actual):
mse = mean_squared_error(actual, predict)
evs = explained_variance_score(actual, predict)
# predicts = pd.DataFrame(np.array(predicts), index=test.index)
plt.plot(actual.inx, actual.data, label="actual", color='blue')
plt.plot(actual.inx, predict, label="predict", color='red')
plt.legend(loc='best')
plt.title('Mean squared error: %.4f,Explained variance score:%.4f' % (mse, evs))
plt.show()
def plot_importance(importance, index, features):
pos = np.arange(index.shape[0]) + .5
# plt.subplot(1, 2, 2)
plt.barh(pos, importance[index], align='center')
plt.yticks(pos, features[index])
plt.xlabel('Importance')
plt.title('Feature Importance')
plt.show()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by guanlei on 2017/7/20
"""
from sklearn.metrics import explained_variance_score, mean_squared_error
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
import numpy as np
rcParams['figure.figsize'] = 15, 6
# 用来正常显示中文标签
plt.rcParams['font.sans-serif'] = ['SimHei']
# 用来正常显示负号
rcParams['axes.unicode_minus'] = False
def plot_predict_result(predict, actual):
mse = mean_squared_error(actual, predict)
evs = explained_variance_score(actual, predict)
# predicts = pd.DataFrame(np.array(predicts), index=test.index)
plt.plot(actual.inx, actual.data, label="actual", color='blue')
plt.plot(actual.inx, predict, label="predict", color='red')
plt.legend(loc='best')
plt.title('Mean squared error: %.4f,Explained variance score:%.4f' % (mse, evs))
plt.show()
def plot_importance(importance, index, features):
pos = np.arange(index.shape[0]) + .5
# plt.subplot(1, 2, 2)
plt.barh(pos, importance[index], align='center')
plt.yticks(pos, features[index])
plt.xlabel('Importance')
plt.title('Feature Importance')
plt.show() | en | 0.246328 | #!/usr/bin/python # -*- coding: utf-8 -*- Created by guanlei on 2017/7/20 # 用来正常显示中文标签 # 用来正常显示负号 # predicts = pd.DataFrame(np.array(predicts), index=test.index) # plt.subplot(1, 2, 2) | 2.976577 | 3 |
utils/grayscale.py | 0xd3ba/seam-carving | 0 | 6614585 | # grayscale.py -- Module containing the functions to convert a RGB image to grayscale
def to_grayscale(image_np):
"""
image_np: 2D numpy array of shape (height, width, channels)
Converts the image to grayscale image and returns it
"""
assert len(image_np.shape) >= 2, f"Image must be 2D, provided {len(image_np.shape)}D instead"
# If the number of dimensions are 2, then the image is already in grayscale
if len(image_np.shape) == 2:
return image_np
# Convert it to grayscale using weighted sum of the channel intensities
return (image_np[:, :, 0]*0.299 +
image_np[:, :, 1]*0.587 +
image_np[:, :, 2]*0.114
)
| # grayscale.py -- Module containing the functions to convert a RGB image to grayscale
def to_grayscale(image_np):
"""
image_np: 2D numpy array of shape (height, width, channels)
Converts the image to grayscale image and returns it
"""
assert len(image_np.shape) >= 2, f"Image must be 2D, provided {len(image_np.shape)}D instead"
# If the number of dimensions are 2, then the image is already in grayscale
if len(image_np.shape) == 2:
return image_np
# Convert it to grayscale using weighted sum of the channel intensities
return (image_np[:, :, 0]*0.299 +
image_np[:, :, 1]*0.587 +
image_np[:, :, 2]*0.114
)
| en | 0.766879 | # grayscale.py -- Module containing the functions to convert a RGB image to grayscale image_np: 2D numpy array of shape (height, width, channels) Converts the image to grayscale image and returns it # If the number of dimensions are 2, then the image is already in grayscale # Convert it to grayscale using weighted sum of the channel intensities | 4.072424 | 4 |
tests/h/streamer/kill_switch_views_test.py | tgiardina/rpp-h | 2,103 | 6614586 | from h.streamer.kill_switch_views import not_found
def test_not_found_view(pyramid_request):
response = not_found(Exception(), pyramid_request)
assert response.status_code == 429
assert response.content_type is None
| from h.streamer.kill_switch_views import not_found
def test_not_found_view(pyramid_request):
response = not_found(Exception(), pyramid_request)
assert response.status_code == 429
assert response.content_type is None
| none | 1 | 1.779932 | 2 | |
hydroengine_service/cli.py | schnjaso2/hydro-engine-service | 0 | 6614587 | <filename>hydroengine_service/cli.py
# -*- coding: utf-8 -*-
"""Console script for hydroengine_service."""
import sys
import click
import hydroengine_service.main
@click.command()
def main(args=None):
"""Console script for hydroengine_service."""
hydroengine_service.main.app.run(host='127.0.0.1', port=8080, debug=True)
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| <filename>hydroengine_service/cli.py
# -*- coding: utf-8 -*-
"""Console script for hydroengine_service."""
import sys
import click
import hydroengine_service.main
@click.command()
def main(args=None):
"""Console script for hydroengine_service."""
hydroengine_service.main.app.run(host='127.0.0.1', port=8080, debug=True)
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| en | 0.760233 | # -*- coding: utf-8 -*- Console script for hydroengine_service. Console script for hydroengine_service. # pragma: no cover | 2.076651 | 2 |
test.py | 77Sera/Vegetable-Fuzzer | 3 | 6614588 | import requests
url = 'http://localhost:89/sqli_labs/Less-2/?id=1'
postdata = "uname=1&passwd=2&submit=Submit"
req = requests.post(url=url,data=postdata)
with open("test.html","w") as file:
file.write(req.text) | import requests
url = 'http://localhost:89/sqli_labs/Less-2/?id=1'
postdata = "uname=1&passwd=2&submit=Submit"
req = requests.post(url=url,data=postdata)
with open("test.html","w") as file:
file.write(req.text) | none | 1 | 2.511129 | 3 | |
python/ch_14_neopixel.py | huaiyang/raspberrypi_cookbook_ed3 | 65 | 6614589 | import time
import board
from neopixel import NeoPixel
led_count = 5
red = (100, 0, 0)
no_color = (0, 0, 0)
strip = NeoPixel(board.D18, led_count, auto_write=False)
def clear():
for i in range(0, led_count):
strip[i] = no_color
strip.show()
i = 0
while True:
clear()
strip[i] = red
strip.show()
time.sleep(1)
i += 1
if i >= led_count:
i = 0
| import time
import board
from neopixel import NeoPixel
led_count = 5
red = (100, 0, 0)
no_color = (0, 0, 0)
strip = NeoPixel(board.D18, led_count, auto_write=False)
def clear():
for i in range(0, led_count):
strip[i] = no_color
strip.show()
i = 0
while True:
clear()
strip[i] = red
strip.show()
time.sleep(1)
i += 1
if i >= led_count:
i = 0
| none | 1 | 2.999298 | 3 | |
pymatgen/analysis/chemenv/coordination_environments/coordination_geometries_files/__init__.py | exenGT/pymatgen | 921 | 6614590 | <reponame>exenGT/pymatgen<gh_stars>100-1000
"""
Coordination geometry files.
"""
| """
Coordination geometry files.
""" | en | 0.494402 | Coordination geometry files. | 1.000256 | 1 |
cli/tfthelper.py | MapleHock/TFTHelper | 1 | 6614591 | <reponame>MapleHock/TFTHelper<gh_stars>1-10
import sys
import numpy as np
from scipy.stats import binom
from scipy.stats import hypergeom
# probMatrix[lv-1, tier-1]: probability that a single shop slot shows a card of
# the given tier when the little legend is at level lv (rows are levels 1-9,
# columns are tiers 1-5; each row sums to 1). Used as the per-slot success
# probability in the binomial stage of GetCardDrawnPDFArray.
probMatrix = np.array(
    [[1.0, 0, 0, 0, 0],
    [1.0, 0, 0, 0, 0],
    [0.75, 0.25, 0, 0, 0],
    [0.55, 0.30, 0.15, 0, 0],
    [0.45, 0.33, 0.20, 0.02, 0],
    [0.35, 0.35, 0.25, 0.05, 0],
    [0.19, 0.35, 0.30, 0.15, 0.01],
    [0.10, 0.25, 0.35, 0.25, 0.05],
    [0.10, 0.15, 0.30, 0.30, 0.15]]
    )
# poolSize[tier-1]: copies of each individual card available in the shared pool.
poolSize = [29, 22, 18, 12, 10]
# tierSpeciesNum[tier-1]: number of distinct cards (species) in each tier, so the
# whole tier pool holds poolSize[t] * tierSpeciesNum[t] cards.
tierSpeciesNum = [13, 13, 13, 11, 8]
def GetCardDrawnPDFArray(Lv, targetTier, numRolling, targetExist = 0, sameTierExist = 0):
    '''
    Return the pmf array of how many target cards are obtained in `numRolling` shop rolls.
    Lv            - level of the little legend (row index into probMatrix)
    targetTier    - tier (1-5) of the card being hunted
    targetExist   - copies of the target card already drawn out of the pool
    sameTierExist - other same-tier copies already drawn (excluding targetExist)
    Three-stage computation:
      1. binomial pmf of how many of the 5*numRolling slots show the target tier;
      2. conditioned on that count, a hypergeometric pmf of how many of those
         slots are the target card, drawn from the depleted tier pool;
      3. the conditional pmfs are mixed by the law of total probability.
    '''
    slot_hit_prob = probMatrix[Lv - 1, targetTier - 1]
    slots = numRolling * 5
    # Stage 1: distribution of the number of slots landing on the target tier.
    tier_count_pdf = binom.pmf(np.arange(0, slots + 1), slots, slot_hit_prob)
    # Remaining copies of the target card, and remaining cards in the tier pool.
    target_left = poolSize[targetTier - 1] - targetExist
    pool_left = poolSize[targetTier - 1] * tierSpeciesNum[targetTier - 1] - targetExist - sameTierExist
    max_hits = min(slots, target_left)
    result = np.zeros(max_hits + 1)
    for tier_count in range(0, min(slots, pool_left) + 1):
        # Stage 2: given `tier_count` same-tier slots, pmf of the target hits.
        cond_pdf = hypergeom.pmf(np.arange(0, tier_count + 1), pool_left, tier_count, target_left)
        if (cond_pdf.size == 1):
            # Degenerate case (zero tier slots): zero target hits with certainty.
            cond_pdf = np.zeros(max_hits + 1)
            cond_pdf[0] = 1
        # Bring the conditional pmf to the fixed length max_hits + 1.
        if (cond_pdf.size < max_hits + 1):
            cond_pdf = np.pad(cond_pdf, (0, max_hits - cond_pdf.size + 1), 'constant', constant_values = (0, 0))
        else:
            cond_pdf = cond_pdf[0 : max_hits + 1]
        # Stage 3: weight by the probability of seeing `tier_count` tier slots.
        result += cond_pdf * tier_count_pdf[tier_count]
    return result
def GetStopTimePDFArray(Lv, targetTier, MinNum, targetExist = 0, sameTierExist = 0):
    '''
    Return the pmf array of rolls needed under a "roll until MinNum targets" policy.
    Lv            - level of the little legend
    targetTier    - tier of the target card
    MinNum        - minimum number of target cards at which rolling stops
    targetExist   - copies of the target card already drawn
    sameTierExist - other same-tier copies already drawn (excluding targetExist)
    P(stop exactly at roll t) is the complement of "stopped at some earlier roll"
    plus "rolled t times but still hold fewer than MinNum targets".
    '''
    pdf = []
    pdf.append(0)  # stopping before the first roll is impossible
    tail_threshold = 1e-2
    rolls = 1
    accumulated = 0  # probability mass of stopping strictly before `rolls`
    while(True):
        draw_pdf = GetCardDrawnPDFArray(Lv, targetTier, rolls, targetExist, sameTierExist)
        # Probability of holding fewer than MinNum targets after `rolls` rolls.
        prob_short = sum(draw_pdf[0:min(5 * rolls + 1, MinNum)])
        prob_stop_now = 1 - accumulated - prob_short
        pdf.append(prob_stop_now)
        # Stop once the current term and the remaining tail are both negligible.
        if prob_stop_now * rolls < tail_threshold and 1 - accumulated < tail_threshold / 100:
            break
        rolls += 1
        accumulated += prob_stop_now
    return np.array(pdf)
def PrintDistribution(PDFArray):
    '''
    Pretty-print a pmf array together with its mean and standard deviation.
    Rows are printed until the cumulative probability exceeds 1 - 1e-4;
    probabilities at or below 1e-4 are shown in scientific notation.
    '''
    support = np.arange(0, PDFArray.size)
    mean = np.sum(support * PDFArray)
    variance = np.sum(support ** 2 * PDFArray) - mean ** 2
    std = np.sqrt(variance)
    print('num\tprob')
    cumulative = 0
    for k in range(0, PDFArray.size):
        prob = PDFArray[k]
        if (prob > 1e-4):
            print('%d \t%.2f%%' %(k, prob * 100))
        else:
            print('%d \t%.2e' %(k, prob))
        cumulative += prob
        if (cumulative > 1 - 1e-4):
            break
    print('mean: %.2f' %(mean))
    print('std: %.2f' %(std))
def PrintRollingProbTable():
    '''
    Print the per-level shop probabilities (as percentages) for each tier,
    followed by the per-tier pool sizes.
    '''
    tiers = range(1, 6)
    print('Lv/Tier\t 1 \t 2 \t 3 \t 4 \t 5 ')
    for level in range(2, 10):
        row = '%d \t' %(level)
        for tier in tiers:
            row += '%3d\t' % (probMatrix[level - 1, tier - 1] * 100)
        print(row)
    print('')
    footer = 'Poolsize'
    for tier in tiers:
        footer += '%3d\t' %(poolSize[tier - 1])
    print(footer)
    print('')
if __name__ == '__main__':
    # CLI dispatch on the first argument; every recognised subcommand exits
    # explicitly so only an unknown subcommand reaches the final branch.
    if(len(sys.argv) == 1):
        print('no more input arguments, please use -h or --help subcommand to get help')
        sys.exit(0)
    if (sys.argv[1] == '-h' or sys.argv[1] == '--help'):
        print('usage:\n1. tfthelper -r \t to show rolling prob table at each level\n2. tfthelper -d <Lv> <target tier> <numRolling> [#target drawn (Default 0)] [#other same tier cards drawn(Default 0)] \t return the distribution and statistics of the number of target you get(given rolling time)\n3.tfthelper -s <Lv> <target tier> <Count for Stopping> [#target drawn(Default 0)] [#other same tier cards drawn(Default 0)]\t return the distribution and statistics of the number of rolling(given the target counts of stopping)')
        sys.exit(0)
    if (sys.argv[1] == '-r' or sys.argv[1] == '--rtable'):
        PrintRollingProbTable()
        sys.exit(0)
    if (sys.argv[1] == '-d' or sys.argv[1] == '--drawout'):
        # Up to five positional ints: Lv, targetTier, numRolling, targetExist,
        # sameTierExist; missing ones default to 0.  Clamp to 5 so extra
        # arguments no longer raise IndexError when filling `arg`.
        arg = [0, 0, 0, 0, 0]
        for i in range(min(len(sys.argv) - 2, 5)):
            arg[i] = int(sys.argv[i + 2])
        targetDrawPDFArray = GetCardDrawnPDFArray(arg[0], arg[1], arg[2], arg[3], arg[4])
        PrintDistribution(targetDrawPDFArray)
        sys.exit(0)
    if (sys.argv[1] == '-s' or sys.argv[1] == '--stoptime'):
        # Same argument layout, but the third int is the stopping count MinNum.
        arg = [0, 0, 0, 0, 0]
        for i in range(min(len(sys.argv) - 2, 5)):
            arg[i] = int(sys.argv[i + 2])
        stoptimePDFArray = GetStopTimePDFArray(arg[0], arg[1], arg[2], arg[3], arg[4])
        PrintDistribution(stoptimePDFArray)
        sys.exit(0)
    # Previously an unrecognised subcommand fell through and the program ended
    # silently; report it and exit with a non-zero status instead.
    print('unknown subcommand %s, please use -h or --help subcommand to get help' % sys.argv[1])
    sys.exit(1)
| import sys
import numpy as np
from scipy.stats import binom
from scipy.stats import hypergeom
# probMatrix[lv-1, tier-1]: probability that a single shop slot shows a card of
# the given tier when the little legend is at level lv (rows are levels 1-9,
# columns are tiers 1-5; each row sums to 1). Used as the per-slot success
# probability in the binomial stage of GetCardDrawnPDFArray.
probMatrix = np.array(
    [[1.0, 0, 0, 0, 0],
    [1.0, 0, 0, 0, 0],
    [0.75, 0.25, 0, 0, 0],
    [0.55, 0.30, 0.15, 0, 0],
    [0.45, 0.33, 0.20, 0.02, 0],
    [0.35, 0.35, 0.25, 0.05, 0],
    [0.19, 0.35, 0.30, 0.15, 0.01],
    [0.10, 0.25, 0.35, 0.25, 0.05],
    [0.10, 0.15, 0.30, 0.30, 0.15]]
    )
# poolSize[tier-1]: copies of each individual card available in the shared pool.
poolSize = [29, 22, 18, 12, 10]
# tierSpeciesNum[tier-1]: number of distinct cards (species) in each tier, so the
# whole tier pool holds poolSize[t] * tierSpeciesNum[t] cards.
tierSpeciesNum = [13, 13, 13, 11, 8]
def GetCardDrawnPDFArray(Lv, targetTier, numRolling, targetExist = 0, sameTierExist = 0):
    '''
    Return the pmf array of how many target cards are obtained in `numRolling` shop rolls.
    Lv            - level of the little legend (row index into probMatrix)
    targetTier    - tier (1-5) of the card being hunted
    targetExist   - copies of the target card already drawn out of the pool
    sameTierExist - other same-tier copies already drawn (excluding targetExist)
    Three-stage computation:
      1. binomial pmf of how many of the 5*numRolling slots show the target tier;
      2. conditioned on that count, a hypergeometric pmf of how many of those
         slots are the target card, drawn from the depleted tier pool;
      3. the conditional pmfs are mixed by the law of total probability.
    '''
    slot_hit_prob = probMatrix[Lv - 1, targetTier - 1]
    slots = numRolling * 5
    # Stage 1: distribution of the number of slots landing on the target tier.
    tier_count_pdf = binom.pmf(np.arange(0, slots + 1), slots, slot_hit_prob)
    # Remaining copies of the target card, and remaining cards in the tier pool.
    target_left = poolSize[targetTier - 1] - targetExist
    pool_left = poolSize[targetTier - 1] * tierSpeciesNum[targetTier - 1] - targetExist - sameTierExist
    max_hits = min(slots, target_left)
    result = np.zeros(max_hits + 1)
    for tier_count in range(0, min(slots, pool_left) + 1):
        # Stage 2: given `tier_count` same-tier slots, pmf of the target hits.
        cond_pdf = hypergeom.pmf(np.arange(0, tier_count + 1), pool_left, tier_count, target_left)
        if (cond_pdf.size == 1):
            # Degenerate case (zero tier slots): zero target hits with certainty.
            cond_pdf = np.zeros(max_hits + 1)
            cond_pdf[0] = 1
        # Bring the conditional pmf to the fixed length max_hits + 1.
        if (cond_pdf.size < max_hits + 1):
            cond_pdf = np.pad(cond_pdf, (0, max_hits - cond_pdf.size + 1), 'constant', constant_values = (0, 0))
        else:
            cond_pdf = cond_pdf[0 : max_hits + 1]
        # Stage 3: weight by the probability of seeing `tier_count` tier slots.
        result += cond_pdf * tier_count_pdf[tier_count]
    return result
def GetStopTimePDFArray(Lv, targetTier, MinNum, targetExist = 0, sameTierExist = 0):
    '''
    Returns the Probability Density/mass Function array of the "rolling" you need for "roll until reach the min number target card" policy.
    lv - level of the little legend
    targetTier - the tier of your target card
    MinNum - the minium number of target card to stop rolling
    targetExist - the number of your target card that has been drawn
    sameTierExist - the number of cards in the same tier with the target that has been drawn(exclude "targetExist")
    Calculates by complementary of A."stop at after t rolling" and B. "rolling t times but get less than minNum" or "stop after 1,2,..,t-1 rolling"
    '''
    stopTimePDFArray = []
    # Index 0 is a placeholder: you cannot stop before rolling at least once.
    stopTimePDFArray.append(0)
    # Truncate the tail once the per-step mass and the remaining total
    # probability are both negligible.
    breakThreshold = 1e-2
    stopTime = 1
    PrSum = 0
    while(True):
        targetDrawPDFArray = GetCardDrawnPDFArray(Lv, targetTier, stopTime, targetExist, sameTierExist)
        # P(stop exactly at stopTime) = 1 - P(stopped at an earlier time)
        # - P(still fewer than MinNum target copies after stopTime rolls).
        PrStopTime = 1 - PrSum - sum(targetDrawPDFArray[0:min(5 * stopTime + 1, MinNum)])
        stopTimePDFArray.append(PrStopTime)
        # NOTE: the break test uses PrSum *before* adding the current step's
        # mass (PrSum is updated after the check, below).
        if PrStopTime * stopTime < breakThreshold and 1 - PrSum < breakThreshold / 100:
            break
        stopTime += 1
        PrSum += PrStopTime
    return np.array(stopTimePDFArray)
def PrintDistribution(PDFArray):
    """Pretty-print a discrete probability mass function.

    PDFArray -- 1-D numpy array with PDFArray[i] = P(X == i).

    Prints one line per outcome (percentage for non-negligible mass,
    scientific notation otherwise), stopping once the accumulated mass is
    within 1e-4 of 1, then prints the mean and the standard deviation.
    """
    getNumArray = np.arange(0, PDFArray.size)
    mean = np.sum(getNumArray * PDFArray)
    # Var(X) = E[X^2] - E[X]^2; clamp at 0 so floating-point round-off can
    # never feed a tiny negative value into sqrt (which would print "nan").
    variance = np.sum(getNumArray ** 2 * PDFArray) - mean ** 2
    std = np.sqrt(max(variance, 0.0))
    probSum = 0
    print('num\tprob')
    for i in range(0, PDFArray.size):
        if (PDFArray[i] > 1e-4):
            print('%d \t%.2f%%' % (i, PDFArray[i] * 100))
        else:
            print('%d \t%.2e' % (i, PDFArray[i]))
        probSum += PDFArray[i]
        # Stop printing once essentially all the mass has been covered.
        if (probSum > 1 - 1e-4):
            break
    print('mean: %.2f' % (mean))
    print('std: %.2f' % (std))
def PrintRollingProbTable():
    """Print the per-level tier probabilities (as truncated percentages)
    followed by the per-tier pool sizes, using the module-level tables."""
    print('Lv/Tier\t 1 \t 2 \t 3 \t 4 \t 5 ')
    for level in range(2, 10):
        row = '%d \t' % level
        row += ''.join('%3d\t' % (probMatrix[level - 1, col] * 100) for col in range(5))
        print(row)
    print('')
    tail = 'Poolsize' + ''.join('%3d\t' % poolSize[col] for col in range(5))
    print(tail)
if __name__ == '__main__':
    # Hand-rolled CLI dispatch.  Relies on sys (presumably imported earlier
    # in the file -- not visible in this chunk).  All positional arguments
    # after the mode flag are integers; missing ones default to 0.
    if(len(sys.argv) == 1):
        print('no more input arguments, please use -h or --help subcommand to get help')
        sys.exit(0)
    if (sys.argv[1] == '-h' or sys.argv[1] == '--help'):
        print('usage:\n1. tfthelper -r \t to show rolling prob table at each level\n2. tfthelper -d <Lv> <target tier> <numRolling> [#target drawn (Default 0)] [#other same tier cards drawn(Default 0)] \t return the distribution and statistics of the number of target you get(given rolling time)\n3.tfthelper -s <Lv> <target tier> <Count for Stopping> [#target drawn(Default 0)] [#other same tier cards drawn(Default 0)]\t return the distribution and statistics of the number of rolling(given the target counts of stopping)')
        sys.exit(0)
    if (sys.argv[1] == '-r' or sys.argv[1] == '--rtable'):
        PrintRollingProbTable()
        sys.exit(0)
    if (sys.argv[1] == '-d' or sys.argv[1] == '--drawout'):
        # Positional ints: Lv, targetTier, numRolling, targetExist, sameTierExist.
        arg = [0, 0, 0, 0, 0]
        for i in range(len(sys.argv) - 2):
            arg[i] = int(sys.argv[i + 2])
        targetDrawPDFArray = GetCardDrawnPDFArray(arg[0], arg[1], arg[2], arg[3], arg[4])
        PrintDistribution(targetDrawPDFArray)
        sys.exit(0)
    if (sys.argv[1] == '-s' or sys.argv[1] == '--stoptime'):
        # Positional ints: Lv, targetTier, MinNum, targetExist, sameTierExist.
        arg = [0, 0, 0, 0, 0]
        for i in range(len(sys.argv) - 2):
            arg[i] = int(sys.argv[i + 2])
        stoptimePDFArray = GetStopTimePDFArray(arg[0], arg[1], arg[2], arg[3], arg[4])
        PrintDistribution(stoptimePDFArray)
        sys.exit(0)
TemaLib/tema/guidance/greedyguidance.py | tema-tut/tema-tg | 0 | 6614592 | <reponame>tema-tut/tema-tg
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Greedy guidance is a breadth first searching algorithm that
returns the shortest path improving coverage. If one of the search
limits is reached, a random path is selected.
Greedy guidance reads the following parameter values:
- max_states (positive integer, default: 10000)
  The number of states the breadth-first search algorithm expands in a single
  search round.
- max_seconds (positive value, default: 3600)
  The maximum amount of time in seconds a single search can last.
"""
# Human-readable version tag for this guidance module.
version='wormguidance based on greedyguidance: 0.beta'
from tema.guidance.guidance import Guidance as GuidanceBase
from tema.model.model import Transition
import random
import time
import re
# Internal search-mode flags; only GoodState and UglyState are referenced
# in this file (OnlyJump / SelectJorS appear unused here).
GoodState, OnlyJump, UglyState, SelectJorS = range(4)
class StopCondition:
    """Callable deciding when the breadth-first search must give up.

    The search stops once either the time budget ("max_seconds" parameter,
    default 3600 s) has elapsed, or the mapping of closed states has grown
    to "max_states" entries.  The state cap is only enforced when the
    "max_states" parameter is truthy.
    """
    def __init__(self, prm_src, sized_dict, start_time):
        self._closed_states = sized_dict
        self._began_at = start_time
        self._state_cap = prm_src.getParameter("max_states")
        self._deadline = prm_src.getParameter("max_seconds", 3600)
    def __call__(self):
        # True as soon as the wall-clock budget is spent.
        expired = (time.time() - self._began_at) >= self._deadline
        if not self._state_cap:
            return expired
        return expired or len(self._closed_states) >= self._state_cap
class Guidance(GuidanceBase):
    """Worm/greedy guidance (Python 2 code).

    Breadth-first searches the test model for the shortest path towards
    actions hinted by the coverage requirement; when the search limits are
    hit it falls back to a random frontier path, or retreats towards
    SLEEPts actions.
    """
    def __init__(self):
        GuidanceBase.__init__(self)
        # Transitions left over from a previous search, executed one by one.
        self._stored_path=[]
        self._random_select=random.Random(time.time()).choice
        # Matches the "sleep" transitions used as retreat targets.
        self._sleep_ts_re = re.compile(r"SLEEPts.*")
    def _search_transition_by_name(self, from_state, a_name):
        """Return the outgoing transition of from_state whose action name
        equals a_name, or None when there is none."""
        for trs in from_state.getOutTransitions() :
            if str( trs.getAction()) == a_name :
                return trs
        return None
    def _get_select_set(self, state, closed):
        """Return the outgoing transitions of state whose destination has
        not yet been visited (i.e. is not a key of the closed mapping)."""
        rval=[]
        for trs in state.getOutTransitions():
            if str(trs.getDestState()) not in closed:
                rval.append(trs)
        return rval
    def _construct_path_to(self, transition, closed):
        """Rebuild the transition path leading to transition by walking the
        closed mapping backwards; the artificial root transition (source
        state None) is dropped from the result."""
        rval=[transition]
        s=rval[0].getSourceState()
        while s :
            rval[0:0]=[closed[str(s)]]
            s=rval[0].getSourceState()
        return rval[1:]
    def _breadth_first_search(self, from_state, target_actions):
        """Breadth-first search from from_state for any action name in
        target_actions.

        Returns (path, True) when a target action was found, (path, False)
        when the search limits were reached (path is then a random frontier
        path), and (None, False) when the reachable graph was exhausted.
        """
        self.setParameter("max_states",self.getParameter("max_states",10000))
        closed={}
        # Artificial root transition (no source state) makes the path
        # reconstruction in _construct_path_to uniform.
        waiting=[Transition(None,None,from_state)]
        stop_condition=StopCondition(self,closed,self._start_time)
        while waiting and not stop_condition() :
            current_trans = waiting.pop(0)
            current_state = current_trans.getDestState()
            if not closed.has_key(str(current_state)) :
                closed[str(current_state)] = current_trans
                for trs in current_state.getOutTransitions():
                    if str(trs.getAction()) in target_actions :
                        self._forbiden_set=set()
                        return (self._construct_path_to(trs, closed), True)
                    elif str(trs.getDestState()) in self._forbiden_set:
                        pass
                    elif closed.has_key(str(trs.getDestState())) :
                        pass
                    else:
                        waiting.append(trs)
        if waiting :
            # Limits reached: pick a random frontier transition and forbid
            # the states closed in this round for the next search.
            trs=self._random_select(waiting)
            #self._forbiden_set = self._forbiden_set | set(closed.keys())
            self._forbiden_set = set(closed.keys())
            self.log("Forbiden set: %s" % len(self._forbiden_set))
            return (self._construct_path_to(trs, closed), False)
        self._forbiden_set=set()
        return (None, False)
    def _search_engine(self, from_state, target_actions):
        """Run the search, update self._stored_path and the internal
        GoodState/UglyState mode; may retreat towards sleep actions."""
        self._stored_path, success = self._breadth_first_search (from_state,\
                                                                 target_actions)
        if success :
            self._search_state = GoodState
        elif ( self._search_state == UglyState and random.random() < 0.25) \
                 or not self._stored_path :
            back_path, success = self._breadth_first_search (from_state,\
                                                             self._to_sleep_actions)
            if success :
                self._stored_path = back_path
                self._search_state = GoodState
                self.log("Moves backwards")
            else :
                self._search_state = UglyState
        if self._search_state == UglyState :
            self.log("Jumps randomly forward")
    def prepareForRun(self):
        """Validate configuration before the run: exactly one coverage
        requirement and a model are required; precompute sleep actions."""
        nonexit="Nonexisting string"
        if self.getParameter("help", nonexit) != nonexit:
            print __doc__
            raise Exception("Asked only for help")
        GuidanceBase.prepareForRun(self)
        if len(self._requirements) != 1 :
            raise Exception("Needs exactly one requirement")
        if not self._testmodel :
            raise Exception("Model should be given")
        self._stored_path=[]
        self._to_sleep_actions =\
            self._testmodel.matchedActions(set([self._sleep_ts_re]))
        self._last_go_back = False
        self._search_state = GoodState
        self._forbiden_set = set()
        self.log("Wormguidance ready for rocking")
    def _trslist_to_str(self,path):
        """Render a transition list as a list of action-name strings."""
        return str([ str(t.getAction()) for t in path])
    def suggestAction(self, from_state):
        """Return the next action to execute from from_state, running a new
        search when the stored path is empty or out of sync with the SUT."""
        # self.log("DEBUG: new search beginning")
        self._start_time=time.time()
        if self._stored_path :
            # Discard the stored path when the model is no longer in the
            # state the path expects to start from.
            if str(self._stored_path[0].getSourceState()) != str(from_state) :
                self.log("Throw away: %s"\
                         % self._trslist_to_str(self._stored_path) )
                self._stored_path=[]
                self._forbiden_set=set()
        # self.log("DEBUG: ok, starting up the search")
        if not self._stored_path :
            cov_obj=self._requirements[0]
            test_model=self._testmodel
            # self.log("DEBUG: about to hint")
            rex, d = cov_obj.getExecutionHint()
            # self.log("DEBUG: about to decrypt")
            actions = test_model.matchedActions(rex)
            # self.log("DEBUG: action names "+str(actions))
            if len(actions) > 0 :
                self._search_engine(from_state, actions)
                test_model.clearCache()
            self.log("Path: %s"\
                     % self._trslist_to_str(self._stored_path) )
        if self._stored_path :
            trs = self._stored_path.pop(0)
            self.log("Search has been ended")
            return trs.getAction()
        else:
            raise Exception ("Next action can not be found")
| # -*- coding: utf-8 -*-
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Greedy guidance is a breadth first searching algorithm that
returns the shortest path improving coverage. If one of the search
limits is reached, a random path is selected.
Greedy guidance reads the following parameter values:
- max_states (positive integer, default: 10000)
  The number of states the breadth-first search algorithm expands in a single
  search round.
- max_seconds (positive value, default: 3600)
  The maximum amount of time in seconds a single search can last.
"""
# Human-readable version tag for this guidance module.
version='wormguidance based on greedyguidance: 0.beta'
from tema.guidance.guidance import Guidance as GuidanceBase
from tema.model.model import Transition
import random
import time
import re
# Internal search-mode flags; only GoodState and UglyState are referenced
# in this file (OnlyJump / SelectJorS appear unused here).
GoodState, OnlyJump, UglyState, SelectJorS = range(4)
class StopCondition:
    """Callable deciding when the breadth-first search must give up.

    The search stops once either the time budget ("max_seconds" parameter,
    default 3600 s) has elapsed, or the mapping of closed states has grown
    to "max_states" entries.  The state cap is only enforced when the
    "max_states" parameter is truthy.
    """
    def __init__(self, prm_src, sized_dict, start_time):
        self._closed_states = sized_dict
        self._began_at = start_time
        self._state_cap = prm_src.getParameter("max_states")
        self._deadline = prm_src.getParameter("max_seconds", 3600)
    def __call__(self):
        # True as soon as the wall-clock budget is spent.
        expired = (time.time() - self._began_at) >= self._deadline
        if not self._state_cap:
            return expired
        return expired or len(self._closed_states) >= self._state_cap
class Guidance(GuidanceBase):
    """Worm/greedy guidance (Python 2 code).

    Breadth-first searches the test model for the shortest path towards
    actions hinted by the coverage requirement; when the search limits are
    hit it falls back to a random frontier path, or retreats towards
    SLEEPts actions.
    """
    def __init__(self):
        GuidanceBase.__init__(self)
        # Transitions left over from a previous search, executed one by one.
        self._stored_path=[]
        self._random_select=random.Random(time.time()).choice
        # Matches the "sleep" transitions used as retreat targets.
        self._sleep_ts_re = re.compile(r"SLEEPts.*")
    def _search_transition_by_name(self, from_state, a_name):
        """Return the outgoing transition of from_state whose action name
        equals a_name, or None when there is none."""
        for trs in from_state.getOutTransitions() :
            if str( trs.getAction()) == a_name :
                return trs
        return None
    def _get_select_set(self, state, closed):
        """Return the outgoing transitions of state whose destination has
        not yet been visited (i.e. is not a key of the closed mapping)."""
        rval=[]
        for trs in state.getOutTransitions():
            if str(trs.getDestState()) not in closed:
                rval.append(trs)
        return rval
    def _construct_path_to(self, transition, closed):
        """Rebuild the transition path leading to transition by walking the
        closed mapping backwards; the artificial root transition (source
        state None) is dropped from the result."""
        rval=[transition]
        s=rval[0].getSourceState()
        while s :
            rval[0:0]=[closed[str(s)]]
            s=rval[0].getSourceState()
        return rval[1:]
    def _breadth_first_search(self, from_state, target_actions):
        """Breadth-first search from from_state for any action name in
        target_actions.

        Returns (path, True) when a target action was found, (path, False)
        when the search limits were reached (path is then a random frontier
        path), and (None, False) when the reachable graph was exhausted.
        """
        self.setParameter("max_states",self.getParameter("max_states",10000))
        closed={}
        # Artificial root transition (no source state) makes the path
        # reconstruction in _construct_path_to uniform.
        waiting=[Transition(None,None,from_state)]
        stop_condition=StopCondition(self,closed,self._start_time)
        while waiting and not stop_condition() :
            current_trans = waiting.pop(0)
            current_state = current_trans.getDestState()
            if not closed.has_key(str(current_state)) :
                closed[str(current_state)] = current_trans
                for trs in current_state.getOutTransitions():
                    if str(trs.getAction()) in target_actions :
                        self._forbiden_set=set()
                        return (self._construct_path_to(trs, closed), True)
                    elif str(trs.getDestState()) in self._forbiden_set:
                        pass
                    elif closed.has_key(str(trs.getDestState())) :
                        pass
                    else:
                        waiting.append(trs)
        if waiting :
            # Limits reached: pick a random frontier transition and forbid
            # the states closed in this round for the next search.
            trs=self._random_select(waiting)
            #self._forbiden_set = self._forbiden_set | set(closed.keys())
            self._forbiden_set = set(closed.keys())
            self.log("Forbiden set: %s" % len(self._forbiden_set))
            return (self._construct_path_to(trs, closed), False)
        self._forbiden_set=set()
        return (None, False)
    def _search_engine(self, from_state, target_actions):
        """Run the search, update self._stored_path and the internal
        GoodState/UglyState mode; may retreat towards sleep actions."""
        self._stored_path, success = self._breadth_first_search (from_state,\
                                                                 target_actions)
        if success :
            self._search_state = GoodState
        elif ( self._search_state == UglyState and random.random() < 0.25) \
                 or not self._stored_path :
            back_path, success = self._breadth_first_search (from_state,\
                                                             self._to_sleep_actions)
            if success :
                self._stored_path = back_path
                self._search_state = GoodState
                self.log("Moves backwards")
            else :
                self._search_state = UglyState
        if self._search_state == UglyState :
            self.log("Jumps randomly forward")
    def prepareForRun(self):
        """Validate configuration before the run: exactly one coverage
        requirement and a model are required; precompute sleep actions."""
        nonexit="Nonexisting string"
        if self.getParameter("help", nonexit) != nonexit:
            print __doc__
            raise Exception("Asked only for help")
        GuidanceBase.prepareForRun(self)
        if len(self._requirements) != 1 :
            raise Exception("Needs exactly one requirement")
        if not self._testmodel :
            raise Exception("Model should be given")
        self._stored_path=[]
        self._to_sleep_actions =\
            self._testmodel.matchedActions(set([self._sleep_ts_re]))
        self._last_go_back = False
        self._search_state = GoodState
        self._forbiden_set = set()
        self.log("Wormguidance ready for rocking")
    def _trslist_to_str(self,path):
        """Render a transition list as a list of action-name strings."""
        return str([ str(t.getAction()) for t in path])
    def suggestAction(self, from_state):
        """Return the next action to execute from from_state, running a new
        search when the stored path is empty or out of sync with the SUT."""
        # self.log("DEBUG: new search beginning")
        self._start_time=time.time()
        if self._stored_path :
            # Discard the stored path when the model is no longer in the
            # state the path expects to start from.
            if str(self._stored_path[0].getSourceState()) != str(from_state) :
                self.log("Throw away: %s"\
                         % self._trslist_to_str(self._stored_path) )
                self._stored_path=[]
                self._forbiden_set=set()
        # self.log("DEBUG: ok, starting up the search")
        if not self._stored_path :
            cov_obj=self._requirements[0]
            test_model=self._testmodel
            # self.log("DEBUG: about to hint")
            rex, d = cov_obj.getExecutionHint()
            # self.log("DEBUG: about to decrypt")
            actions = test_model.matchedActions(rex)
            # self.log("DEBUG: action names "+str(actions))
            if len(actions) > 0 :
                self._search_engine(from_state, actions)
                test_model.clearCache()
            self.log("Path: %s"\
                     % self._trslist_to_str(self._stored_path) )
        if self._stored_path :
            trs = self._stored_path.pop(0)
            self.log("Search has been ended")
            return trs.getAction()
        else:
            raise Exception ("Next action can not be found")
supervisr/core/utils/constants.py | BeryJu/supervisr | 1 | 6614593 | """supervisr core constants and regexps"""
TEST_DOMAIN = 'supervisrtest.beryju.org'
DOMAIN_REGEX = (r'([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.'
r'([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*')
EMAIL_DOMAIN_REGEX = DOMAIN_REGEX
EMAIL_ADDRESS_REGEX = r'[a-zA-Z0-9_.+-/]+'
EMAIL_REGEX = r'%s@%s' % (EMAIL_ADDRESS_REGEX, DOMAIN_REGEX)
UUID_REGEX = r'[0-9a-f]{8}-[0-9a-f]{4}-[4][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}'
# Regex used to match modules for admin/modules
MOD_REGEX = r'[a-zA-Z0-9/._]+'
SLUG_REGEX = r'[-\w]+'
| """supervisr core constants and regexps"""
TEST_DOMAIN = 'supervisrtest.beryju.org'
DOMAIN_REGEX = (r'([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.'
r'([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*')
EMAIL_DOMAIN_REGEX = DOMAIN_REGEX
EMAIL_ADDRESS_REGEX = r'[a-zA-Z0-9_.+-/]+'
EMAIL_REGEX = r'%s@%s' % (EMAIL_ADDRESS_REGEX, DOMAIN_REGEX)
UUID_REGEX = r'[0-9a-f]{8}-[0-9a-f]{4}-[4][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}'
# Regex used to match modules for admin/modules
MOD_REGEX = r'[a-zA-Z0-9/._]+'
SLUG_REGEX = r'[-\w]+'
| en | 0.490541 | supervisr core constants and regexps # Regex used to match modules for admin/modules | 2.019696 | 2 |
app/config.py | mikeboers/heartbeat | 0 | 6614594 | import datetime
import os
ROOT_PATH = os.path.abspath(os.path.join(__file__, '..', '..'))
DEBUG = bool(os.environ.get("DEBUG"))
# Not the best test, but there it is.
IS_HEROKU = os.environ.get('HOME') == '/app' and '.heroku' in os.environ.get('LIBRARY_PATH', '')
if IS_HEROKU:
TEMPORARY_DATABASE_URI = 'sqlite://'
else:
sqlite_dir = os.path.join(ROOT_PATH, 'var', 'sqlite')
if not os.path.exists(sqlite_dir):
os.makedirs(sqlite_dir)
TEMPORARY_DATABASE_URI = 'sqlite:///%s' % os.path.join(ROOT_PATH, 'var', 'sqlite', 'main.sqlite')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', TEMPORARY_DATABASE_URI)
USERNAME = os.environ.get('USERNAME')
PASSWORD = <PASSWORD>('PASSWORD')
SECRET_KEY = os.environ.get('SECRET_KEY')
NOTIFY_EMAIL = os.environ.get('NOTIFY_EMAIL')
NOTIFY_PROWL = os.environ.get('NOTIFY_PROWL')
NOTIFY_ANDROID = os.environ.get('NOTIFY_ANDROID')
MAIL_SERVER = os.environ.get('MAIL_SERVER') or os.environ.get('POSTMARK_SMTP_SERVER') or 'localhost'
MAIL_PORT = int(os.environ.get('MAIL_PORT', 25))
MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or os.environ.get('POSTMARK_API_KEY')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or os.environ.get('POSTMARK_API_KEY')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_DEFAULT_SENDER') or NOTIFY_EMAIL
PERMANENT_SESSION_LIFETIME = datetime.timedelta(days=20*365)
| import datetime
import os
ROOT_PATH = os.path.abspath(os.path.join(__file__, '..', '..'))
DEBUG = bool(os.environ.get("DEBUG"))
# Not the best test, but there it is.
IS_HEROKU = os.environ.get('HOME') == '/app' and '.heroku' in os.environ.get('LIBRARY_PATH', '')
if IS_HEROKU:
TEMPORARY_DATABASE_URI = 'sqlite://'
else:
sqlite_dir = os.path.join(ROOT_PATH, 'var', 'sqlite')
if not os.path.exists(sqlite_dir):
os.makedirs(sqlite_dir)
TEMPORARY_DATABASE_URI = 'sqlite:///%s' % os.path.join(ROOT_PATH, 'var', 'sqlite', 'main.sqlite')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', TEMPORARY_DATABASE_URI)
USERNAME = os.environ.get('USERNAME')
PASSWORD = <PASSWORD>('PASSWORD')
SECRET_KEY = os.environ.get('SECRET_KEY')
NOTIFY_EMAIL = os.environ.get('NOTIFY_EMAIL')
NOTIFY_PROWL = os.environ.get('NOTIFY_PROWL')
NOTIFY_ANDROID = os.environ.get('NOTIFY_ANDROID')
MAIL_SERVER = os.environ.get('MAIL_SERVER') or os.environ.get('POSTMARK_SMTP_SERVER') or 'localhost'
MAIL_PORT = int(os.environ.get('MAIL_PORT', 25))
MAIL_USERNAME = os.environ.get('MAIL_USERNAME') or os.environ.get('POSTMARK_API_KEY')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') or os.environ.get('POSTMARK_API_KEY')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_DEFAULT_SENDER') or NOTIFY_EMAIL
PERMANENT_SESSION_LIFETIME = datetime.timedelta(days=20*365)
| en | 0.820803 | # Not the best test, but there it is. | 2.324243 | 2 |
vendor/packages/logilab-astng/builder.py | jgmize/kitsune | 2 | 6614595 | <reponame>jgmize/kitsune<filename>vendor/packages/logilab-astng/builder.py
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:<EMAIL>
# copyright 2003-2010 <NAME>, all rights reserved.
# contact mailto:<EMAIL>
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""The ASTNGBuilder makes astng from living object and / or from compiler.ast
With python >= 2.5, the internal _ast module is used instead
The builder is not thread safe and can't be used to parse different sources
at the same time.
"""
__docformat__ = "restructuredtext en"
import sys
from os.path import splitext, basename, dirname, exists, abspath
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from logilab.common.fileutils import norm_read
from logilab.common.modutils import modpath_from_file
from logilab.astng._exceptions import ASTNGBuildingException
from logilab.astng.raw_building import *
# Use the fast builtin _ast parser when available (python >= 2.5); otherwise
# fall back to the legacy compiler package.  Python 2 syntax
# ("except ImportError, exc") -- this module predates python 3.
try:
    from _ast import PyCF_ONLY_AST
    def parse(string):
        # Produce an AST object only, without compiling down to bytecode.
        return compile(string, "<string>", 'exec', PyCF_ONLY_AST)
    from logilab.astng._nodes_ast import TreeRebuilder
except ImportError, exc:
    from compiler import parse
    from logilab.astng import patchcomptransformer
    from logilab.astng._nodes_compiler import TreeRebuilder
# ast NG builder ##############################################################
class ASTNGBuilder:
    """provide astng building methods

    Builds ASTNG trees either from python source (file_build/string_build)
    or, when no source is available, by introspecting a live module
    (inspect_build).  Python 2 code: uses has_key, im_func, func_code,
    long/unicode.  Not thread safe.
    """
    def __init__(self, manager=None):
        if manager is None:
            # Fall back to the process-wide shared manager.
            from logilab.astng import MANAGER as manager
        self._manager = manager
        self._module = None
        self._file = None
        # Maps already-visited live objects to their nodes during
        # object_build (guards against cycles); initialised per build.
        self._done = None
        self.rebuilder = TreeRebuilder(manager)
        # Modules whose members report a different __module__ at runtime.
        self._dyn_modname_map = {'gtk': 'gtk._gtk'}
    def module_build(self, module, modname=None):
        """build an astng from a living module instance
        """
        node = None
        self._module = module
        path = getattr(module, '__file__', None)
        if path is not None:
            path_, ext = splitext(module.__file__)
            # Prefer parsing the real .py source when it sits next to the
            # compiled file.
            if ext in ('.py', '.pyc', '.pyo') and exists(path_ + '.py'):
                node = self.file_build(path_ + '.py', modname)
        if node is None:
            # this is a built-in module
            # get a partial representation by introspection
            node = self.inspect_build(module, modname=modname, path=path)
        return node
    def inspect_build(self, module, modname=None, path=None):
        """build astng from a living module (i.e. using inspect)
        this is used when there is no python source code available (either
        because it's a built-in module or because the .py is not available)
        """
        self._module = module
        if modname is None:
            modname = module.__name__
        node = build_module(modname, module.__doc__)
        node.file = node.path = path and abspath(path) or path
        if self._manager is not None:
            self._manager._cache[modname] = node
        node.package = hasattr(module, '__path__')
        self._done = {}
        self.object_build(node, module)
        return node
    def file_build(self, path, modname=None):
        """build astng from a source code file (i.e. from an ast)

        path is expected to be a python source file
        """
        try:
            data = norm_read(path)
        except IOError, ex:
            msg = 'Unable to load file %r (%s)' % (path, ex)
            raise ASTNGBuildingException(msg)
        self._file = path
        # get module name if necessary, *before modifying sys.path*
        if modname is None:
            try:
                modname = '.'.join(modpath_from_file(path))
            except ImportError:
                modname = splitext(basename(path))[0]
        # build astng representation
        try:
            # Make sibling modules importable while rebuilding the tree.
            sys.path.insert(0, dirname(path))
            node = self.string_build(data, modname, path)
            node.file = abspath(path)
        finally:
            self._file = None
            sys.path.pop(0)
        return node
    def string_build(self, data, modname='', path=None):
        """build astng from a source code stream (i.e. from an ast)"""
        return self.ast_build(parse(data + '\n'), modname, path)
    def ast_build(self, node, modname='', path=None):
        """build the astng from AST, return the new tree"""
        if path is not None:
            node_file = abspath(path)
        else:
            node_file = '<?>'
        if modname.endswith('.__init__'):
            # A package __init__: the module name is the package name.
            modname = modname[:-9]
            package = True
        else:
            package = path and path.find('__init__.py') > -1 or False
        newnode = self.rebuilder.build(node, modname, node_file)
        newnode.package = package
        return newnode
    # astng from living objects ###############################################
    #
    # this is actually a really minimal representation, including only Module,
    # Function and Class nodes and some others as guessed
    def object_build(self, node, obj):
        """recursive method which create a partial ast from real objects
        (only function, class, and method are handled)
        """
        if self._done.has_key(obj):
            return self._done[obj]
        self._done[obj] = node
        for name in dir(obj):
            try:
                member = getattr(obj, name)
            except AttributeError:
                # damned ExtensionClass.Base, I know you're there !
                attach_dummy_node(node, name)
                continue
            if ismethod(member):
                # Unwrap bound/unbound methods to the underlying function.
                member = member.im_func
            if isfunction(member):
                # verify this is not an imported function
                if member.func_code.co_filename != getattr(self._module, '__file__', None):
                    attach_dummy_node(node, name, member)
                    continue
                object_build_function(node, member, name)
            elif isbuiltin(member):
                # verify this is not an imported member
                if self._member_module(member) != self._module.__name__:
                    imported_member(node, member, name)
                    continue
                object_build_methoddescriptor(node, member, name)
            elif isclass(member):
                # verify this is not an imported class
                if self._member_module(member) != self._module.__name__:
                    imported_member(node, member, name)
                    continue
                if member in self._done:
                    class_node = self._done[member]
                    if not class_node in node.locals.get(name, ()):
                        node.add_local_node(class_node, name)
                else:
                    class_node = object_build_class(node, member, name)
                    # recursion
                    self.object_build(class_node, member)
            elif ismethoddescriptor(member):
                assert isinstance(member, object)
                object_build_methoddescriptor(node, member, name)
            elif isdatadescriptor(member):
                assert isinstance(member, object)
                object_build_datadescriptor(node, member, name)
            elif isinstance(member, (int, long, float, str, unicode)) or member is None:
                attach_const_node(node, name, member)
            else:
                # create an empty node so that the name is actually defined
                attach_dummy_node(node, name, member)
    def _member_module(self, member):
        """Return the (possibly remapped) module name a member belongs to."""
        modname = getattr(member, '__module__', None)
        return self._dyn_modname_map.get(modname, modname)
def imported_member(node, member, name):
    """Handle a class/builtin whose __module__ differs from the module being
    introspected: attach an import node when *name* really lives in that
    module, and fall back to a dummy node otherwise.
    """
    # some classes (e.g. ExtensionClass) carry no __module__ attribute at all
    origin = getattr(member, '__module__', '__builtin__')
    try:
        getattr(sys.modules[origin], name)
    except (KeyError, AttributeError):
        # origin module not imported, or the name is not actually defined
        # there: use an opaque placeholder instead of a bogus import
        attach_dummy_node(node, name, member)
        return
    attach_import_node(node, origin, name)
| # This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:<EMAIL>
# copyright 2003-2010 <NAME>, all rights reserved.
# contact mailto:<EMAIL>
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""The ASTNGBuilder makes astng from living object and / or from compiler.ast
With python >= 2.5, the internal _ast module is used instead
The builder is not thread safe and can't be used to parse different sources
at the same time.
"""
__docformat__ = "restructuredtext en"
import sys
from os.path import splitext, basename, dirname, exists, abspath
from inspect import isfunction, ismethod, ismethoddescriptor, isclass, \
isbuiltin
from inspect import isdatadescriptor
from logilab.common.fileutils import norm_read
from logilab.common.modutils import modpath_from_file
from logilab.astng._exceptions import ASTNGBuildingException
from logilab.astng.raw_building import *
# Prefer the builtin _ast parser (python >= 2.5); fall back to the old
# `compiler` package otherwise.
try:
    from _ast import PyCF_ONLY_AST
    def parse(string):
        """Compile *string* into a builtin AST object."""
        return compile(string, "<string>", 'exec', PyCF_ONLY_AST)
    from logilab.astng._nodes_ast import TreeRebuilder
except ImportError:
    # was `except ImportError, exc:` -- python-2-only syntax and the bound
    # name `exc` was never used; the bare clause behaves identically
    from compiler import parse
    from logilab.astng import patchcomptransformer
    from logilab.astng._nodes_compiler import TreeRebuilder
# ast NG builder ##############################################################
class ASTNGBuilder:
    """provide astng building methods
    """
    # NOTE: python-2-era codebase (has_key, `except E, e:` syntax, im_func);
    # kept as-is.
    def __init__(self, manager=None):
        # Default to the package-wide singleton manager when none is given.
        if manager is None:
            from logilab.astng import MANAGER as manager
        self._manager = manager
        self._module = None   # living module currently being introspected
        self._file = None     # path of the source file currently being built
        self._done = None     # memo table used by object_build (reset per run)
        self.rebuilder = TreeRebuilder(manager)
        # objects from some extension modules report the underlying dynamic
        # module in __module__; map those names back (see _member_module)
        self._dyn_modname_map = {'gtk': 'gtk._gtk'}
    def module_build(self, module, modname=None):
        """build an astng from a living module instance
        """
        node = None
        self._module = module
        path = getattr(module, '__file__', None)
        if path is not None:
            path_, ext = splitext(module.__file__)
            # prefer parsing the .py source when it sits next to the .pyc/.pyo
            if ext in ('.py', '.pyc', '.pyo') and exists(path_ + '.py'):
                node = self.file_build(path_ + '.py', modname)
        if node is None:
            # this is a built-in module
            # get a partial representation by introspection
            node = self.inspect_build(module, modname=modname, path=path)
        return node
    def inspect_build(self, module, modname=None, path=None):
        """build astng from a living module (i.e. using inspect)
        this is used when there is no python source code available (either
        because it's a built-in module or because the .py is not available)
        """
        self._module = module
        if modname is None:
            modname = module.__name__
        node = build_module(modname, module.__doc__)
        # py2-era `cond and a or b` conditional: abspath(path) when path is set
        node.file = node.path = path and abspath(path) or path
        if self._manager is not None:
            self._manager._cache[modname] = node
        node.package = hasattr(module, '__path__')
        self._done = {}
        self.object_build(node, module)
        return node
    def file_build(self, path, modname=None):
        """build astng from a source code file (i.e. from an ast)
        path is expected to be a python source file
        """
        try:
            data = norm_read(path)
        except IOError, ex:
            msg = 'Unable to load file %r (%s)' % (path, ex)
            raise ASTNGBuildingException(msg)
        self._file = path
        # get module name if necessary, *before modifying sys.path*
        if modname is None:
            try:
                modname = '.'.join(modpath_from_file(path))
            except ImportError:
                modname = splitext(basename(path))[0]
        # build astng representation
        try:
            # make sibling/relative imports resolvable while parsing;
            # always undone in the finally clause below
            sys.path.insert(0, dirname(path))
            node = self.string_build(data, modname, path)
            node.file = abspath(path)
        finally:
            self._file = None
            sys.path.pop(0)
        return node
    def string_build(self, data, modname='', path=None):
        """build astng from a source code stream (i.e. from an ast)"""
        # the trailing newline keeps the parser happy on sources that
        # do not end with one
        return self.ast_build(parse(data + '\n'), modname, path)
    def ast_build(self, node, modname='', path=None):
        """build the astng from AST, return the new tree"""
        if path is not None:
            node_file = abspath(path)
        else:
            node_file = '<?>'
        if modname.endswith('.__init__'):
            # strip the trailing '.__init__' (9 characters): the module
            # actually names a package
            modname = modname[:-9]
            package = True
        else:
            package = path and path.find('__init__.py') > -1 or False
        newnode = self.rebuilder.build(node, modname, node_file)
        newnode.package = package
        return newnode
    # astng from living objects ###############################################
    #
    # this is actually a really minimal representation, including only Module,
    # Function and Class nodes and some others as guessed
    def object_build(self, node, obj):
        """recursive method which create a partial ast from real objects
        (only function, class, and method are handled)
        """
        # memo table: translate every real object at most once
        if self._done.has_key(obj):
            return self._done[obj]
        self._done[obj] = node
        for name in dir(obj):
            try:
                member = getattr(obj, name)
            except AttributeError:
                # damned ExtensionClass.Base, I know you're there !
                attach_dummy_node(node, name)
                continue
            if ismethod(member):
                # unwrap the method to its function object (python 2 API)
                member = member.im_func
            if isfunction(member):
                # verify this is not an imported function
                if member.func_code.co_filename != getattr(self._module, '__file__', None):
                    attach_dummy_node(node, name, member)
                    continue
                object_build_function(node, member, name)
            elif isbuiltin(member):
                # verify this is not an imported member
                if self._member_module(member) != self._module.__name__:
                    imported_member(node, member, name)
                    continue
                object_build_methoddescriptor(node, member, name)
            elif isclass(member):
                # verify this is not an imported class
                if self._member_module(member) != self._module.__name__:
                    imported_member(node, member, name)
                    continue
                if member in self._done:
                    class_node = self._done[member]
                    if not class_node in node.locals.get(name, ()):
                        node.add_local_node(class_node, name)
                else:
                    class_node = object_build_class(node, member, name)
                    # recursion
                    self.object_build(class_node, member)
            elif ismethoddescriptor(member):
                assert isinstance(member, object)
                object_build_methoddescriptor(node, member, name)
            elif isdatadescriptor(member):
                assert isinstance(member, object)
                object_build_datadescriptor(node, member, name)
            elif isinstance(member, (int, long, float, str, unicode)) or member is None:
                # plain constants become Const nodes
                attach_const_node(node, name, member)
            else:
                # create an empty node so that the name is actually defined
                attach_dummy_node(node, name, member)
    def _member_module(self, member):
        # normalize __module__ through the dynamic-module alias table
        modname = getattr(member, '__module__', None)
        return self._dyn_modname_map.get(modname, modname)
def imported_member(node, member, name):
"""consider a class/builtin member where __module__ != current module name
check if it's sound valid and then add an import node, else use a dummy node
"""
# /!\ some classes like ExtensionClass doesn't have a
# __module__ attribute !
member_module = getattr(member, '__module__', '__builtin__')
try:
getattr(sys.modules[member_module], name)
except (KeyError, AttributeError):
attach_dummy_node(node, name, member)
else:
attach_import_node(node, member_module, name) | en | 0.817776 | # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free Software # Foundation; either version 2 of the License, or (at your option) any later # version. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved. # contact http://www.logilab.fr/ -- mailto:<EMAIL> # copyright 2003-2010 <NAME>, all rights reserved. # contact mailto:<EMAIL> # # This file is part of logilab-astng. # # logilab-astng is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 2.1 of the License, or (at your # option) any later version. # # logilab-astng is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License # for more details. # # You should have received a copy of the GNU Lesser General Public License along # with logilab-astng. If not, see <http://www.gnu.org/licenses/>. The ASTNGBuilder makes astng from living object and / or from compiler.ast With python >= 2.5, the internal _ast module is used instead The builder is not thread safe and can't be used to parse different sources at the same time. 
# ast NG builder ############################################################## provide astng building methods build an astng from a living module instance # this is a built-in module # get a partial representation by introspection build astng from a living module (i.e. using inspect) this is used when there is no python source code available (either because it's a built-in module or because the .py is not available) build astng from a source code file (i.e. from an ast) path is expected to be a python source file # get module name if necessary, *before modifying sys.path* # build astng representation build astng from a source code stream (i.e. from an ast) build the astng from AST, return the new tree # astng from living objects ############################################### # # this is actually a really minimal representation, including only Module, # Function and Class nodes and some others as guessed recursive method which create a partial ast from real objects (only function, class, and method are handled) # damned ExtensionClass.Base, I know you're there ! # verify this is not an imported function # verify this is not an imported member # verify this is not an imported class # recursion # create an empty node so that the name is actually defined consider a class/builtin member where __module__ != current module name check if it's sound valid and then add an import node, else use a dummy node # /!\ some classes like ExtensionClass doesn't have a # __module__ attribute ! | 1.513421 | 2 |
application/telegram/__init__.py | StefanKuppelwieser/ogame | 1 | 6614596 | import requests
class Telegram(object):
    """Minimal Telegram Bot API client bound to one bot token and one chat.

    Only supports fetching a URL and sending a text message to the
    configured chat.
    """
    def __init__(self, token, chat):
        self.TOKEN = token  # bot token issued by @BotFather
        self.CHAT = chat    # target chat id
        self.URL = "https://api.telegram.org/bot{}".format(self.TOKEN)
    def get_url(self, url):
        """GET *url* and return the response body decoded as UTF-8.

        Retries indefinitely on failure, matching the original intent, but
        iteratively: the original retried via unbounded recursion (and a bare
        `except:` that also swallowed KeyboardInterrupt), which overflows the
        call stack during a persistent outage.
        """
        while True:
            try:
                response = requests.get(url)
                return response.content.decode("utf8")
            except Exception:
                # transient network error: try again
                continue
    def send_message(self, text):
        """Send *text* to the configured chat via the sendMessage endpoint.

        NOTE(review): *text* is not URL-encoded; messages containing '&' or
        '#' will be mangled by the query string -- consider urllib quoting.
        """
        url = self.URL + "/sendMessage?text={}&chat_id={}".format(text, self.CHAT)
        self.get_url(url)
class Telegram(object):
    # Minimal Telegram Bot API wrapper bound to one bot token and one chat.
    def __init__(self, token, chat):
        self.TOKEN = token  # bot token issued by @BotFather
        self.CHAT = chat  # target chat id
        self.URL = "https://api.telegram.org/bot{}".format(self.TOKEN)
    def get_url(self, url):
        # GET *url* and return the body decoded as UTF-8.
        # NOTE(review): the bare except retries forever via recursion, so a
        # persistent outage exhausts the stack and Ctrl-C is swallowed --
        # confirm this is acceptable for the deployment.
        try:
            response = requests.get(url)
            content = response.content.decode("utf8")
            return content
        except:
            return self.get_url(url)
    def send_message(self, text):
        # Fire-and-forget sendMessage call; *text* is not URL-encoded.
        try:
            url = self.URL + "/sendMessage?text={}&chat_id={}".format(text, self.CHAT)
            self.get_url(url)
        except:
            return self.send_message(text) | none | 1 | 2.778337 | 3 |
allennlp/modules/token_embedders/empty_embedder.py | nadgeri14/allennlp | 2 | 6614597 | import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("empty")
class EmptyEmbedder(TokenEmbedder):
    """
    Assumes you want to completely ignore the output of a `TokenIndexer` for some reason, and does
    not return anything when asked to embed it.
    You should almost never need to use this; normally you would just not use a particular
    `TokenIndexer`. It's only in very rare cases, like simplicity in data processing for language
    modeling (where we use just one `TextField` to handle input embedding and computing target ids),
    where you might want to use this.
    """
    def __init__(self) -> None:
        super().__init__()
    def get_output_dim(self):
        # An empty embedder contributes zero dimensions to the final
        # concatenated embedding.
        return 0
    def forward(self, *inputs, **kwargs) -> torch.Tensor:
        # NOTE(review): annotated `-> torch.Tensor` but always returns None;
        # callers presumably discard this value -- confirm before tightening
        # the annotation.
        return None
| import torch
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
@TokenEmbedder.register("empty")
class EmptyEmbedder(TokenEmbedder):
    """
    A no-op embedder: deliberately ignores whatever its `TokenIndexer`
    produced and contributes nothing when asked to embed it.
    Normally you would simply omit the indexer instead; this class exists
    for the rare setups (e.g. language modeling over a single `TextField`
    that both feeds the input embedding and computes target ids) where an
    indexer must be present for data processing but its output must not be
    embedded.
    """
    def __init__(self) -> None:
        super().__init__()
    def get_output_dim(self):
        # Adds zero dimensions to any concatenated embedding output.
        return 0
    def forward(self, *inputs, **kwargs) -> torch.Tensor:
        # Nothing to embed; callers are expected to drop this result.
        return None
| en | 0.927042 | Assumes you want to completely ignore the output of a `TokenIndexer` for some reason, and does not return anything when asked to embed it. You should almost never need to use this; normally you would just not use a particular `TokenIndexer`. It's only in very rare cases, like simplicity in data processing for language modeling (where we use just one `TextField` to handle input embedding and computing target ids), where you might want to use this. | 2.956668 | 3 |
MPCGUACC-0.0.2/RaspberryPi/CameraModule/src/main.py | hyu1834/-Stereoscopic-Point-Cloud-Generation-Using-Array-of-Commodity-Cameras | 1 | 6614598 | #Standard Import
import re
import os
import sys
import time
#3rd Parties Import
import RPi.GPIO as GPIO
#Local Import
import picamera_utils
import opencv_utils
# import network_utils
# BCM GPIO pin numbers for the two push buttons (GPIO.setmode(GPIO.BCM) is
# applied in main(); both inputs use pull-downs, so a high level triggers):
terminate_pin = 20  # high -> leave the capture loop
capture_pin = 21  # high -> capture and save one image
def getrevision():
    """Return the Raspberry Pi board revision (hex string) parsed from
    /proc/cmdline, or "unknown" when it cannot be determined.
    """
    revision = "unknown"
    try:
        with open('/proc/cmdline', 'r') as f:
            line = f.readline()
    except IOError:
        # /proc/cmdline missing or unreadable (e.g. not running on Linux)
        return revision
    m = re.search('bcm2708.boardrev=(0x[0123456789abcdef]*) ', line)
    # The original called m.group(1) unconditionally, so any kernel command
    # line without a boardrev entry crashed with AttributeError and the
    # "unknown" default was dead code.
    if m is not None:
        revision = m.group(1)
    return revision
def print_usage():
    """Print the command line usage help and exit with status 0."""
    print("Usage: python main.py [options value]")
    print("Options:")
    print("\t\t-r, --resolution:")
    print("\t\t\tVGA/480 - Image with VGA/480p")
    print("\t\t\tHD720/720 - Image with HD720/720p")
    print("\t\t\tHD1080/1080 - Image with HD1080/1080p")
    print("\t\t\t5MP - Image with full 5MP")
    print("\t\t\t8MP - Image with full 8MP (Only available on Raspberry Pi camera V2.3")
    print("\t\t-id, --camera_id:")
    print("\t\t\tCamera ID")
    print("\t\t-o, --output_directory:")
    print("\t\t\tImage output directory")
    # sys.exit instead of the site-injected exit() builtin, which is not
    # guaranteed to exist in every interpreter context
    sys.exit(0)
def prase_arguement(args):
    """Parse the option list *args* (typically sys.argv[1:]).

    Returns (resolution, camera_id, output_directory).  Exits the process
    on --help, on an unsupported option/resolution, or on an option that is
    missing its value.
    """
    # defaults
    resolution = picamera_utils.Image_Resolution.HD720
    camera_id = "1"
    output_directory = "./"
    index = 0
    arg_len = len(args)
    # prase arguements
    while(index < arg_len):
        if "-h" == args[index] or "--help" == args[index]:
            print_usage()
        elif "-r" == args[index] or "--resolution" == args[index]:
            index += 1
            if(index < arg_len):
                if args[index] == "VGA" or args[index] == "480":
                    resolution = picamera_utils.Image_Resolution.VGA
                elif args[index] == "HD720" or args[index] == "720":
                    resolution = picamera_utils.Image_Resolution.HD720
                elif args[index] == "HD1080" or args[index] == "1080":
                    resolution = picamera_utils.Image_Resolution.HD1080
                elif args[index] == "5MP":
                    resolution = picamera_utils.Image_Resolution.HD5MP
                elif args[index] == "8MP":
                    resolution = picamera_utils.Image_Resolution.HD8MP
                else:
                    print("Resolution: %s not supported"%args[index])
                    sys.exit(0)
            else:
                # was a stray debug print ("Da"); report the actual problem
                print("Option %s requires a value"%args[index - 1])
                sys.exit(0)
        elif "-id" == args[index] or "--camera_id" == args[index]:
            index += 1
            if(index < arg_len):
                camera_id = args[index]
            else:
                # previously a missing value here was silently ignored
                print("Option %s requires a value"%args[index - 1])
                sys.exit(0)
        elif "-o" == args[index] or "--output_directory" == args[index]:
            index += 1
            if(index < arg_len):
                output_directory = args[index]
            else:
                print("Option %s requires a value"%args[index - 1])
                sys.exit(0)
        else:
            print("Unsupported option: %s"%args[index])
            sys.exit(0)
        index += 1
    return resolution, camera_id, output_directory
def main():
    # Interactive capture loop: poll two GPIO buttons, saving one raw image
    # per capture press, until the terminate pin goes high (or Ctrl-C).
    preview_stream = True
    image_format = picamera_utils.Image_Format.BGR
    # NOTE(review): the file extension is taken from Image_Format (BMP)
    # rather than a dedicated extension enum -- confirm intended.
    image_extension = picamera_utils.Image_Format.BMP
    # prase arguements
    resolution, camera_id, output_directory = prase_arguement(sys.argv[1:])
    #GPIO pin setup
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(terminate_pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
    GPIO.setup(capture_pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
    # Picamera Instance
    picamera = picamera_utils.PiCamera_Utils(resolution,
                                             picamera_version = picamera_utils.PiCamera_Version.V1,
                                             camera_sensor_mode = picamera_utils.Camera_Sensor_Mode.V1_5MP,
                                             capture_mode = picamera_utils.Capture_Mode.IMAGE,
                                             rotation = 180, use_video_port = False)
    # preview stream for easier capture
    if preview_stream:
        picamera.start_preview_stream()
    count = 1
    print("Done Init, waiting for input")
    try:
        # Busy-wait polling loop; no debouncing, so a held button can fire
        # several captures in a row.
        while(1):
            if GPIO.input(capture_pin):
                print("Capturing")
                picamera.capture_save_raw_image(os.path.join(output_directory, "%s_%s"%(camera_id, count)),
                                                image_format = image_format,
                                                image_extension = image_extension
                                                )
                print("Done Capture: %s"%count)
                count += 1
            if GPIO.input(terminate_pin):
                break
    except KeyboardInterrupt:
        print("All Completed")
    # stop all stream before killing the camera process
    picamera.stop_preview_stream()
    picamera.close()
# Script entry point: run the interactive capture loop.
if __name__ == '__main__':
    main()
| #Standard Import
import re
import os
import sys
import time
#3rd Parties Import
import RPi.GPIO as GPIO
#Local Import
import picamera_utils
import opencv_utils
# import network_utils
# BCM GPIO pin numbers for the two push buttons (GPIO.setmode(GPIO.BCM) is
# applied in main(); both inputs use pull-downs, so a high level triggers):
terminate_pin = 20  # high -> leave the capture loop
capture_pin = 21  # high -> capture and save one image
def getrevision():
    """Return the Raspberry Pi board revision (hex string) parsed from
    /proc/cmdline, or "unknown" when it cannot be determined.
    """
    revision = "unknown"
    try:
        with open('/proc/cmdline', 'r') as f:
            line = f.readline()
    except IOError:
        # /proc/cmdline missing or unreadable (e.g. not running on Linux)
        return revision
    m = re.search('bcm2708.boardrev=(0x[0123456789abcdef]*) ', line)
    # The original called m.group(1) unconditionally, so any kernel command
    # line without a boardrev entry crashed with AttributeError and the
    # "unknown" default was dead code.
    if m is not None:
        revision = m.group(1)
    return revision
def print_usage():
    """Print the command line usage help and exit with status 0."""
    print("Usage: python main.py [options value]")
    print("Options:")
    print("\t\t-r, --resolution:")
    print("\t\t\tVGA/480 - Image with VGA/480p")
    print("\t\t\tHD720/720 - Image with HD720/720p")
    print("\t\t\tHD1080/1080 - Image with HD1080/1080p")
    print("\t\t\t5MP - Image with full 5MP")
    print("\t\t\t8MP - Image with full 8MP (Only available on Raspberry Pi camera V2.3")
    print("\t\t-id, --camera_id:")
    print("\t\t\tCamera ID")
    print("\t\t-o, --output_directory:")
    print("\t\t\tImage output directory")
    # sys.exit instead of the site-injected exit() builtin, which is not
    # guaranteed to exist in every interpreter context
    sys.exit(0)
def prase_arguement(args):
    """Parse the option list *args* (typically sys.argv[1:]).

    Returns (resolution, camera_id, output_directory).  Exits the process
    on --help, on an unsupported option/resolution, or on an option that is
    missing its value.
    """
    # defaults
    resolution = picamera_utils.Image_Resolution.HD720
    camera_id = "1"
    output_directory = "./"
    index = 0
    arg_len = len(args)
    # prase arguements
    while(index < arg_len):
        if "-h" == args[index] or "--help" == args[index]:
            print_usage()
        elif "-r" == args[index] or "--resolution" == args[index]:
            index += 1
            if(index < arg_len):
                if args[index] == "VGA" or args[index] == "480":
                    resolution = picamera_utils.Image_Resolution.VGA
                elif args[index] == "HD720" or args[index] == "720":
                    resolution = picamera_utils.Image_Resolution.HD720
                elif args[index] == "HD1080" or args[index] == "1080":
                    resolution = picamera_utils.Image_Resolution.HD1080
                elif args[index] == "5MP":
                    resolution = picamera_utils.Image_Resolution.HD5MP
                elif args[index] == "8MP":
                    resolution = picamera_utils.Image_Resolution.HD8MP
                else:
                    print("Resolution: %s not supported"%args[index])
                    sys.exit(0)
            else:
                # was a stray debug print ("Da"); report the actual problem
                print("Option %s requires a value"%args[index - 1])
                sys.exit(0)
        elif "-id" == args[index] or "--camera_id" == args[index]:
            index += 1
            if(index < arg_len):
                camera_id = args[index]
            else:
                # previously a missing value here was silently ignored
                print("Option %s requires a value"%args[index - 1])
                sys.exit(0)
        elif "-o" == args[index] or "--output_directory" == args[index]:
            index += 1
            if(index < arg_len):
                output_directory = args[index]
            else:
                print("Option %s requires a value"%args[index - 1])
                sys.exit(0)
        else:
            print("Unsupported option: %s"%args[index])
            sys.exit(0)
        index += 1
    return resolution, camera_id, output_directory
def main():
    # Interactive capture loop: poll two GPIO buttons, saving one raw image
    # per capture press, until the terminate pin goes high (or Ctrl-C).
    preview_stream = True
    image_format = picamera_utils.Image_Format.BGR
    # NOTE(review): the file extension is taken from Image_Format (BMP)
    # rather than a dedicated extension enum -- confirm intended.
    image_extension = picamera_utils.Image_Format.BMP
    # prase arguements
    resolution, camera_id, output_directory = prase_arguement(sys.argv[1:])
    #GPIO pin setup
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(terminate_pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
    GPIO.setup(capture_pin, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
    # Picamera Instance
    picamera = picamera_utils.PiCamera_Utils(resolution,
                                             picamera_version = picamera_utils.PiCamera_Version.V1,
                                             camera_sensor_mode = picamera_utils.Camera_Sensor_Mode.V1_5MP,
                                             capture_mode = picamera_utils.Capture_Mode.IMAGE,
                                             rotation = 180, use_video_port = False)
    # preview stream for easier capture
    if preview_stream:
        picamera.start_preview_stream()
    count = 1
    print("Done Init, waiting for input")
    try:
        # Busy-wait polling loop; no debouncing, so a held button can fire
        # several captures in a row.
        while(1):
            if GPIO.input(capture_pin):
                print("Capturing")
                picamera.capture_save_raw_image(os.path.join(output_directory, "%s_%s"%(camera_id, count)),
                                                image_format = image_format,
                                                image_extension = image_extension
                                                )
                print("Done Capture: %s"%count)
                count += 1
            if GPIO.input(terminate_pin):
                break
    except KeyboardInterrupt:
        print("All Completed")
    # stop all stream before killing the camera process
    picamera.stop_preview_stream()
    picamera.close()
# Script entry point: run the interactive capture loop.
if __name__ == '__main__':
    main()
| en | 0.526173 | #Standard Import #3rd Parties Import #Local Import # import network_utils # prase arguements # prase arguements #GPIO pin setup # Picamera Instance # preview stream for easier capture # stop all stream before killing the camera process | 2.436696 | 2 |
faster_rcnn/predict-person.py | shijx12/DeepSim | 27 | 6614599 | <gh_stars>10-100
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os, sys, cv2
import argparse
import os.path as osp
import glob
import json
from tqdm import tqdm
this_dir = osp.dirname(__file__)
print(this_dir)
from lib.networks.factory import get_network
from lib.fast_rcnn.config import cfg
from lib.fast_rcnn.test import im_detect
from lib.fast_rcnn.nms_wrapper import nms
from lib.utils.timer import Timer
# The 20 PASCAL VOC object categories plus background at index 0.  The
# network's per-class scores/boxes are laid out in this order, so
# CLASSES.index('person') == 15, matching cls_ind in the script body below.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')
# CLASSES = ('__background__','person','bike','motorbike','car','bus')
def parse_args():
    """Parse input arguments."""
    ap = argparse.ArgumentParser(description='Faster R-CNN demo')
    ap.add_argument('--gpu', dest='gpu_id',
                    help='GPU device id to use [0]', default='0', type=str)
    ap.add_argument('--net', dest='demo_net',
                    help='Network to use [vgg16]', default='VGGnet_test')
    ap.add_argument('--model', dest='model',
                    help='Model path', default=' ')
    ap.add_argument('--input_dir', type=str)
    ap.add_argument('--output_dir', type=str)
    return ap.parse_args()
# Script body: load the Faster R-CNN model once, then run person detection
# (PASCAL VOC class 15) over every image in --input_dir, saving annotated
# copies under <output_dir>/bbox and all kept boxes to bbox_info.json.
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    if args.model == ' ' or not os.path.exists(args.model):
        print ('current path is ' + os.path.abspath(__file__))
        raise IOError(('Error: Model not found.\n'))
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    if not os.path.exists(os.path.join(args.output_dir, 'bbox')):
        os.makedirs(os.path.join(args.output_dir, 'bbox'))
    # init session
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    # load network
    net = get_network(args.demo_net)
    # load model
    print ('Loading network {:s}... '.format(args.demo_net)),
    saver = tf.train.Saver()
    saver.restore(sess, args.model)
    # saver.restore(sess, tf.train.latest_checkpoint(args.model))
    print (' done.')
    CONF_THRESH = 0.8  # keep detections scoring at least this
    NMS_THRESH = 0.3  # IoU threshold for non-maximum suppression
    cls_ind = 15 # person
    im_names = os.listdir(args.input_dir)
    person_boxes = {}  # image file name -> list of [x1, y1, x2, y2]
    for im_name in tqdm(im_names):
        # Load the demo image
        im = cv2.imread(os.path.join(args.input_dir, im_name))
        # Detect all object classes and regress object bounds
        scores, boxes = im_detect(sess, net, im)
        # slice out the 4 box coordinates and the score of the person class
        cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        person_boxes[im_name] = []
        for i in keep:
            if dets[i, -1] >= CONF_THRESH:
                # NOTE(review): map() yields a list on Python 2 only; under
                # Python 3 the map object breaks the indexing below and the
                # json serialization -- this script targets Python 2.
                person_boxes[im_name].append(map(int, dets[i, :-1].tolist()))
        for box in person_boxes[im_name]:
            cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]), (0, 0, 255))
        cv2.imwrite(os.path.join(args.output_dir, 'bbox', im_name), im)
    json.dump(person_boxes, open(os.path.join(args.output_dir, 'bbox_info.json'), 'w'))
| import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import os, sys, cv2
import argparse
import os.path as osp
import glob
import json
from tqdm import tqdm
this_dir = osp.dirname(__file__)
print(this_dir)
from lib.networks.factory import get_network
from lib.fast_rcnn.config import cfg
from lib.fast_rcnn.test import im_detect
from lib.fast_rcnn.nms_wrapper import nms
from lib.utils.timer import Timer
# The 20 PASCAL VOC object categories plus background at index 0.  The
# network's per-class scores/boxes are laid out in this order, so
# CLASSES.index('person') == 15, matching cls_ind in the script body below.
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')
# CLASSES = ('__background__','person','bike','motorbike','car','bus')
def parse_args():
    """Parse input arguments."""
    ap = argparse.ArgumentParser(description='Faster R-CNN demo')
    ap.add_argument('--gpu', dest='gpu_id',
                    help='GPU device id to use [0]', default='0', type=str)
    ap.add_argument('--net', dest='demo_net',
                    help='Network to use [vgg16]', default='VGGnet_test')
    ap.add_argument('--model', dest='model',
                    help='Model path', default=' ')
    ap.add_argument('--input_dir', type=str)
    ap.add_argument('--output_dir', type=str)
    return ap.parse_args()
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
if args.model == ' ' or not os.path.exists(args.model):
print ('current path is ' + os.path.abspath(__file__))
raise IOError(('Error: Model not found.\n'))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if not os.path.exists(os.path.join(args.output_dir, 'bbox')):
os.makedirs(os.path.join(args.output_dir, 'bbox'))
# init session
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
# load network
net = get_network(args.demo_net)
# load model
print ('Loading network {:s}... '.format(args.demo_net)),
saver = tf.train.Saver()
saver.restore(sess, args.model)
# saver.restore(sess, tf.train.latest_checkpoint(args.model))
print (' done.')
CONF_THRESH = 0.8
NMS_THRESH = 0.3
cls_ind = 15 # person
im_names = os.listdir(args.input_dir)
person_boxes = {}
for im_name in tqdm(im_names):
# Load the demo image
im = cv2.imread(os.path.join(args.input_dir, im_name))
# Detect all object classes and regress object bounds
scores, boxes = im_detect(sess, net, im)
cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
person_boxes[im_name] = []
for i in keep:
if dets[i, -1] >= CONF_THRESH:
person_boxes[im_name].append(map(int, dets[i, :-1].tolist()))
for box in person_boxes[im_name]:
cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]), (0, 0, 255))
cv2.imwrite(os.path.join(args.output_dir, 'bbox', im_name), im)
json.dump(person_boxes, open(os.path.join(args.output_dir, 'bbox_info.json'), 'w')) | en | 0.313419 | # CLASSES = ('__background__','person','bike','motorbike','car','bus') Parse input arguments. # Use RPN for proposals # init session # load network # load model # saver.restore(sess, tf.train.latest_checkpoint(args.model)) # person # Load the demo image # Detect all object classes and regress object bounds | 1.947417 | 2 |
src/Kriya-Grammar-Xtrctr/pre-process/clean_corpus.py | sfu-natlang/Kriya | 25 | 6614600 | <reponame>sfu-natlang/Kriya
#! /usr/bin/python
# This program cleans the parallel corpus by ignoring lines when either the source or the target or both are empty
__author__="bsa33"
__date__ ="$Jan 11, 2010 3:32:35 PM$"
import sys
# Sentence pairs where either side exceeds this many whitespace-separated
# tokens are dropped (overridable via the optional 6th CLI argument, see main()).
MAX_SENT_LEN = 80
def cleanCorpus(src_file, tgt_file, src_cleaned, tgt_cleaned):
    """Filter a sentence-aligned parallel corpus.

    Reads *src_file* and *tgt_file* in lock step and writes to *src_cleaned*
    / *tgt_cleaned* only the pairs where BOTH sides are non-empty and contain
    at most MAX_SENT_LEN tokens.  Prints before/after line counts.
    """
    line_cnt = 0       # pairs read
    filt_line_cnt = 0  # pairs kept
    sF = open(src_file, 'r')
    tF = open(tgt_file, 'r')
    oF1 = open(src_cleaned, 'w')
    oF2 = open(tgt_cleaned, 'w')
    try:
        while True:
            # read source and target lines in lock step; readline() returns
            # '' only at EOF, so stop once both files are exhausted
            src_line = sF.readline()
            tgt_line = tF.readline()
            if src_line == '' and tgt_line == '': break
            line_cnt += 1
            src_line = src_line.strip()
            tgt_line = tgt_line.strip()
            src_len = len( src_line.split() )
            tgt_len = len( tgt_line.split() )
            if 0 < src_len <= MAX_SENT_LEN and 0 < tgt_len <= MAX_SENT_LEN:
                oF1.write( "%s\n" % src_line )
                oF2.write( "%s\n" % tgt_line )
                filt_line_cnt += 1
    finally:
        # the original leaked all four handles on any read/write error
        sF.close()
        tF.close()
        oF1.close()
        oF2.close()
    # parenthesized single-argument prints: identical output on python 2,
    # and also valid python 3 (the original `print "..."` was py2-only)
    print("# of lines in corpus before cleaning : %d" % line_cnt)
    print("# of lines in corpus after cleaning : %d" % filt_line_cnt)
    print("# of lines ignored in cleaning : %d" % (line_cnt - filt_line_cnt))
    return None
def main():
    """Command line driver:
    clean_corpus.py <data_dir> <out_dir> <file_prefix> <src_ext> <tgt_ext> [max_sent_len]
    """
    global MAX_SENT_LEN
    d_dir = sys.argv[1]
    out_dir = sys.argv[2]
    file_prefix = sys.argv[3]
    src = sys.argv[4]
    tgt = sys.argv[5]
    if len(sys.argv) == 7:
        try:
            MAX_SENT_LEN = int(sys.argv[6])
        except (ValueError, TypeError):
            # was `except TypeError` only: int() raises ValueError for a
            # non-numeric string, so the guard could never actually fire
            print("\nERROR: Last argument should be the maximum sentence length (default 80)\n")
            sys.exit(1)
    if not d_dir.endswith("/"): d_dir += "/"
    if not out_dir.endswith("/"): out_dir += "/"
    src_file = d_dir + file_prefix + "." + src
    tgt_file = d_dir + file_prefix + "." + tgt
    src_cleaned = out_dir + file_prefix + ".cln." + src
    tgt_cleaned = out_dir + file_prefix + ".cln." + tgt
    cleanCorpus(src_file, tgt_file, src_cleaned, tgt_cleaned)
if __name__ == "__main__":
main()
| #! /usr/bin/python
# This program cleans the parallel corpus by ignoring lines when either the source or the target or both are empty
__author__="bsa33"
__date__ ="$Jan 11, 2010 3:32:35 PM$"
import sys
MAX_SENT_LEN = 80
def cleanCorpus(src_file, tgt_file, src_cleaned, tgt_cleaned):
    # Filter a sentence-aligned parallel corpus: a pair survives only when
    # both sides are non-empty and at most MAX_SENT_LEN tokens long.
    line_cnt = 0       # pairs read
    filt_line_cnt = 0  # pairs kept
    sF = open(src_file, 'r')
    tF = open(tgt_file, 'r')
    oF1 = open(src_cleaned, 'w')
    oF2 = open(tgt_cleaned, 'w')
    while True:
        # read source and target lines
        # readline() returns '' only at EOF; stop once BOTH files are done
        src_line = sF.readline()
        tgt_line = tF.readline()
        if src_line == '' and tgt_line =='': break
        line_cnt += 1
        src_line = src_line.strip()
        tgt_line = tgt_line.strip()
        src_len = len( src_line.split() )
        tgt_len = len( tgt_line.split() )
        if src_len == 0 or src_len > MAX_SENT_LEN: continue
        elif tgt_len == 0 or tgt_len > MAX_SENT_LEN: continue
        else:
            oF1.write( "%s\n" % src_line )
            oF2.write( "%s\n" % tgt_line )
            filt_line_cnt += 1
    sF.close()
    tF.close()
    oF1.close()
    oF2.close()
    print "# of lines in corpus before cleaning : %d" % line_cnt
    print "# of lines in corpus after cleaning : %d" % filt_line_cnt
    print "# of lines ignored in cleaning : %d" % (line_cnt - filt_line_cnt)
    return None
def main():
    """Command-line entry point.

    argv: <data_dir> <out_dir> <file_prefix> <src_lang> <tgt_lang> [max_sent_len]

    Derives the corpus file names from the prefix and the two language
    suffixes, then invokes cleanCorpus() to write the ".cln" outputs.
    The optional sixth argument overrides the module-level MAX_SENT_LEN.
    """
    global MAX_SENT_LEN
    # Fail with a usage message instead of an IndexError when args are missing.
    if len(sys.argv) < 6:
        print("Usage: %s <data_dir> <out_dir> <file_prefix> <src> <tgt> [max_sent_len]" % sys.argv[0])
        sys.exit(1)
    d_dir = sys.argv[1]
    out_dir = sys.argv[2]
    file_prefix = sys.argv[3]
    src = sys.argv[4]
    tgt = sys.argv[5]
    if len(sys.argv) == 7:
        try:
            MAX_SENT_LEN = int(sys.argv[6])
        except ValueError:
            # BUGFIX: a non-numeric argv string makes int() raise ValueError,
            # not TypeError, so the original except clause was dead code.
            print("\nERROR: Last argument should be the maximum sentence length (default 80)\n")
            sys.exit(1)
    # Ensure both directories end with a slash before path concatenation.
    if not d_dir.endswith("/"):
        d_dir += "/"
    if not out_dir.endswith("/"):
        out_dir += "/"
    src_file = d_dir + file_prefix + "." + src
    tgt_file = d_dir + file_prefix + "." + tgt
    src_cleaned = out_dir + file_prefix + ".cln." + src
    tgt_cleaned = out_dir + file_prefix + ".cln." + tgt
    cleanCorpus(src_file, tgt_file, src_cleaned, tgt_cleaned)
# Script entry point: run main() only under direct execution, not on import.
if __name__ == "__main__":
    main()
Starry_Night/code.py | claycooper/Adafruit_Learning_System_Guides | 0 | 6614601 | # SPDX-FileCopyrightText: 2018 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
from adafruit_crickit import crickit
# Create one motor on seesaw motor port #1
motor = crickit.dc_motor_1
motor.throttle = 0.5 # half speed forward
# Create drive (PWM) object for the lights on Drive 1
lights = crickit.drive_1
lights.frequency = 1000 # Our default frequency is 1KHz
while True:
lights.fraction = 0.5 # half on
time.sleep(0.8)
lights.fraction = 0.2 # dim
time.sleep(0.1)
# and repeat!
| # SPDX-FileCopyrightText: 2018 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
from adafruit_crickit import crickit
# Create one motor on seesaw motor port #1
motor = crickit.dc_motor_1
motor.throttle = 0.5 # half speed forward
# Create drive (PWM) object for the lights on Drive 1
lights = crickit.drive_1
lights.frequency = 1000 # Our default frequency is 1KHz
while True:
lights.fraction = 0.5 # half on
time.sleep(0.8)
lights.fraction = 0.2 # dim
time.sleep(0.1)
# and repeat!
| en | 0.619079 | # SPDX-FileCopyrightText: 2018 <NAME> for Adafruit Industries # # SPDX-License-Identifier: MIT # Create one motor on seesaw motor port #1 # half speed forward # Create drive (PWM) object for the lights on Drive 1 # Our default frequency is 1KHz # half on # dim # and repeat! | 2.644017 | 3 |
scripts/plotData/data/eispiceBattery4.py | oeshine/batterySimulator | 5 | 6614602 | <reponame>oeshine/batterySimulator<filename>scripts/plotData/data/eispiceBattery4.py
import eispice
cct = eispice.Circuit("battery stimulator not discharge not charge")
def generateSpiceModel():
spiceModel=[]
Vsp = ('Vsp',eispice.V('V1+',eispice.GND,3.65))#eispice source model
Rsp = ('Rsp',eispice.R('V1+','V2-',0.0155))#eispice resistance model
Rb=('Rb',eispice.R('V2-',eispice.GND,240))
Vsp2=('Vsp2',eispice.V('V2-','V2+',3.65))
Rsp2=('Rsp2',eispice.R('V2+','V3-',0.0155))
Rb2=('Rb2',eispice.R('V2-','V3-',240))
Vsp3=('Vsp3',eispice.V('V3-','V3+',3.65))
Rsp3=('Rsp3',eispice.R('V3+','V4-',0.0155))
Rb3=('Rb3',eispice.R('V3-','V4-',240))
Vsp4=('Vsp4',eispice.V('V4-','V4+',3.65))
Rsp4=('Rsp4',eispice.R('V4+',eispice.GND,0.0155))
Rb4=('Rb4',eispice.R('V4-',eispice.GND,240))
spiceModel.append(Vsp)
spiceModel.append(Rsp) #set a signel battery spice model
spiceModel.append(Rb)
spiceModel.append(Vsp2)
spiceModel.append(Rsp2)
spiceModel.append(Rb2)
spiceModel.append(Vsp3)
spiceModel.append(Rsp3)
spiceModel.append(Rb3)
spiceModel.append(Vsp4)
spiceModel.append(Rsp4)
spiceModel.append(Rb4)
return spiceModel
cct.batteries = generateSpiceModel();
cct.tran('0.01n','0.02n')
print 'current of Vsp is:', cct.i['Vsp']('0.01n') #unit is A
print 'current of Vsp2 is:', cct.i['Vsp2']('0.01n') #unit is A
print 'current of Vsp3 is:', cct.i['Vsp3']('0.01n') #unit is A
print 'current of Vsp4 is:', cct.i['Vsp4']('0.01n') #unit is A
"""result
current of Vsp is: 117.719124457
current of Vsp2 is: -117.749539159
current of Vsp3 is: -117.749539159
current of Vsp4 is: -117.749539159
"""
| import eispice
cct = eispice.Circuit("battery stimulator not discharge not charge")
def generateSpiceModel():
spiceModel=[]
Vsp = ('Vsp',eispice.V('V1+',eispice.GND,3.65))#eispice source model
Rsp = ('Rsp',eispice.R('V1+','V2-',0.0155))#eispice resistance model
Rb=('Rb',eispice.R('V2-',eispice.GND,240))
Vsp2=('Vsp2',eispice.V('V2-','V2+',3.65))
Rsp2=('Rsp2',eispice.R('V2+','V3-',0.0155))
Rb2=('Rb2',eispice.R('V2-','V3-',240))
Vsp3=('Vsp3',eispice.V('V3-','V3+',3.65))
Rsp3=('Rsp3',eispice.R('V3+','V4-',0.0155))
Rb3=('Rb3',eispice.R('V3-','V4-',240))
Vsp4=('Vsp4',eispice.V('V4-','V4+',3.65))
Rsp4=('Rsp4',eispice.R('V4+',eispice.GND,0.0155))
Rb4=('Rb4',eispice.R('V4-',eispice.GND,240))
spiceModel.append(Vsp)
spiceModel.append(Rsp) #set a signel battery spice model
spiceModel.append(Rb)
spiceModel.append(Vsp2)
spiceModel.append(Rsp2)
spiceModel.append(Rb2)
spiceModel.append(Vsp3)
spiceModel.append(Rsp3)
spiceModel.append(Rb3)
spiceModel.append(Vsp4)
spiceModel.append(Rsp4)
spiceModel.append(Rb4)
return spiceModel
cct.batteries = generateSpiceModel();
cct.tran('0.01n','0.02n')
print 'current of Vsp is:', cct.i['Vsp']('0.01n') #unit is A
print 'current of Vsp2 is:', cct.i['Vsp2']('0.01n') #unit is A
print 'current of Vsp3 is:', cct.i['Vsp3']('0.01n') #unit is A
print 'current of Vsp4 is:', cct.i['Vsp4']('0.01n') #unit is A
"""result
current of Vsp is: 117.719124457
current of Vsp2 is: -117.749539159
current of Vsp3 is: -117.749539159
current of Vsp4 is: -117.749539159
""" | en | 0.802849 | #eispice source model #eispice resistance model #set a signel battery spice model #unit is A #unit is A #unit is A #unit is A result current of Vsp is: 117.719124457 current of Vsp2 is: -117.749539159 current of Vsp3 is: -117.749539159 current of Vsp4 is: -117.749539159 | 2.422905 | 2 |
source/tf_loss.py | Bradan/deepwriting | 87 | 6614603 | <reponame>Bradan/deepwriting
import tensorflow as tf
import numpy as np
def logli_normal_bivariate(x, mu, sigma, rho, reduce_sum=False):
"""
Bivariate Gaussian log-likelihood. Rank of arguments is expected to be 3.
Args:
x: data samples with shape (batch_size, num_time_steps, data_size).
mu:
sigma: standard deviation.
rho:
reduce_sum: False, None or list of axes.
Returns:
"""
last_axis = tf.rank(x)-1
x1, x2 = tf.split(x, 2, axis=last_axis)
mu1, mu2 = tf.split(mu, 2, axis=last_axis)
sigma1, sigma2 = tf.split(sigma, 2, axis=last_axis)
with tf.name_scope('logli_normal_bivariate'):
x_mu1 = tf.subtract(x1, mu1)
x_mu2 = tf.subtract(x2, mu2)
Z = tf.square(tf.div(x_mu1, tf.maximum(1e-9, sigma1))) + \
tf.square(tf.div(x_mu2, tf.maximum(1e-9, sigma2))) - \
2*tf.div(tf.multiply(rho, tf.multiply(x_mu1, x_mu2)), tf.maximum(1e-9, tf.multiply(sigma1, sigma2)))
rho_square_term = tf.maximum(1e-9, 1-tf.square(rho))
log_regularize_term = tf.log(tf.maximum(1e-9, 2*np.pi*tf.multiply(tf.multiply(sigma1, sigma2), tf.sqrt(rho_square_term)) ))
log_power_e = tf.div(Z, 2*rho_square_term)
result = -(log_regularize_term + log_power_e)
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum)
def logli_normal_diag_cov(x, mu, sigma, reduce_sum=False):
"""
Log-likelihood of Gaussian with diagonal covariance matrix.
Args:
x:
mu:
sigma: standard deviation.
reduce_sum:
Returns:
"""
with tf.name_scope('logli_normal_diag_cov'):
ssigma2 = tf.maximum(1e-6, tf.square(sigma)*2)
denom_log = tf.log(tf.sqrt(np.pi * ssigma2))
norm = tf.square(tf.subtract(x, mu))
z = tf.div(norm, ssigma2)
result = -(z + denom_log)
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum)
def logli_bernoulli(x, theta, reduce_sum=False):
"""
Bernoulli log-likelihood.
Args:
x:
theta:
reduce_sum:
Returns:
"""
with tf.name_scope('logli_bernoulli'):
result = (tf.multiply(x, tf.log(tf.maximum(1e-9, theta))) + tf.multiply((1 - x), tf.log(tf.maximum(1e-9, 1 - theta))))
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum)
def kld_normal_isotropic(mu1, sigma1, mu2, sigma2, reduce_sum=False):
"""
Kullback-Leibler divergence between two isotropic Gaussian distributions.
Args:
mu1:
sigma1: standard deviation.
mu2:
sigma2: standard deviation.
reduce_sum:
Returns:
"""
with tf.name_scope("kld_normal_isotropic"):
result = tf.reduce_sum(0.5 * (
2 * tf.log(tf.maximum(1e-9, sigma2))
- 2 * tf.log(tf.maximum(1e-9, sigma1))
+ (tf.square(sigma1) + tf.square(mu1 - mu2)) / tf.maximum(1e-9, (tf.square(sigma2))) - 1), keepdims=True, axis=-1)
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum) | import tensorflow as tf
import numpy as np
def logli_normal_bivariate(x, mu, sigma, rho, reduce_sum=False):
"""
Bivariate Gaussian log-likelihood. Rank of arguments is expected to be 3.
Args:
x: data samples with shape (batch_size, num_time_steps, data_size).
mu:
sigma: standard deviation.
rho:
reduce_sum: False, None or list of axes.
Returns:
"""
last_axis = tf.rank(x)-1
x1, x2 = tf.split(x, 2, axis=last_axis)
mu1, mu2 = tf.split(mu, 2, axis=last_axis)
sigma1, sigma2 = tf.split(sigma, 2, axis=last_axis)
with tf.name_scope('logli_normal_bivariate'):
x_mu1 = tf.subtract(x1, mu1)
x_mu2 = tf.subtract(x2, mu2)
Z = tf.square(tf.div(x_mu1, tf.maximum(1e-9, sigma1))) + \
tf.square(tf.div(x_mu2, tf.maximum(1e-9, sigma2))) - \
2*tf.div(tf.multiply(rho, tf.multiply(x_mu1, x_mu2)), tf.maximum(1e-9, tf.multiply(sigma1, sigma2)))
rho_square_term = tf.maximum(1e-9, 1-tf.square(rho))
log_regularize_term = tf.log(tf.maximum(1e-9, 2*np.pi*tf.multiply(tf.multiply(sigma1, sigma2), tf.sqrt(rho_square_term)) ))
log_power_e = tf.div(Z, 2*rho_square_term)
result = -(log_regularize_term + log_power_e)
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum)
def logli_normal_diag_cov(x, mu, sigma, reduce_sum=False):
"""
Log-likelihood of Gaussian with diagonal covariance matrix.
Args:
x:
mu:
sigma: standard deviation.
reduce_sum:
Returns:
"""
with tf.name_scope('logli_normal_diag_cov'):
ssigma2 = tf.maximum(1e-6, tf.square(sigma)*2)
denom_log = tf.log(tf.sqrt(np.pi * ssigma2))
norm = tf.square(tf.subtract(x, mu))
z = tf.div(norm, ssigma2)
result = -(z + denom_log)
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum)
def logli_bernoulli(x, theta, reduce_sum=False):
"""
Bernoulli log-likelihood.
Args:
x:
theta:
reduce_sum:
Returns:
"""
with tf.name_scope('logli_bernoulli'):
result = (tf.multiply(x, tf.log(tf.maximum(1e-9, theta))) + tf.multiply((1 - x), tf.log(tf.maximum(1e-9, 1 - theta))))
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum)
def kld_normal_isotropic(mu1, sigma1, mu2, sigma2, reduce_sum=False):
"""
Kullback-Leibler divergence between two isotropic Gaussian distributions.
Args:
mu1:
sigma1: standard deviation.
mu2:
sigma2: standard deviation.
reduce_sum:
Returns:
"""
with tf.name_scope("kld_normal_isotropic"):
result = tf.reduce_sum(0.5 * (
2 * tf.log(tf.maximum(1e-9, sigma2))
- 2 * tf.log(tf.maximum(1e-9, sigma1))
+ (tf.square(sigma1) + tf.square(mu1 - mu2)) / tf.maximum(1e-9, (tf.square(sigma2))) - 1), keepdims=True, axis=-1)
if reduce_sum is False:
return result
else:
return tf.reduce_sum(result, reduce_sum) | en | 0.610063 | Bivariate Gaussian log-likelihood. Rank of arguments is expected to be 3. Args: x: data samples with shape (batch_size, num_time_steps, data_size). mu: sigma: standard deviation. rho: reduce_sum: False, None or list of axes. Returns: Log-likelihood of Gaussian with diagonal covariance matrix. Args: x: mu: sigma: standard deviation. reduce_sum: Returns: Bernoulli log-likelihood. Args: x: theta: reduce_sum: Returns: Kullback-Leibler divergence between two isotropic Gaussian distributions. Args: mu1: sigma1: standard deviation. mu2: sigma2: standard deviation. reduce_sum: Returns: | 2.753245 | 3 |
scrapers/tests/test_manolobase_spider.py | rmaceissoft/django-manolo | 0 | 6614604 | <reponame>rmaceissoft/django-manolo
# -*- coding: utf-8 -*-
import unittest
from datetime import date
from exceptions import NotImplementedError
from scrapy import exceptions
from manolo_scraper.spiders.spiders import ManoloBaseSpider
class TestManoloBaseSpider(unittest.TestCase):
def test_start_date_and_end_date(self):
with self.assertRaises(exceptions.UsageError):
ManoloBaseSpider(date_start='2015-08-20', date_end='2015-08-17', name='manolo')
def test_initial_request(self):
with self.assertRaises(NotImplementedError):
spider = ManoloBaseSpider(name='manolo')
today = date.today()
spider.initial_request(today)
def test_get_date_item(self):
self.assertEqual(ManoloBaseSpider.get_date_item('2015/08/20', '%Y/%m/%d'), '2015-08-20')
def test_days_between_dates(self):
self.assertEqual(ManoloBaseSpider.days_between_dates('2015-08-20', '2015-08-30'), 10)
self.assertEqual(ManoloBaseSpider.days_between_dates('2015-08-30', '2015-08-20'), -10)
self.assertEqual(ManoloBaseSpider.days_between_dates('2015-08-30', '2015-08-30'), 0)
| # -*- coding: utf-8 -*-
import unittest
from datetime import date
from exceptions import NotImplementedError
from scrapy import exceptions
from manolo_scraper.spiders.spiders import ManoloBaseSpider
class TestManoloBaseSpider(unittest.TestCase):
def test_start_date_and_end_date(self):
with self.assertRaises(exceptions.UsageError):
ManoloBaseSpider(date_start='2015-08-20', date_end='2015-08-17', name='manolo')
def test_initial_request(self):
with self.assertRaises(NotImplementedError):
spider = ManoloBaseSpider(name='manolo')
today = date.today()
spider.initial_request(today)
def test_get_date_item(self):
self.assertEqual(ManoloBaseSpider.get_date_item('2015/08/20', '%Y/%m/%d'), '2015-08-20')
def test_days_between_dates(self):
self.assertEqual(ManoloBaseSpider.days_between_dates('2015-08-20', '2015-08-30'), 10)
self.assertEqual(ManoloBaseSpider.days_between_dates('2015-08-30', '2015-08-20'), -10)
self.assertEqual(ManoloBaseSpider.days_between_dates('2015-08-30', '2015-08-30'), 0) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.802365 | 3 |
migrations/versions/89df7caa1e08_add_retraction_watch_summary_table.py | ourresearch/journalsdb | 8 | 6614605 | """add retraction watch summary table
Revision ID: <KEY>8
Revises: 738c<PASSWORD>c<PASSWORD>
Create Date: 2021-02-28 12:13:29.644467
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>4"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"retraction_summary",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("issn", sa.String(length=9), nullable=True),
sa.Column("journal", sa.Text(), nullable=False),
sa.Column("year", sa.Integer(), nullable=False),
sa.Column("retractions", sa.Integer(), nullable=False),
sa.Column("num_dois", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("issn", "year"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("retraction_summary")
# ### end Alembic commands ###
| """add retraction watch summary table
Revision ID: <KEY>8
Revises: 738c<PASSWORD>c<PASSWORD>
Create Date: 2021-02-28 12:13:29.644467
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<KEY>4"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"retraction_summary",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("issn", sa.String(length=9), nullable=True),
sa.Column("journal", sa.Text(), nullable=False),
sa.Column("year", sa.Integer(), nullable=False),
sa.Column("retractions", sa.Integer(), nullable=False),
sa.Column("num_dois", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("issn", "year"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("retraction_summary")
# ### end Alembic commands ###
| en | 0.497932 | add retraction watch summary table Revision ID: <KEY>8 Revises: 738c<PASSWORD>c<PASSWORD> Create Date: 2021-02-28 12:13:29.644467 # revision identifiers, used by Alembic. # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.533098 | 2 |
src/peachyprinter/domain/configuration_manager.py | Createcafe3d/YXE3Dtools | 23 | 6614606 | <reponame>Createcafe3d/YXE3Dtools
class ConfigurationManager(object):
def list(self):
raise NotImplementedException("Abstract Class")
def load(self, printer_name):
raise NotImplementedException("Abstract Class")
def save(self, configuration):
raise NotImplementedException("Abstract Class")
def reset(self, configuration):
raise NotImplementedException("Abstract Class")
def new(self, printer_name):
raise NotImplementedException("Abstract Class")
def get_current_config(self):
raise NotImplementedException("Abstract Class")
| class ConfigurationManager(object):
def list(self):
raise NotImplementedException("Abstract Class")
def load(self, printer_name):
raise NotImplementedException("Abstract Class")
def save(self, configuration):
raise NotImplementedException("Abstract Class")
def reset(self, configuration):
raise NotImplementedException("Abstract Class")
def new(self, printer_name):
raise NotImplementedException("Abstract Class")
def get_current_config(self):
raise NotImplementedException("Abstract Class") | none | 1 | 2.640164 | 3 | |
conan/test/packager_test.py | sourcedelica/conan-package-tools | 0 | 6614607 | import os
import platform
import sys
import unittest
from collections import defaultdict
from conan.builds_generator import BuildConf
from conan.packager import ConanMultiPackager
from conans import tools
from conans.model.ref import ConanFileReference
from conans.util.files import load
from conans.model.profile import Profile
def platform_mock_for(so):
class PlatformInfoMock(object):
def system(self):
return so
return PlatformInfoMock()
class MockRunner(object):
def __init__(self):
self.reset()
self.output = ""
def reset(self):
self.calls = []
def __call__(self, command):
self.calls.append(command)
return 0
def get_profile_from_trace(self, number):
call = self.calls[number]
profile_start = call.find("--profile") + 10
end_profile = call[profile_start:].find(" ") + profile_start
profile_path = call[profile_start: end_profile]
if hasattr(Profile, "loads"): # retrocompatibility
return Profile.loads(load(profile_path))
else:
from conans.client.profile_loader import read_profile
tools.replace_in_file(profile_path, "include", "#include")
# FIXME: Not able to load here the default
return read_profile(profile_path, os.path.dirname(profile_path), None)[0]
def assert_tests_for(self, numbers):
"""Check if executor has ran the builds that are expected.
numbers are integers"""
def assert_profile_for(pr, num):
assert(pr.settings["compiler"] == 'compiler%d' % num)
assert(pr.settings["os"] == 'os%d' % num)
assert(pr.options.as_list() == [('option%d' % num, 'value%d' % num)])
testp_counter = 0
for i, call in enumerate(self.calls):
if call.startswith("conan create"):
profile = self.get_profile_from_trace(i)
assert_profile_for(profile, numbers[testp_counter])
testp_counter += 1
class AppTest(unittest.TestCase):
def setUp(self):
self.runner = MockRunner()
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner)
if "APPVEYOR" in os.environ:
del os.environ["APPVEYOR"]
if "TRAVIS" in os.environ:
del os.environ["TRAVIS"]
def _add_build(self, number, compiler=None, version=None):
self.packager.add({"os": "os%d" % number, "compiler": compiler or "compiler%d" % number,
"compiler.version": version or "4.3"},
{"option%d" % number: "value%d" % number,
"option%d" % number: "value%d" % number})
def test_full_profile(self):
self.packager.add({"os": "Windows", "compiler": "gcc"},
{"option1": "One"},
{"VAR_1": "ONE",
"VAR_2": "TWO"},
{"*": ["myreference/1.0@lasote/testing"]})
self.packager.run_builds(1, 1)
profile = self.runner.get_profile_from_trace(0)
self.assertEquals(profile.settings["os"], "Windows")
self.assertEquals(profile.settings["compiler"], "gcc")
self.assertEquals(profile.options.as_list(), [("option1", "One")])
self.assertEquals(profile.env_values.data[None]["VAR_1"], "ONE")
self.assertEquals(profile.env_values.data[None]["VAR_2"], "TWO")
self.assertEquals(profile.build_requires["*"],
[ConanFileReference.loads("myreference/1.0@lasote/testing")])
def test_profile_environ(self):
self.packager.add({"os": "Windows", "compiler": "gcc"},
{"option1": "One"},
{"VAR_1": "ONE",
"VAR_2": "TWO"},
{"*": ["myreference/1.0@lasote/testing"]})
with tools.environment_append({"CONAN_BUILD_REQUIRES": "br1/1.0@conan/testing"}):
self.packager.run_builds(1, 1)
profile = self.runner.get_profile_from_trace(0)
self.assertEquals(profile.build_requires["*"],
[ConanFileReference.loads("myreference/1.0@lasote/testing"),
ConanFileReference.loads("br1/1.0@conan/testing")])
def test_pages(self):
for number in range(10):
self._add_build(number)
# 10 pages, 1 per build
self.packager.run_builds(1, 10)
self.runner.assert_tests_for([0])
# 2 pages, 5 per build
self.runner.reset()
self.packager.run_builds(1, 2)
self.runner.assert_tests_for([0, 2, 4, 6, 8])
self.runner.reset()
self.packager.run_builds(2, 2)
self.runner.assert_tests_for([1, 3, 5, 7, 9])
# 3 pages, 4 builds in page 1 and 3 in the rest of pages
self.runner.reset()
self.packager.run_builds(1, 3)
self.runner.assert_tests_for([0, 3, 6, 9])
self.runner.reset()
self.packager.run_builds(2, 3)
self.runner.assert_tests_for([1, 4, 7])
self.runner.reset()
self.packager.run_builds(3, 3)
self.runner.assert_tests_for([2, 5, 8])
def test_deprecation_gcc(self):
with self.assertRaisesRegexp(Exception, "DEPRECATED GCC MINOR VERSIONS!"):
ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["4.3", "5.4"],
use_docker=True)
def test_32bits_images(self):
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True,
docker_32_images=True,
reference="zlib/1.2.11")
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertIn("docker pull lasote/conangcc6-i386", self.runner.calls[0])
self.runner.reset()
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True,
docker_32_images=False)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertNotIn("docker pull lasote/conangcc6-i386", self.runner.calls[0])
self.runner.reset()
with tools.environment_append({"CONAN_DOCKER_32_IMAGES": "1"}):
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertIn("docker pull lasote/conangcc6-i386", self.runner.calls[0])
self.assertIn("arch_build=x86\\", self.runner.calls[-1])
def test_docker_gcc(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["4.3", "5"],
use_docker=True)
self._add_build(1, "gcc", "4.3")
self._add_build(2, "gcc", "4.3")
self._add_build(3, "gcc", "4.3")
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conangcc43", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conangcc43", self.runner.calls[0])
with tools.environment_append({"CONAN_DOCKER_USE_SUDO": "1"}):
self.packager.run_builds(1, 2)
self.assertIn("sudo docker run", self.runner.calls[-1])
# Next build from 4.3 is cached, not pulls are performed
self.assertIn('os=os3', self.runner.calls[5])
def test_docker_clang(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
clang_versions=["3.8", "4.0"],
use_docker=True)
self._add_build(1, "clang", "3.8")
self._add_build(2, "clang", "3.8")
self._add_build(3, "clang", "3.8")
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conanclang38", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
# Next build from 3.8 is cached, not pulls are performed
self.assertIn('os=os3', self.runner.calls[5])
def test_docker_gcc_and_clang(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True)
self._add_build(1, "gcc", "5")
self._add_build(2, "gcc", "5")
self._add_build(3, "gcc", "5")
self._add_build(4, "clang", "3.9")
self._add_build(5, "clang", "3.9")
self._add_build(6, "clang", "3.9")
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conangcc5", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
self.assertIn('os=os3', self.runner.calls[5])
self.packager.run_builds(2, 2)
self.assertIn("docker pull lasote/conanclang39", self.runner.calls[16])
self.assertIn('docker run ', self.runner.calls[17])
self.assertIn('os=os4', self.runner.calls[20])
self.assertIn('os=os6', self.runner.calls[21])
def test_upload_false(self):
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel", upload=False)
self.assertFalse(packager._upload_enabled())
def test_docker_env_propagated(self):
# test env
with tools.environment_append({"CONAN_FAKE_VAR": "32"}):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True)
self._add_build(1, "gcc", "5")
self.packager.run_builds(1, 1)
self.assertIn('-e CONAN_FAKE_VAR="32"', self.runner.calls[-1])
@unittest.skipUnless(sys.platform.startswith("win"), "Requires Windows")
def test_msvc(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
visual_versions=[15])
self.packager.add_common_builds()
with tools.environment_append({"VisualStudioVersion": "15.0"}):
self.packager.run_builds(1, 1)
self.assertIn("vcvars", self.runner.calls[1])
@unittest.skipUnless(sys.platform.startswith("win"), "Requires Windows")
def test_msvc_no_precommand(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
visual_versions=[15],
exclude_vcvars_precommand=True)
self.packager.add_common_builds()
self.packager.run_builds(1, 1)
self.assertNotIn("vcvars", self.runner.calls[1])
def test_docker_invalid(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True)
self._add_build(1, "msvc", "10")
# Only clang and gcc have docker images
self.assertRaises(Exception, self.packager.run_builds)
def test_assign_builds_retrocompatibility(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["4.3", "5"],
use_docker=True)
self.packager.add_common_builds()
self.packager.builds = [({"os": "Windows"}, {"option": "value"})]
self.assertEquals(self.packager.items, [BuildConf(settings={'os': 'Windows'},
options={'option': 'value'},
env_vars={}, build_requires={},
reference=None)])
def test_only_mingw(self):
mingw_configurations = [("4.9", "x86_64", "seh", "posix")]
builder = ConanMultiPackager(mingw_configurations=mingw_configurations, visual_versions=[],
username="Pepe", platform_info=platform_mock_for("Windows"),
reference="lib/1.0")
builder.add_common_builds(shared_option_name="zlib:shared", pure_c=True)
expected = [({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++",
'compiler.threads': 'posix', 'compiler.version': '4.9', 'arch': 'x86_64',
'build_type': 'Release', 'compiler': 'gcc'},
{'zlib:shared': True},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++", 'arch': 'x86_64',
'compiler.threads': 'posix', 'compiler.version': '4.9', 'build_type': 'Debug',
'compiler': 'gcc'},
{'zlib:shared': True},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++",
'compiler.threads': 'posix', 'compiler.version': '4.9', 'arch': 'x86_64',
'build_type': 'Release', 'compiler': 'gcc'},
{'zlib:shared': False},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++", 'arch': 'x86_64',
'compiler.threads': 'posix', 'compiler.version': '4.9', 'build_type': 'Debug',
'compiler': 'gcc'},
{'zlib:shared': False},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]})]
self.assertEquals([tuple(a) for a in builder.builds], expected)
def test_named_pages(self):
builder = ConanMultiPackager(username="Pepe")
named_builds = defaultdict(list)
builder.add_common_builds(shared_option_name="zlib:shared", pure_c=True)
for settings, options, env_vars, build_requires in builder.builds:
named_builds[settings['arch']].append([settings, options, env_vars, build_requires])
builder.named_builds = named_builds
self.assertEquals(builder.builds, [])
if platform.system() == "Darwin": # Not default x86 in Macos
self.assertEquals(len(builder.named_builds), 1)
self.assertFalse("x86" in builder.named_builds)
self.assertTrue("x86_64" in builder.named_builds)
else:
self.assertEquals(len(builder.named_builds), 2)
self.assertTrue("x86" in builder.named_builds)
self.assertTrue("x86_64" in builder.named_builds)
# Conan remote URLs require the username the be in all lowercase
def test_url_handling(self):
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes=["URL1", "URL2"],
upload="URL",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
print(runner.calls)
self.assertIn('conan remote add remote0 url2 --insert', runner.calls)
self.assertIn('conan remote add remote1 url1 --insert', runner.calls)
self.assertIn('conan remote add upload_repo url', runner.calls)
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes="URL1, URL2",
upload="URL",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 url2 --insert', runner.calls)
self.assertIn('conan remote add remote1 url1 --insert', runner.calls)
self.assertIn('conan remote add upload_repo url', runner.calls)
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes="URL1",
upload="URL",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 url1 --insert', runner.calls)
self.assertIn('conan remote add upload_repo url', runner.calls)
def test_remotes(self):
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes=["url1", "url2"],
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 url2 --insert', runner.calls)
self.assertIn('conan remote add remote1 url1 --insert', runner.calls)
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes="myurl1",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 myurl1 --insert', runner.calls)
def test_visual_defaults(self):
with tools.environment_append({"CONAN_VISUAL_VERSIONS": "10"}):
builder = ConanMultiPackager(username="Pepe",
platform_info=platform_mock_for("Windows"))
builder.add_common_builds()
for settings, _, _, _ in builder.builds:
self.assertEquals(settings["compiler"], "Visual Studio")
self.assertEquals(settings["compiler.version"], "10")
with tools.environment_append({"CONAN_VISUAL_VERSIONS": "10",
"MINGW_CONFIGURATIONS": "4.9@x86_64@seh@posix"}):
builder = ConanMultiPackager(username="Pepe",
platform_info=platform_mock_for("Windows"))
builder.add_common_builds()
for settings, _, _, _ in builder.builds:
self.assertEquals(settings["compiler"], "gcc")
self.assertEquals(settings["compiler.version"], "4.9")
def select_defaults_test(self):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
gcc_versions=["4.8", "5"],
username="foo")
self.assertEquals(builder.clang_versions, [])
with tools.environment_append({"CONAN_GCC_VERSIONS": "4.8, 5"}):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
username="foo")
self.assertEquals(builder.clang_versions, [])
self.assertEquals(builder.gcc_versions, ["4.8", "5"])
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
clang_versions=["4.8", "5"],
username="foo")
self.assertEquals(builder.gcc_versions, [])
with tools.environment_append({"CONAN_CLANG_VERSIONS": "4.8, 5"}):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
username="foo")
self.assertEquals(builder.gcc_versions, [])
self.assertEquals(builder.clang_versions, ["4.8", "5"])
    def test_upload(self):
        """Upload-remote registration order.

        If the upload URL also appears in ``remotes`` it is registered once,
        as ``upload_repo``, keeping its position in the remotes order; if it
        is not in ``remotes`` it is appended (no --insert) after the regular
        remotes.  The final recorded command is always the upload itself.
        """
        runner = MockRunner()
        # Simulated `conan remote list` output: the upload URL is already
        # registered under another name.  NOTE(review): presumably how the
        # packager detects the duplicate - confirm against the packager code.
        runner.output = "arepo: myurl"
        builder = ConanMultiPackager(username="pepe", channel="testing",
                                     reference="Hello/0.1", password="password",
                                     upload="myurl", visual_versions=[], gcc_versions=[],
                                     apple_clang_versions=[],
                                     runner=runner,
                                     remotes="myurl, otherurl",
                                     platform_info=platform_mock_for("Darwin"))
        builder.add_common_builds()
        builder.run()
        # Duplicated upload remote puts upload repo first (in the remotes order)
        self.assertEqual(runner.calls[0:3], ['conan remote add remote0 otherurl --insert',
                                             'conan remote add upload_repo myurl --insert',
                                             'conan remote list'])
        # Now check that the upload remote order is preserved if we specify it in the remotes
        runner = MockRunner()
        builder = ConanMultiPackager(username="pepe", channel="testing",
                                     reference="Hello/0.1", password="password",
                                     upload="myurl", visual_versions=[], gcc_versions=[],
                                     apple_clang_versions=[],
                                     runner=runner,
                                     remotes="otherurl, myurl, moreurl",
                                     platform_info=platform_mock_for("Darwin"))
        builder.add_common_builds()
        builder.run()
        # Duplicated upload remote puts upload repo first (in the remotes order)
        self.assertEqual(runner.calls[0:3], ['conan remote add remote0 moreurl --insert',
                                             'conan remote add upload_repo myurl --insert',
                                             'conan remote add remote2 otherurl --insert'])
        self.assertEqual(runner.calls[-1],
                         'conan upload Hello/0.1@pepe/testing --retry 3 --all --force '
                         '--confirm -r=upload_repo')
        # Upload URL not among the remotes: it is added last, without --insert.
        runner = MockRunner()
        builder = ConanMultiPackager(username="pepe", channel="testing",
                                     reference="Hello/0.1", password="password",
                                     upload="myurl", visual_versions=[], gcc_versions=[],
                                     apple_clang_versions=[],
                                     runner=runner,
                                     remotes="otherurl",
                                     platform_info=platform_mock_for("Darwin"))
        builder.add_common_builds()
        builder.run()
        self.assertEqual(runner.calls[0:3],
                         ['conan remote add remote0 otherurl --insert',
                          'conan remote list',
                          'conan remote add upload_repo myurl'])
        self.assertEqual(runner.calls[-1],
                         'conan upload Hello/0.1@pepe/testing --retry 3 --all '
                         '--force --confirm -r=upload_repo')
def test_login(self):
runner = MockRunner()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner)
builder.login("Myremote", "myuser", "mypass", force=False)
self.assertIn('conan user myuser -p="mypass" -r=Myremote', runner.calls[-1])
runner.calls = []
# Already logged, not call conan user again
builder.login("Myremote", "myuser", "mypass", force=False)
self.assertEquals(len(runner.calls), 0)
# Already logged, but forced
runner.calls = []
builder.login("Myremote", "myuser", "mypass", force=True)
self.assertEquals(len(runner.calls), 1)
# Default users/pass
runner.calls = []
builder.login("Myremote2")
self.assertIn('conan user pepe -p="password" -r=Myremote2', runner.calls[-1])
    def test_build_policy(self):
        """The configured build policy ends up as ``--build=<policy>`` on the
        last recorded conan command.

        NOTE(review): in the second part CONAN_BUILD_POLICY is set in the
        environment but build_policy="missing" is *also* passed explicitly,
        so the env-var code path is not actually exercised here - confirm
        whether the explicit argument should be dropped.
        """
        runner = MockRunner()
        builder = ConanMultiPackager(username="pepe", channel="testing",
                                     reference="Hello/0.1", password="password",
                                     visual_versions=[], gcc_versions=[],
                                     apple_clang_versions=[],
                                     runner=runner,
                                     remotes="otherurl",
                                     platform_info=platform_mock_for("Darwin"),
                                     build_policy="outdated")
        builder.add_common_builds()
        builder.run()
        self.assertIn(" --build=outdated", runner.calls[-1])
        with tools.environment_append({"CONAN_BUILD_POLICY": "missing"}):
            builder = ConanMultiPackager(username="pepe", channel="testing",
                                         reference="Hello/0.1", password="password",
                                         visual_versions=[], gcc_versions=[],
                                         apple_clang_versions=[],
                                         runner=runner,
                                         remotes="otherurl",
                                         platform_info=platform_mock_for("Darwin"),
                                         build_policy="missing")
            builder.add_common_builds()
            builder.run()
            self.assertIn(" --build=missing", runner.calls[-1])
    def test_check_credentials(self):
        """Credential verification is tied to the upload configuration.

        With ``upload`` set, ``conan user`` runs right after the upload remote
        is registered and before any create step; without ``upload`` neither
        login nor upload commands are issued at all; with
        ``skip_check_credentials=True`` the login is deferred until just
        before the upload.
        """
        runner = MockRunner()
        # Simulated `conan remote list` output (upload URL already present).
        runner.output = "arepo: myurl"
        builder = ConanMultiPackager(username="pepe", channel="testing",
                                     reference="Hello/0.1", password="password",
                                     upload="myurl", visual_versions=[], gcc_versions=[],
                                     apple_clang_versions=[],
                                     runner=runner,
                                     platform_info=platform_mock_for("Darwin"))
        builder.add_common_builds()
        builder.run()
        # When activated, credentials are checked before creating the profiles
        self.assertEqual(runner.calls[0], 'conan remote add upload_repo myurl')
        self.assertEqual(runner.calls[2], 'conan user pepe -p="password" -r=upload_repo')
        self.assertIn("conan create", runner.calls[-2]) # no second login before upload: it's cached
        self.assertEqual(runner.calls[-1],
                         "conan upload Hello/0.1@pepe/testing --retry 3 --all --force --confirm "
                         "-r=upload_repo")
        runner = MockRunner()
        builder = ConanMultiPackager(username="pepe", channel="testing",
                                     reference="Hello/0.1", password="password",
                                     visual_versions=[], gcc_versions=[],
                                     apple_clang_versions=[],
                                     runner=runner,
                                     remotes="otherurl",
                                     platform_info=platform_mock_for("Darwin"))
        builder.add_common_builds()
        builder.run()
        # When upload is not required, credentials verification must be avoided
        self.assertFalse('conan user pepe -p="password" -r=upload_repo' in runner.calls)
        self.assertFalse('conan upload Hello/0.1@pepe/testing --retry 3 '
                         '--all --force --confirm -r=upload_repo' in runner.calls)
        # If we skip the credentials check, the login will be performed just before the upload
        builder = ConanMultiPackager(username="pepe", channel="testing",
                                     reference="Hello/0.1", password="password",
                                     upload="myurl", visual_versions=[], gcc_versions=[],
                                     apple_clang_versions=[],
                                     runner=runner,
                                     platform_info=platform_mock_for("Darwin"),
                                     skip_check_credentials=True)
        builder.add_common_builds()
        builder.run()
        self.assertEqual(runner.calls[-2],
                         'conan user pepe -p="password" -r=upload_repo')
        self.assertEqual(runner.calls[-1],
                         "conan upload Hello/0.1@pepe/testing --retry 3 --all --force --confirm "
                         "-r=upload_repo")
| import os
import platform
import sys
import unittest
from collections import defaultdict
from conan.builds_generator import BuildConf
from conan.packager import ConanMultiPackager
from conans import tools
from conans.model.ref import ConanFileReference
from conans.util.files import load
from conans.model.profile import Profile
def platform_mock_for(so):
    """Build a stand-in for the platform-info object whose ``system()``
    method always reports *so* (e.g. "Windows", "Linux", "Darwin")."""
    return type("PlatformInfoMock", (object,),
                {"system": lambda self: so})()
class MockRunner(object):
    """Test double for the command runner used by ConanMultiPackager.

    Instead of executing shell commands it records every command string in
    ``self.calls`` and reports success (returns 0).  ``self.output`` can be
    preset by tests to simulate command output.
    """
    def __init__(self):
        self.reset()
        self.output = ""
    def reset(self):
        # Forget all previously recorded commands.
        self.calls = []
    def __call__(self, command):
        # Record the command instead of running it; 0 == success exit code.
        self.calls.append(command)
        return 0
    def get_profile_from_trace(self, number):
        """Extract the ``--profile <path>`` argument from recorded call
        *number* and load the profile file it points to."""
        call = self.calls[number]
        # len("--profile ") == 10: skip past the flag to the path itself.
        profile_start = call.find("--profile") + 10
        end_profile = call[profile_start:].find(" ") + profile_start
        profile_path = call[profile_start: end_profile]
        if hasattr(Profile, "loads"): # retrocompatibility with older conan
            return Profile.loads(load(profile_path))
        else:
            from conans.client.profile_loader import read_profile
            tools.replace_in_file(profile_path, "include", "#include")
            # FIXME: Not able to load here the default
            return read_profile(profile_path, os.path.dirname(profile_path), None)[0]
    def assert_tests_for(self, numbers):
        """Assert that exactly the builds tagged with *numbers* were run.

        Each recorded ``conan create`` call is matched, in order, against the
        synthetic settings/options produced by AppTest._add_build(number).
        """
        def assert_profile_for(pr, num):
            assert(pr.settings["compiler"] == 'compiler%d' % num)
            assert(pr.settings["os"] == 'os%d' % num)
            assert(pr.options.as_list() == [('option%d' % num, 'value%d' % num)])
        testp_counter = 0
        for i, call in enumerate(self.calls):
            if call.startswith("conan create"):
                profile = self.get_profile_from_trace(i)
                assert_profile_for(profile, numbers[testp_counter])
                testp_counter += 1
class AppTest(unittest.TestCase):
    def setUp(self):
        """Create a fresh MockRunner-backed packager for every test."""
        self.runner = MockRunner()
        self.packager = ConanMultiPackager("--build missing -r conan.io",
                                           "lasote", "mychannel",
                                           runner=self.runner)
        # Drop CI markers so behaviour keyed on them doesn't leak into the
        # tests when the suite itself runs on AppVeyor/Travis.
        # NOTE(review): assumption based on the variable names - confirm.
        if "APPVEYOR" in os.environ:
            del os.environ["APPVEYOR"]
        if "TRAVIS" in os.environ:
            del os.environ["TRAVIS"]
def _add_build(self, number, compiler=None, version=None):
self.packager.add({"os": "os%d" % number, "compiler": compiler or "compiler%d" % number,
"compiler.version": version or "4.3"},
{"option%d" % number: "value%d" % number,
"option%d" % number: "value%d" % number})
def test_full_profile(self):
self.packager.add({"os": "Windows", "compiler": "gcc"},
{"option1": "One"},
{"VAR_1": "ONE",
"VAR_2": "TWO"},
{"*": ["myreference/1.0@lasote/testing"]})
self.packager.run_builds(1, 1)
profile = self.runner.get_profile_from_trace(0)
self.assertEquals(profile.settings["os"], "Windows")
self.assertEquals(profile.settings["compiler"], "gcc")
self.assertEquals(profile.options.as_list(), [("option1", "One")])
self.assertEquals(profile.env_values.data[None]["VAR_1"], "ONE")
self.assertEquals(profile.env_values.data[None]["VAR_2"], "TWO")
self.assertEquals(profile.build_requires["*"],
[ConanFileReference.loads("myreference/1.0@lasote/testing")])
def test_profile_environ(self):
self.packager.add({"os": "Windows", "compiler": "gcc"},
{"option1": "One"},
{"VAR_1": "ONE",
"VAR_2": "TWO"},
{"*": ["myreference/1.0@lasote/testing"]})
with tools.environment_append({"CONAN_BUILD_REQUIRES": "br1/1.0@conan/testing"}):
self.packager.run_builds(1, 1)
profile = self.runner.get_profile_from_trace(0)
self.assertEquals(profile.build_requires["*"],
[ConanFileReference.loads("myreference/1.0@lasote/testing"),
ConanFileReference.loads("br1/1.0@conan/testing")])
    def test_pages(self):
        """run_builds(page, total_pages) splits the registered builds across
        pages; the expected index sets below show a round-robin assignment
        (page ``p`` of ``n`` gets builds p-1, p-1+n, p-1+2n, ...)."""
        for number in range(10):
            self._add_build(number)
        # 10 pages, 1 per build
        self.packager.run_builds(1, 10)
        self.runner.assert_tests_for([0])
        # 2 pages, 5 per build
        self.runner.reset()
        self.packager.run_builds(1, 2)
        self.runner.assert_tests_for([0, 2, 4, 6, 8])
        self.runner.reset()
        self.packager.run_builds(2, 2)
        self.runner.assert_tests_for([1, 3, 5, 7, 9])
        # 3 pages, 4 builds in page 1 and 3 in the rest of pages
        self.runner.reset()
        self.packager.run_builds(1, 3)
        self.runner.assert_tests_for([0, 3, 6, 9])
        self.runner.reset()
        self.packager.run_builds(2, 3)
        self.runner.assert_tests_for([1, 4, 7])
        self.runner.reset()
        self.packager.run_builds(3, 3)
        self.runner.assert_tests_for([2, 5, 8])
def test_deprecation_gcc(self):
with self.assertRaisesRegexp(Exception, "DEPRECATED GCC MINOR VERSIONS!"):
ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["4.3", "5.4"],
use_docker=True)
def test_32bits_images(self):
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True,
docker_32_images=True,
reference="zlib/1.2.11")
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertIn("docker pull lasote/conangcc6-i386", self.runner.calls[0])
self.runner.reset()
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True,
docker_32_images=False)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertNotIn("docker pull lasote/conangcc6-i386", self.runner.calls[0])
self.runner.reset()
with tools.environment_append({"CONAN_DOCKER_32_IMAGES": "1"}):
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True)
packager.add({"arch": "x86", "compiler": "gcc", "compiler.version": "6"})
packager.run_builds(1, 1)
self.assertIn("docker pull lasote/conangcc6-i386", self.runner.calls[0])
self.assertIn("arch_build=x86\\", self.runner.calls[-1])
def test_docker_gcc(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["4.3", "5"],
use_docker=True)
self._add_build(1, "gcc", "4.3")
self._add_build(2, "gcc", "4.3")
self._add_build(3, "gcc", "4.3")
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conangcc43", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conangcc43", self.runner.calls[0])
with tools.environment_append({"CONAN_DOCKER_USE_SUDO": "1"}):
self.packager.run_builds(1, 2)
self.assertIn("sudo docker run", self.runner.calls[-1])
# Next build from 4.3 is cached, not pulls are performed
self.assertIn('os=os3', self.runner.calls[5])
def test_docker_clang(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
clang_versions=["3.8", "4.0"],
use_docker=True)
self._add_build(1, "clang", "3.8")
self._add_build(2, "clang", "3.8")
self._add_build(3, "clang", "3.8")
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conanclang38", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
# Next build from 3.8 is cached, not pulls are performed
self.assertIn('os=os3', self.runner.calls[5])
def test_docker_gcc_and_clang(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True)
self._add_build(1, "gcc", "5")
self._add_build(2, "gcc", "5")
self._add_build(3, "gcc", "5")
self._add_build(4, "clang", "3.9")
self._add_build(5, "clang", "3.9")
self._add_build(6, "clang", "3.9")
self.packager.run_builds(1, 2)
self.assertIn("docker pull lasote/conangcc5", self.runner.calls[0])
self.assertIn('docker run ', self.runner.calls[1])
self.assertIn('os=os1', self.runner.calls[4])
self.assertIn('os=os3', self.runner.calls[5])
self.packager.run_builds(2, 2)
self.assertIn("docker pull lasote/conanclang39", self.runner.calls[16])
self.assertIn('docker run ', self.runner.calls[17])
self.assertIn('os=os4', self.runner.calls[20])
self.assertIn('os=os6', self.runner.calls[21])
def test_upload_false(self):
packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel", upload=False)
self.assertFalse(packager._upload_enabled())
def test_docker_env_propagated(self):
# test env
with tools.environment_append({"CONAN_FAKE_VAR": "32"}):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["5", "6"],
clang_versions=["3.9", "4.0"],
use_docker=True)
self._add_build(1, "gcc", "5")
self.packager.run_builds(1, 1)
self.assertIn('-e CONAN_FAKE_VAR="32"', self.runner.calls[-1])
@unittest.skipUnless(sys.platform.startswith("win"), "Requires Windows")
def test_msvc(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
visual_versions=[15])
self.packager.add_common_builds()
with tools.environment_append({"VisualStudioVersion": "15.0"}):
self.packager.run_builds(1, 1)
self.assertIn("vcvars", self.runner.calls[1])
@unittest.skipUnless(sys.platform.startswith("win"), "Requires Windows")
def test_msvc_no_precommand(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
visual_versions=[15],
exclude_vcvars_precommand=True)
self.packager.add_common_builds()
self.packager.run_builds(1, 1)
self.assertNotIn("vcvars", self.runner.calls[1])
def test_docker_invalid(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
use_docker=True)
self._add_build(1, "msvc", "10")
# Only clang and gcc have docker images
self.assertRaises(Exception, self.packager.run_builds)
def test_assign_builds_retrocompatibility(self):
self.packager = ConanMultiPackager("--build missing -r conan.io",
"lasote", "mychannel",
runner=self.runner,
gcc_versions=["4.3", "5"],
use_docker=True)
self.packager.add_common_builds()
self.packager.builds = [({"os": "Windows"}, {"option": "value"})]
self.assertEquals(self.packager.items, [BuildConf(settings={'os': 'Windows'},
options={'option': 'value'},
env_vars={}, build_requires={},
reference=None)])
def test_only_mingw(self):
mingw_configurations = [("4.9", "x86_64", "seh", "posix")]
builder = ConanMultiPackager(mingw_configurations=mingw_configurations, visual_versions=[],
username="Pepe", platform_info=platform_mock_for("Windows"),
reference="lib/1.0")
builder.add_common_builds(shared_option_name="zlib:shared", pure_c=True)
expected = [({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++",
'compiler.threads': 'posix', 'compiler.version': '4.9', 'arch': 'x86_64',
'build_type': 'Release', 'compiler': 'gcc'},
{'zlib:shared': True},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++", 'arch': 'x86_64',
'compiler.threads': 'posix', 'compiler.version': '4.9', 'build_type': 'Debug',
'compiler': 'gcc'},
{'zlib:shared': True},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++",
'compiler.threads': 'posix', 'compiler.version': '4.9', 'arch': 'x86_64',
'build_type': 'Release', 'compiler': 'gcc'},
{'zlib:shared': False},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]}),
({'compiler.exception': 'seh', 'compiler.libcxx': "libstdc++", 'arch': 'x86_64',
'compiler.threads': 'posix', 'compiler.version': '4.9', 'build_type': 'Debug',
'compiler': 'gcc'},
{'zlib:shared': False},
{},
{'*': [ConanFileReference.loads("mingw_installer/1.0@conan/stable")]})]
self.assertEquals([tuple(a) for a in builder.builds], expected)
def test_named_pages(self):
builder = ConanMultiPackager(username="Pepe")
named_builds = defaultdict(list)
builder.add_common_builds(shared_option_name="zlib:shared", pure_c=True)
for settings, options, env_vars, build_requires in builder.builds:
named_builds[settings['arch']].append([settings, options, env_vars, build_requires])
builder.named_builds = named_builds
self.assertEquals(builder.builds, [])
if platform.system() == "Darwin": # Not default x86 in Macos
self.assertEquals(len(builder.named_builds), 1)
self.assertFalse("x86" in builder.named_builds)
self.assertTrue("x86_64" in builder.named_builds)
else:
self.assertEquals(len(builder.named_builds), 2)
self.assertTrue("x86" in builder.named_builds)
self.assertTrue("x86_64" in builder.named_builds)
    # Conan remote URLs require the username to be in all lowercase
def test_url_handling(self):
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes=["URL1", "URL2"],
upload="URL",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
print(runner.calls)
self.assertIn('conan remote add remote0 url2 --insert', runner.calls)
self.assertIn('conan remote add remote1 url1 --insert', runner.calls)
self.assertIn('conan remote add upload_repo url', runner.calls)
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes="URL1, URL2",
upload="URL",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 url2 --insert', runner.calls)
self.assertIn('conan remote add remote1 url1 --insert', runner.calls)
self.assertIn('conan remote add upload_repo url', runner.calls)
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes="URL1",
upload="URL",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 url1 --insert', runner.calls)
self.assertIn('conan remote add upload_repo url', runner.calls)
def test_remotes(self):
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes=["url1", "url2"],
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 url2 --insert', runner.calls)
self.assertIn('conan remote add remote1 url1 --insert', runner.calls)
runner = MockRunner()
builder = ConanMultiPackager(username="Pepe",
remotes="myurl1",
runner=runner)
builder.add({}, {}, {}, {})
builder.run_builds()
self.assertIn('conan remote add remote0 myurl1 --insert', runner.calls)
def test_visual_defaults(self):
with tools.environment_append({"CONAN_VISUAL_VERSIONS": "10"}):
builder = ConanMultiPackager(username="Pepe",
platform_info=platform_mock_for("Windows"))
builder.add_common_builds()
for settings, _, _, _ in builder.builds:
self.assertEquals(settings["compiler"], "Visual Studio")
self.assertEquals(settings["compiler.version"], "10")
with tools.environment_append({"CONAN_VISUAL_VERSIONS": "10",
"MINGW_CONFIGURATIONS": "4.9@x86_64@seh@posix"}):
builder = ConanMultiPackager(username="Pepe",
platform_info=platform_mock_for("Windows"))
builder.add_common_builds()
for settings, _, _, _ in builder.builds:
self.assertEquals(settings["compiler"], "gcc")
self.assertEquals(settings["compiler.version"], "4.9")
def select_defaults_test(self):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
gcc_versions=["4.8", "5"],
username="foo")
self.assertEquals(builder.clang_versions, [])
with tools.environment_append({"CONAN_GCC_VERSIONS": "4.8, 5"}):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
username="foo")
self.assertEquals(builder.clang_versions, [])
self.assertEquals(builder.gcc_versions, ["4.8", "5"])
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
clang_versions=["4.8", "5"],
username="foo")
self.assertEquals(builder.gcc_versions, [])
with tools.environment_append({"CONAN_CLANG_VERSIONS": "4.8, 5"}):
builder = ConanMultiPackager(platform_info=platform_mock_for("Linux"),
username="foo")
self.assertEquals(builder.gcc_versions, [])
self.assertEquals(builder.clang_versions, ["4.8", "5"])
def test_upload(self):
runner = MockRunner()
runner.output = "arepo: myurl"
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
remotes="myurl, otherurl",
platform_info=platform_mock_for("Darwin"))
builder.add_common_builds()
builder.run()
# Duplicated upload remote puts upload repo first (in the remotes order)
self.assertEqual(runner.calls[0:3], ['conan remote add remote0 otherurl --insert',
'conan remote add upload_repo myurl --insert',
'conan remote list'])
# Now check that the upload remote order is preserved if we specify it in the remotes
runner = MockRunner()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
remotes="otherurl, myurl, moreurl",
platform_info=platform_mock_for("Darwin"))
builder.add_common_builds()
builder.run()
# Duplicated upload remote puts upload repo first (in the remotes order)
self.assertEqual(runner.calls[0:3], ['conan remote add remote0 moreurl --insert',
'conan remote add upload_repo myurl --insert',
'conan remote add remote2 otherurl --insert'])
self.assertEqual(runner.calls[-1],
'conan upload Hello/0.1@pepe/testing --retry 3 --all --force '
'--confirm -r=upload_repo')
runner = MockRunner()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"))
builder.add_common_builds()
builder.run()
self.assertEqual(runner.calls[0:3],
['conan remote add remote0 otherurl --insert',
'conan remote list',
'conan remote add upload_repo myurl'])
self.assertEqual(runner.calls[-1],
'conan upload Hello/0.1@pepe/testing --retry 3 --all '
'--force --confirm -r=upload_repo')
def test_login(self):
runner = MockRunner()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner)
builder.login("Myremote", "myuser", "mypass", force=False)
self.assertIn('conan user myuser -p="mypass" -r=Myremote', runner.calls[-1])
runner.calls = []
# Already logged, not call conan user again
builder.login("Myremote", "myuser", "mypass", force=False)
self.assertEquals(len(runner.calls), 0)
# Already logged, but forced
runner.calls = []
builder.login("Myremote", "myuser", "mypass", force=True)
self.assertEquals(len(runner.calls), 1)
# Default users/pass
runner.calls = []
builder.login("Myremote2")
self.assertIn('conan user pepe -p="password" -r=Myremote2', runner.calls[-1])
def test_build_policy(self):
runner = MockRunner()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
build_policy="outdated")
builder.add_common_builds()
builder.run()
self.assertIn(" --build=outdated", runner.calls[-1])
with tools.environment_append({"CONAN_BUILD_POLICY": "missing"}):
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"),
build_policy="missing")
builder.add_common_builds()
builder.run()
self.assertIn(" --build=missing", runner.calls[-1])
def test_check_credentials(self):
runner = MockRunner()
runner.output = "arepo: myurl"
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
platform_info=platform_mock_for("Darwin"))
builder.add_common_builds()
builder.run()
# When activated, check credentials before to create the profiles
self.assertEqual(runner.calls[0], 'conan remote add upload_repo myurl')
self.assertEqual(runner.calls[2], 'conan user pepe -p="password" -r=upload_repo')
self.assertIn("conan create", runner.calls[-2]) # Not login again before upload its cached
self.assertEqual(runner.calls[-1],
"conan upload Hello/0.1@pepe/testing --retry 3 --all --force --confirm "
"-r=upload_repo")
runner = MockRunner()
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
remotes="otherurl",
platform_info=platform_mock_for("Darwin"))
builder.add_common_builds()
builder.run()
# When upload is not required, credentials verification must be avoided
self.assertFalse('conan user pepe -p="password" -r=upload_repo' in runner.calls)
self.assertFalse('conan upload Hello/0.1@pepe/testing --retry 3 '
'--all --force --confirm -r=upload_repo' in runner.calls)
# If we skip the credentials check, the login will be performed just before the upload
builder = ConanMultiPackager(username="pepe", channel="testing",
reference="Hello/0.1", password="password",
upload="myurl", visual_versions=[], gcc_versions=[],
apple_clang_versions=[],
runner=runner,
platform_info=platform_mock_for("Darwin"),
skip_check_credentials=True)
builder.add_common_builds()
builder.run()
self.assertEqual(runner.calls[-2],
'conan user pepe -p="password" -r=upload_repo')
self.assertEqual(runner.calls[-1],
"conan upload Hello/0.1@pepe/testing --retry 3 --all --force --confirm "
"-r=upload_repo")
| en | 0.869384 | # retrocompatibility # FIXME: Not able to load here the default Check if executor has ran the builds that are expected. numbers are integers # 10 pages, 1 per build # 2 pages, 5 per build # 3 pages, 4 builds in page 1 and 3 in the rest of pages # Next build from 4.3 is cached, not pulls are performed # Next build from 3.8 is cached, not pulls are performed # test env # Only clang and gcc have docker images # Not default x86 in Macos # Conan remote URLs require the username the be in all lowercase # Duplicated upload remote puts upload repo first (in the remotes order) # Now check that the upload remote order is preserved if we specify it in the remotes # Duplicated upload remote puts upload repo first (in the remotes order) # Already logged, not call conan user again # Already logged, but forced # Default users/pass # When activated, check credentials before to create the profiles # Not login again before upload its cached # When upload is not required, credentials verification must be avoided # If we skip the credentials check, the login will be performed just before the upload | 2.312374 | 2 |
setup.py | liying2008/document-template | 0 | 6614608 | # -*- coding: utf-8 -*-
import codecs
import os
import sys
from setuptools import find_packages
from setuptools import setup
import document_template
# Project homepage; reused for both project_urls entries and `url` below.
url = 'https://github.com/liying2008/document-template'
# 'setup.py publish' shortcut: build sdist+wheel and push them with twine,
# then exit without running the normal setup() machinery.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist bdist_wheel')
    os.system('twine upload dist/*')
    sys.exit()
# codecs.open gives an explicit-encoding read that also works on Python 2.7
# (this package advertises 2.7 support in its classifiers).
with codecs.open("README.rst", "r", "utf-8") as fh:
    long_description = fh.read()
# Version, author, email and license are sourced from the package itself so
# they are defined in exactly one place.
setup(
    name='document-template',
    version=document_template.__version__,
    description="Generate documents from templates.",
    long_description=long_description,
    classifiers=[
        "Environment :: Console",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Topic :: Utilities",
    ],
    project_urls={
        'Documentation': url,
        'Source': url,
    },
    keywords='template document parser',
    author=document_template.__author__,
    author_email=document_template.__email__,
    maintainer=document_template.__author__,
    maintainer_email=document_template.__email__,
    url=url,
    license=document_template.__license__,
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=[],
    entry_points={},
)
| # -*- coding: utf-8 -*-
import codecs
import os
import sys
from setuptools import find_packages
from setuptools import setup
import document_template
url = 'https://github.com/liying2008/document-template'
# 'setup.py publish' shortcut.
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist bdist_wheel')
os.system('twine upload dist/*')
sys.exit()
with codecs.open("README.rst", "r", "utf-8") as fh:
long_description = fh.read()
setup(
name='document-template',
version=document_template.__version__,
description="Generate documents from templates.",
long_description=long_description,
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Utilities",
],
project_urls={
'Documentation': url,
'Source': url,
},
keywords='template document parser',
author=document_template.__author__,
author_email=document_template.__email__,
maintainer=document_template.__author__,
maintainer_email=document_template.__email__,
url=url,
license=document_template.__license__,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[],
entry_points={},
)
| en | 0.512883 | # -*- coding: utf-8 -*- # 'setup.py publish' shortcut. | 1.493203 | 1 |
src/conductor/client/http/models/workflow_def.py | conductor-sdk/conductor-python | 3 | 6614609 | import pprint
import re # noqa: F401
import six
class WorkflowDef(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'owner_app': 'str',
'create_time': 'int',
'update_time': 'int',
'created_by': 'str',
'updated_by': 'str',
'name': 'str',
'description': 'str',
'version': 'int',
'tasks': 'list[WorkflowTask]',
'input_parameters': 'list[str]',
'output_parameters': 'dict(str, object)',
'failure_workflow': 'str',
'schema_version': 'int',
'restartable': 'bool',
'workflow_status_listener_enabled': 'bool',
'owner_email': 'str',
'timeout_policy': 'str',
'timeout_seconds': 'int',
'variables': 'dict(str, object)',
'input_template': 'dict(str, object)'
}
attribute_map = {
'owner_app': 'ownerApp',
'create_time': 'createTime',
'update_time': 'updateTime',
'created_by': 'createdBy',
'updated_by': 'updatedBy',
'name': 'name',
'description': 'description',
'version': 'version',
'tasks': 'tasks',
'input_parameters': 'inputParameters',
'output_parameters': 'outputParameters',
'failure_workflow': 'failureWorkflow',
'schema_version': 'schemaVersion',
'restartable': 'restartable',
'workflow_status_listener_enabled': 'workflowStatusListenerEnabled',
'owner_email': 'ownerEmail',
'timeout_policy': 'timeoutPolicy',
'timeout_seconds': 'timeoutSeconds',
'variables': 'variables',
'input_template': 'inputTemplate'
}
def __init__(self, owner_app=None, create_time=None, update_time=None, created_by=None, updated_by=None, name=None, description=None, version=None, tasks=None, input_parameters=None, output_parameters=None, failure_workflow=None, schema_version=None, restartable=None, workflow_status_listener_enabled=None, owner_email=None, timeout_policy=None, timeout_seconds=None, variables=None, input_template=None): # noqa: E501
"""WorkflowDef - a model defined in Swagger""" # noqa: E501
self._owner_app = None
self._create_time = None
self._update_time = None
self._created_by = None
self._updated_by = None
self._name = None
self._description = None
self._version = None
self._tasks = None
self._input_parameters = None
self._output_parameters = None
self._failure_workflow = None
self._schema_version = None
self._restartable = None
self._workflow_status_listener_enabled = None
self._owner_email = None
self._timeout_policy = None
self._timeout_seconds = None
self._variables = None
self._input_template = None
self.discriminator = None
if owner_app is not None:
self.owner_app = owner_app
if create_time is not None:
self.create_time = create_time
if update_time is not None:
self.update_time = update_time
if created_by is not None:
self.created_by = created_by
if updated_by is not None:
self.updated_by = updated_by
self.name = name
if description is not None:
self.description = description
if version is not None:
self.version = version
self.tasks = tasks
if input_parameters is not None:
self.input_parameters = input_parameters
if output_parameters is not None:
self.output_parameters = output_parameters
if failure_workflow is not None:
self.failure_workflow = failure_workflow
if schema_version is not None:
self.schema_version = schema_version
if restartable is not None:
self.restartable = restartable
if workflow_status_listener_enabled is not None:
self.workflow_status_listener_enabled = workflow_status_listener_enabled
if owner_email is not None:
self.owner_email = owner_email
if timeout_policy is not None:
self.timeout_policy = timeout_policy
self.timeout_seconds = timeout_seconds
if variables is not None:
self.variables = variables
if input_template is not None:
self.input_template = input_template
@property
def owner_app(self):
"""Gets the owner_app of this WorkflowDef. # noqa: E501
:return: The owner_app of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._owner_app
@owner_app.setter
def owner_app(self, owner_app):
"""Sets the owner_app of this WorkflowDef.
:param owner_app: The owner_app of this WorkflowDef. # noqa: E501
:type: str
"""
self._owner_app = owner_app
@property
def create_time(self):
"""Gets the create_time of this WorkflowDef. # noqa: E501
:return: The create_time of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this WorkflowDef.
:param create_time: The create_time of this WorkflowDef. # noqa: E501
:type: int
"""
self._create_time = create_time
@property
def update_time(self):
"""Gets the update_time of this WorkflowDef. # noqa: E501
:return: The update_time of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._update_time
@update_time.setter
def update_time(self, update_time):
"""Sets the update_time of this WorkflowDef.
:param update_time: The update_time of this WorkflowDef. # noqa: E501
:type: int
"""
self._update_time = update_time
@property
def created_by(self):
"""Gets the created_by of this WorkflowDef. # noqa: E501
:return: The created_by of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this WorkflowDef.
:param created_by: The created_by of this WorkflowDef. # noqa: E501
:type: str
"""
self._created_by = created_by
@property
def updated_by(self):
"""Gets the updated_by of this WorkflowDef. # noqa: E501
:return: The updated_by of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._updated_by
@updated_by.setter
def updated_by(self, updated_by):
"""Sets the updated_by of this WorkflowDef.
:param updated_by: The updated_by of this WorkflowDef. # noqa: E501
:type: str
"""
self._updated_by = updated_by
@property
def name(self):
"""Gets the name of this WorkflowDef. # noqa: E501
:return: The name of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this WorkflowDef.
:param name: The name of this WorkflowDef. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this WorkflowDef. # noqa: E501
:return: The description of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this WorkflowDef.
:param description: The description of this WorkflowDef. # noqa: E501
:type: str
"""
self._description = description
@property
def version(self):
"""Gets the version of this WorkflowDef. # noqa: E501
:return: The version of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this WorkflowDef.
:param version: The version of this WorkflowDef. # noqa: E501
:type: int
"""
self._version = version
@property
def tasks(self):
"""Gets the tasks of this WorkflowDef. # noqa: E501
:return: The tasks of this WorkflowDef. # noqa: E501
:rtype: list[WorkflowTask]
"""
return self._tasks
@tasks.setter
def tasks(self, tasks):
"""Sets the tasks of this WorkflowDef.
:param tasks: The tasks of this WorkflowDef. # noqa: E501
:type: list[WorkflowTask]
"""
if tasks is None:
raise ValueError("Invalid value for `tasks`, must not be `None`") # noqa: E501
self._tasks = tasks
@property
def input_parameters(self):
"""Gets the input_parameters of this WorkflowDef. # noqa: E501
:return: The input_parameters of this WorkflowDef. # noqa: E501
:rtype: list[str]
"""
return self._input_parameters
@input_parameters.setter
def input_parameters(self, input_parameters):
"""Sets the input_parameters of this WorkflowDef.
:param input_parameters: The input_parameters of this WorkflowDef. # noqa: E501
:type: list[str]
"""
self._input_parameters = input_parameters
@property
def output_parameters(self):
"""Gets the output_parameters of this WorkflowDef. # noqa: E501
:return: The output_parameters of this WorkflowDef. # noqa: E501
:rtype: dict(str, object)
"""
return self._output_parameters
@output_parameters.setter
def output_parameters(self, output_parameters):
"""Sets the output_parameters of this WorkflowDef.
:param output_parameters: The output_parameters of this WorkflowDef. # noqa: E501
:type: dict(str, object)
"""
self._output_parameters = output_parameters
@property
def failure_workflow(self):
"""Gets the failure_workflow of this WorkflowDef. # noqa: E501
:return: The failure_workflow of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._failure_workflow
@failure_workflow.setter
def failure_workflow(self, failure_workflow):
"""Sets the failure_workflow of this WorkflowDef.
:param failure_workflow: The failure_workflow of this WorkflowDef. # noqa: E501
:type: str
"""
self._failure_workflow = failure_workflow
@property
def schema_version(self):
"""Gets the schema_version of this WorkflowDef. # noqa: E501
:return: The schema_version of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._schema_version
@schema_version.setter
def schema_version(self, schema_version):
"""Sets the schema_version of this WorkflowDef.
:param schema_version: The schema_version of this WorkflowDef. # noqa: E501
:type: int
"""
self._schema_version = schema_version
@property
def restartable(self):
"""Gets the restartable of this WorkflowDef. # noqa: E501
:return: The restartable of this WorkflowDef. # noqa: E501
:rtype: bool
"""
return self._restartable
@restartable.setter
def restartable(self, restartable):
"""Sets the restartable of this WorkflowDef.
:param restartable: The restartable of this WorkflowDef. # noqa: E501
:type: bool
"""
self._restartable = restartable
@property
def workflow_status_listener_enabled(self):
"""Gets the workflow_status_listener_enabled of this WorkflowDef. # noqa: E501
:return: The workflow_status_listener_enabled of this WorkflowDef. # noqa: E501
:rtype: bool
"""
return self._workflow_status_listener_enabled
@workflow_status_listener_enabled.setter
def workflow_status_listener_enabled(self, workflow_status_listener_enabled):
"""Sets the workflow_status_listener_enabled of this WorkflowDef.
:param workflow_status_listener_enabled: The workflow_status_listener_enabled of this WorkflowDef. # noqa: E501
:type: bool
"""
self._workflow_status_listener_enabled = workflow_status_listener_enabled
@property
def owner_email(self):
"""Gets the owner_email of this WorkflowDef. # noqa: E501
:return: The owner_email of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._owner_email
@owner_email.setter
def owner_email(self, owner_email):
"""Sets the owner_email of this WorkflowDef.
:param owner_email: The owner_email of this WorkflowDef. # noqa: E501
:type: str
"""
self._owner_email = owner_email
@property
def timeout_policy(self):
"""Gets the timeout_policy of this WorkflowDef. # noqa: E501
:return: The timeout_policy of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._timeout_policy
@timeout_policy.setter
def timeout_policy(self, timeout_policy):
"""Sets the timeout_policy of this WorkflowDef.
:param timeout_policy: The timeout_policy of this WorkflowDef. # noqa: E501
:type: str
"""
allowed_values = ["TIME_OUT_WF", "ALERT_ONLY"] # noqa: E501
if timeout_policy not in allowed_values:
raise ValueError(
"Invalid value for `timeout_policy` ({0}), must be one of {1}" # noqa: E501
.format(timeout_policy, allowed_values)
)
self._timeout_policy = timeout_policy
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this WorkflowDef. # noqa: E501
:return: The timeout_seconds of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this WorkflowDef.
:param timeout_seconds: The timeout_seconds of this WorkflowDef. # noqa: E501
:type: int
"""
if timeout_seconds is None:
raise ValueError("Invalid value for `timeout_seconds`, must not be `None`") # noqa: E501
self._timeout_seconds = timeout_seconds
@property
def variables(self):
"""Gets the variables of this WorkflowDef. # noqa: E501
:return: The variables of this WorkflowDef. # noqa: E501
:rtype: dict(str, object)
"""
return self._variables
@variables.setter
def variables(self, variables):
"""Sets the variables of this WorkflowDef.
:param variables: The variables of this WorkflowDef. # noqa: E501
:type: dict(str, object)
"""
self._variables = variables
@property
def input_template(self):
"""Gets the input_template of this WorkflowDef. # noqa: E501
:return: The input_template of this WorkflowDef. # noqa: E501
:rtype: dict(str, object)
"""
return self._input_template
@input_template.setter
def input_template(self, input_template):
"""Sets the input_template of this WorkflowDef.
:param input_template: The input_template of this WorkflowDef. # noqa: E501
:type: dict(str, object)
"""
self._input_template = input_template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkflowDef, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkflowDef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| import pprint
import re # noqa: F401
import six
class WorkflowDef(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'owner_app': 'str',
'create_time': 'int',
'update_time': 'int',
'created_by': 'str',
'updated_by': 'str',
'name': 'str',
'description': 'str',
'version': 'int',
'tasks': 'list[WorkflowTask]',
'input_parameters': 'list[str]',
'output_parameters': 'dict(str, object)',
'failure_workflow': 'str',
'schema_version': 'int',
'restartable': 'bool',
'workflow_status_listener_enabled': 'bool',
'owner_email': 'str',
'timeout_policy': 'str',
'timeout_seconds': 'int',
'variables': 'dict(str, object)',
'input_template': 'dict(str, object)'
}
attribute_map = {
'owner_app': 'ownerApp',
'create_time': 'createTime',
'update_time': 'updateTime',
'created_by': 'createdBy',
'updated_by': 'updatedBy',
'name': 'name',
'description': 'description',
'version': 'version',
'tasks': 'tasks',
'input_parameters': 'inputParameters',
'output_parameters': 'outputParameters',
'failure_workflow': 'failureWorkflow',
'schema_version': 'schemaVersion',
'restartable': 'restartable',
'workflow_status_listener_enabled': 'workflowStatusListenerEnabled',
'owner_email': 'ownerEmail',
'timeout_policy': 'timeoutPolicy',
'timeout_seconds': 'timeoutSeconds',
'variables': 'variables',
'input_template': 'inputTemplate'
}
def __init__(self, owner_app=None, create_time=None, update_time=None, created_by=None, updated_by=None, name=None, description=None, version=None, tasks=None, input_parameters=None, output_parameters=None, failure_workflow=None, schema_version=None, restartable=None, workflow_status_listener_enabled=None, owner_email=None, timeout_policy=None, timeout_seconds=None, variables=None, input_template=None): # noqa: E501
"""WorkflowDef - a model defined in Swagger""" # noqa: E501
self._owner_app = None
self._create_time = None
self._update_time = None
self._created_by = None
self._updated_by = None
self._name = None
self._description = None
self._version = None
self._tasks = None
self._input_parameters = None
self._output_parameters = None
self._failure_workflow = None
self._schema_version = None
self._restartable = None
self._workflow_status_listener_enabled = None
self._owner_email = None
self._timeout_policy = None
self._timeout_seconds = None
self._variables = None
self._input_template = None
self.discriminator = None
if owner_app is not None:
self.owner_app = owner_app
if create_time is not None:
self.create_time = create_time
if update_time is not None:
self.update_time = update_time
if created_by is not None:
self.created_by = created_by
if updated_by is not None:
self.updated_by = updated_by
self.name = name
if description is not None:
self.description = description
if version is not None:
self.version = version
self.tasks = tasks
if input_parameters is not None:
self.input_parameters = input_parameters
if output_parameters is not None:
self.output_parameters = output_parameters
if failure_workflow is not None:
self.failure_workflow = failure_workflow
if schema_version is not None:
self.schema_version = schema_version
if restartable is not None:
self.restartable = restartable
if workflow_status_listener_enabled is not None:
self.workflow_status_listener_enabled = workflow_status_listener_enabled
if owner_email is not None:
self.owner_email = owner_email
if timeout_policy is not None:
self.timeout_policy = timeout_policy
self.timeout_seconds = timeout_seconds
if variables is not None:
self.variables = variables
if input_template is not None:
self.input_template = input_template
@property
def owner_app(self):
"""Gets the owner_app of this WorkflowDef. # noqa: E501
:return: The owner_app of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._owner_app
@owner_app.setter
def owner_app(self, owner_app):
"""Sets the owner_app of this WorkflowDef.
:param owner_app: The owner_app of this WorkflowDef. # noqa: E501
:type: str
"""
self._owner_app = owner_app
@property
def create_time(self):
"""Gets the create_time of this WorkflowDef. # noqa: E501
:return: The create_time of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this WorkflowDef.
:param create_time: The create_time of this WorkflowDef. # noqa: E501
:type: int
"""
self._create_time = create_time
@property
def update_time(self):
"""Gets the update_time of this WorkflowDef. # noqa: E501
:return: The update_time of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._update_time
@update_time.setter
def update_time(self, update_time):
"""Sets the update_time of this WorkflowDef.
:param update_time: The update_time of this WorkflowDef. # noqa: E501
:type: int
"""
self._update_time = update_time
@property
def created_by(self):
"""Gets the created_by of this WorkflowDef. # noqa: E501
:return: The created_by of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this WorkflowDef.
:param created_by: The created_by of this WorkflowDef. # noqa: E501
:type: str
"""
self._created_by = created_by
@property
def updated_by(self):
"""Gets the updated_by of this WorkflowDef. # noqa: E501
:return: The updated_by of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._updated_by
@updated_by.setter
def updated_by(self, updated_by):
"""Sets the updated_by of this WorkflowDef.
:param updated_by: The updated_by of this WorkflowDef. # noqa: E501
:type: str
"""
self._updated_by = updated_by
@property
def name(self):
"""Gets the name of this WorkflowDef. # noqa: E501
:return: The name of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this WorkflowDef.
:param name: The name of this WorkflowDef. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this WorkflowDef. # noqa: E501
:return: The description of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this WorkflowDef.
:param description: The description of this WorkflowDef. # noqa: E501
:type: str
"""
self._description = description
@property
def version(self):
"""Gets the version of this WorkflowDef. # noqa: E501
:return: The version of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this WorkflowDef.
:param version: The version of this WorkflowDef. # noqa: E501
:type: int
"""
self._version = version
@property
def tasks(self):
"""Gets the tasks of this WorkflowDef. # noqa: E501
:return: The tasks of this WorkflowDef. # noqa: E501
:rtype: list[WorkflowTask]
"""
return self._tasks
@tasks.setter
def tasks(self, tasks):
"""Sets the tasks of this WorkflowDef.
:param tasks: The tasks of this WorkflowDef. # noqa: E501
:type: list[WorkflowTask]
"""
if tasks is None:
raise ValueError("Invalid value for `tasks`, must not be `None`") # noqa: E501
self._tasks = tasks
@property
def input_parameters(self):
"""Gets the input_parameters of this WorkflowDef. # noqa: E501
:return: The input_parameters of this WorkflowDef. # noqa: E501
:rtype: list[str]
"""
return self._input_parameters
@input_parameters.setter
def input_parameters(self, input_parameters):
"""Sets the input_parameters of this WorkflowDef.
:param input_parameters: The input_parameters of this WorkflowDef. # noqa: E501
:type: list[str]
"""
self._input_parameters = input_parameters
@property
def output_parameters(self):
"""Gets the output_parameters of this WorkflowDef. # noqa: E501
:return: The output_parameters of this WorkflowDef. # noqa: E501
:rtype: dict(str, object)
"""
return self._output_parameters
@output_parameters.setter
def output_parameters(self, output_parameters):
"""Sets the output_parameters of this WorkflowDef.
:param output_parameters: The output_parameters of this WorkflowDef. # noqa: E501
:type: dict(str, object)
"""
self._output_parameters = output_parameters
@property
def failure_workflow(self):
"""Gets the failure_workflow of this WorkflowDef. # noqa: E501
:return: The failure_workflow of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._failure_workflow
@failure_workflow.setter
def failure_workflow(self, failure_workflow):
"""Sets the failure_workflow of this WorkflowDef.
:param failure_workflow: The failure_workflow of this WorkflowDef. # noqa: E501
:type: str
"""
self._failure_workflow = failure_workflow
@property
def schema_version(self):
"""Gets the schema_version of this WorkflowDef. # noqa: E501
:return: The schema_version of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._schema_version
@schema_version.setter
def schema_version(self, schema_version):
"""Sets the schema_version of this WorkflowDef.
:param schema_version: The schema_version of this WorkflowDef. # noqa: E501
:type: int
"""
self._schema_version = schema_version
@property
def restartable(self):
"""Gets the restartable of this WorkflowDef. # noqa: E501
:return: The restartable of this WorkflowDef. # noqa: E501
:rtype: bool
"""
return self._restartable
@restartable.setter
def restartable(self, restartable):
"""Sets the restartable of this WorkflowDef.
:param restartable: The restartable of this WorkflowDef. # noqa: E501
:type: bool
"""
self._restartable = restartable
@property
def workflow_status_listener_enabled(self):
"""Gets the workflow_status_listener_enabled of this WorkflowDef. # noqa: E501
:return: The workflow_status_listener_enabled of this WorkflowDef. # noqa: E501
:rtype: bool
"""
return self._workflow_status_listener_enabled
@workflow_status_listener_enabled.setter
def workflow_status_listener_enabled(self, workflow_status_listener_enabled):
"""Sets the workflow_status_listener_enabled of this WorkflowDef.
:param workflow_status_listener_enabled: The workflow_status_listener_enabled of this WorkflowDef. # noqa: E501
:type: bool
"""
self._workflow_status_listener_enabled = workflow_status_listener_enabled
@property
def owner_email(self):
"""Gets the owner_email of this WorkflowDef. # noqa: E501
:return: The owner_email of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._owner_email
@owner_email.setter
def owner_email(self, owner_email):
"""Sets the owner_email of this WorkflowDef.
:param owner_email: The owner_email of this WorkflowDef. # noqa: E501
:type: str
"""
self._owner_email = owner_email
@property
def timeout_policy(self):
"""Gets the timeout_policy of this WorkflowDef. # noqa: E501
:return: The timeout_policy of this WorkflowDef. # noqa: E501
:rtype: str
"""
return self._timeout_policy
@timeout_policy.setter
def timeout_policy(self, timeout_policy):
"""Sets the timeout_policy of this WorkflowDef.
:param timeout_policy: The timeout_policy of this WorkflowDef. # noqa: E501
:type: str
"""
allowed_values = ["TIME_OUT_WF", "ALERT_ONLY"] # noqa: E501
if timeout_policy not in allowed_values:
raise ValueError(
"Invalid value for `timeout_policy` ({0}), must be one of {1}" # noqa: E501
.format(timeout_policy, allowed_values)
)
self._timeout_policy = timeout_policy
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this WorkflowDef. # noqa: E501
:return: The timeout_seconds of this WorkflowDef. # noqa: E501
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this WorkflowDef.
:param timeout_seconds: The timeout_seconds of this WorkflowDef. # noqa: E501
:type: int
"""
if timeout_seconds is None:
raise ValueError("Invalid value for `timeout_seconds`, must not be `None`") # noqa: E501
self._timeout_seconds = timeout_seconds
@property
def variables(self):
"""Gets the variables of this WorkflowDef. # noqa: E501
:return: The variables of this WorkflowDef. # noqa: E501
:rtype: dict(str, object)
"""
return self._variables
@variables.setter
def variables(self, variables):
"""Sets the variables of this WorkflowDef.
:param variables: The variables of this WorkflowDef. # noqa: E501
:type: dict(str, object)
"""
self._variables = variables
@property
def input_template(self):
"""Gets the input_template of this WorkflowDef. # noqa: E501
:return: The input_template of this WorkflowDef. # noqa: E501
:rtype: dict(str, object)
"""
return self._input_template
@input_template.setter
def input_template(self, input_template):
"""Sets the input_template of this WorkflowDef.
:param input_template: The input_template of this WorkflowDef. # noqa: E501
:type: dict(str, object)
"""
self._input_template = input_template
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkflowDef, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkflowDef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| en | 0.663046 | # noqa: F401 NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. # noqa: E501 WorkflowDef - a model defined in Swagger # noqa: E501 Gets the owner_app of this WorkflowDef. # noqa: E501 :return: The owner_app of this WorkflowDef. # noqa: E501 :rtype: str Sets the owner_app of this WorkflowDef. :param owner_app: The owner_app of this WorkflowDef. # noqa: E501 :type: str Gets the create_time of this WorkflowDef. # noqa: E501 :return: The create_time of this WorkflowDef. # noqa: E501 :rtype: int Sets the create_time of this WorkflowDef. :param create_time: The create_time of this WorkflowDef. # noqa: E501 :type: int Gets the update_time of this WorkflowDef. # noqa: E501 :return: The update_time of this WorkflowDef. # noqa: E501 :rtype: int Sets the update_time of this WorkflowDef. :param update_time: The update_time of this WorkflowDef. # noqa: E501 :type: int Gets the created_by of this WorkflowDef. # noqa: E501 :return: The created_by of this WorkflowDef. # noqa: E501 :rtype: str Sets the created_by of this WorkflowDef. :param created_by: The created_by of this WorkflowDef. # noqa: E501 :type: str Gets the updated_by of this WorkflowDef. # noqa: E501 :return: The updated_by of this WorkflowDef. # noqa: E501 :rtype: str Sets the updated_by of this WorkflowDef. :param updated_by: The updated_by of this WorkflowDef. # noqa: E501 :type: str Gets the name of this WorkflowDef. # noqa: E501 :return: The name of this WorkflowDef. # noqa: E501 :rtype: str Sets the name of this WorkflowDef. :param name: The name of this WorkflowDef. # noqa: E501 :type: str # noqa: E501 Gets the description of this WorkflowDef. # noqa: E501 :return: The description of this WorkflowDef. 
# noqa: E501 :rtype: str Sets the description of this WorkflowDef. :param description: The description of this WorkflowDef. # noqa: E501 :type: str Gets the version of this WorkflowDef. # noqa: E501 :return: The version of this WorkflowDef. # noqa: E501 :rtype: int Sets the version of this WorkflowDef. :param version: The version of this WorkflowDef. # noqa: E501 :type: int Gets the tasks of this WorkflowDef. # noqa: E501 :return: The tasks of this WorkflowDef. # noqa: E501 :rtype: list[WorkflowTask] Sets the tasks of this WorkflowDef. :param tasks: The tasks of this WorkflowDef. # noqa: E501 :type: list[WorkflowTask] # noqa: E501 Gets the input_parameters of this WorkflowDef. # noqa: E501 :return: The input_parameters of this WorkflowDef. # noqa: E501 :rtype: list[str] Sets the input_parameters of this WorkflowDef. :param input_parameters: The input_parameters of this WorkflowDef. # noqa: E501 :type: list[str] Gets the output_parameters of this WorkflowDef. # noqa: E501 :return: The output_parameters of this WorkflowDef. # noqa: E501 :rtype: dict(str, object) Sets the output_parameters of this WorkflowDef. :param output_parameters: The output_parameters of this WorkflowDef. # noqa: E501 :type: dict(str, object) Gets the failure_workflow of this WorkflowDef. # noqa: E501 :return: The failure_workflow of this WorkflowDef. # noqa: E501 :rtype: str Sets the failure_workflow of this WorkflowDef. :param failure_workflow: The failure_workflow of this WorkflowDef. # noqa: E501 :type: str Gets the schema_version of this WorkflowDef. # noqa: E501 :return: The schema_version of this WorkflowDef. # noqa: E501 :rtype: int Sets the schema_version of this WorkflowDef. :param schema_version: The schema_version of this WorkflowDef. # noqa: E501 :type: int Gets the restartable of this WorkflowDef. # noqa: E501 :return: The restartable of this WorkflowDef. # noqa: E501 :rtype: bool Sets the restartable of this WorkflowDef. :param restartable: The restartable of this WorkflowDef. 
# noqa: E501 :type: bool Gets the workflow_status_listener_enabled of this WorkflowDef. # noqa: E501 :return: The workflow_status_listener_enabled of this WorkflowDef. # noqa: E501 :rtype: bool Sets the workflow_status_listener_enabled of this WorkflowDef. :param workflow_status_listener_enabled: The workflow_status_listener_enabled of this WorkflowDef. # noqa: E501 :type: bool Gets the owner_email of this WorkflowDef. # noqa: E501 :return: The owner_email of this WorkflowDef. # noqa: E501 :rtype: str Sets the owner_email of this WorkflowDef. :param owner_email: The owner_email of this WorkflowDef. # noqa: E501 :type: str Gets the timeout_policy of this WorkflowDef. # noqa: E501 :return: The timeout_policy of this WorkflowDef. # noqa: E501 :rtype: str Sets the timeout_policy of this WorkflowDef. :param timeout_policy: The timeout_policy of this WorkflowDef. # noqa: E501 :type: str # noqa: E501 # noqa: E501 Gets the timeout_seconds of this WorkflowDef. # noqa: E501 :return: The timeout_seconds of this WorkflowDef. # noqa: E501 :rtype: int Sets the timeout_seconds of this WorkflowDef. :param timeout_seconds: The timeout_seconds of this WorkflowDef. # noqa: E501 :type: int # noqa: E501 Gets the variables of this WorkflowDef. # noqa: E501 :return: The variables of this WorkflowDef. # noqa: E501 :rtype: dict(str, object) Sets the variables of this WorkflowDef. :param variables: The variables of this WorkflowDef. # noqa: E501 :type: dict(str, object) Gets the input_template of this WorkflowDef. # noqa: E501 :return: The input_template of this WorkflowDef. # noqa: E501 :rtype: dict(str, object) Sets the input_template of this WorkflowDef. :param input_template: The input_template of this WorkflowDef. # noqa: E501 :type: dict(str, object) Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 2.159156 | 2 |
tsh/errors.py | alefnula/cmd | 1 | 6614610 | class TshError(Exception):
def __init__(self, message: str = ""):
self.message = message
def __str__(self):
return f"{self.__class__.__name__}({self.message})"
__repr__ = __str__
class CommandNotFound(TshError):
pass
| class TshError(Exception):
def __init__(self, message: str = ""):
self.message = message
def __str__(self):
return f"{self.__class__.__name__}({self.message})"
__repr__ = __str__
class CommandNotFound(TshError):
pass
| none | 1 | 2.888852 | 3 | |
tests/lib/test_demux.py | bogdanvuk/pygears | 120 | 6614611 | import pytest
from pygears.util.test_utils import synth_check
from pygears.typing import Union, Uint
from pygears.lib import demux, mux, demux_ctrl
from pygears.lib.delay import delay_rng
from pygears.lib.verif import drv, directed
from pygears.sim import sim
from pygears import Intf, gear
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
@pytest.mark.parametrize('branches', list(range(2, 10)))
def test_simple_directed(sim_cls, din_delay, dout_delay, branches):
seq = [(i, i) for i in range(branches)]
TDin = Union[tuple(Uint[i] for i in range(1, branches + 1))]
directed(
drv(t=TDin, seq=seq) | delay_rng(din_delay, din_delay),
f=demux(sim_cls=sim_cls),
delays=[delay_rng(dout_delay, dout_delay) for _ in range(branches)],
ref=[[i] for i in range(branches)])
sim()
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
@pytest.mark.parametrize('branches', list(range(2, 10)))
def test_mapped_directed(sim_cls, din_delay, dout_delay, branches):
seq = [(i, i) for i in range(branches)]
TDin = Union[tuple(Uint[i] for i in range(1, branches + 1))]
mapping = {}
for i in range(branches):
mapping[i] = (i + 1) if (i + 1) < branches else 0
ref = [[(i - 1) if (i - 1) >= 0 else (branches - 1)]
for i in range(branches)]
directed(
drv(t=TDin, seq=seq) | delay_rng(din_delay, din_delay),
f=demux(mapping=mapping, sim_cls=sim_cls),
delays=[delay_rng(dout_delay, dout_delay) for _ in range(branches)],
ref=ref)
sim()
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
def test_mapped_default_directed(sim_cls, din_delay, dout_delay):
seq = [(i, i) for i in range(8)]
TDin = Union[tuple(Uint[i] for i in range(1, 8 + 1))]
mapping = {3: 0, 4: 0, 7: 1}
ref = [[3, 4], [7], [0, 1, 2, 5, 6]]
directed(drv(t=TDin, seq=seq) | delay_rng(din_delay, din_delay),
f=demux(mapping=mapping, sim_cls=sim_cls),
delays=[delay_rng(dout_delay, dout_delay) for _ in range(3)],
ref=ref)
sim()
@pytest.mark.parametrize('branches', [2, 3, 27])
@synth_check({'logic luts': 0, 'ffs': 0}, tool='yosys')
def test_mux_demux_redux_yosys(branches):
TDin = Union[tuple(Uint[i] for i in range(1, branches + 1))]
@gear
def test(din):
return demux_ctrl(din) | mux
test(Intf(TDin))
| import pytest
from pygears.util.test_utils import synth_check
from pygears.typing import Union, Uint
from pygears.lib import demux, mux, demux_ctrl
from pygears.lib.delay import delay_rng
from pygears.lib.verif import drv, directed
from pygears.sim import sim
from pygears import Intf, gear
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
@pytest.mark.parametrize('branches', list(range(2, 10)))
def test_simple_directed(sim_cls, din_delay, dout_delay, branches):
seq = [(i, i) for i in range(branches)]
TDin = Union[tuple(Uint[i] for i in range(1, branches + 1))]
directed(
drv(t=TDin, seq=seq) | delay_rng(din_delay, din_delay),
f=demux(sim_cls=sim_cls),
delays=[delay_rng(dout_delay, dout_delay) for _ in range(branches)],
ref=[[i] for i in range(branches)])
sim()
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
@pytest.mark.parametrize('branches', list(range(2, 10)))
def test_mapped_directed(sim_cls, din_delay, dout_delay, branches):
seq = [(i, i) for i in range(branches)]
TDin = Union[tuple(Uint[i] for i in range(1, branches + 1))]
mapping = {}
for i in range(branches):
mapping[i] = (i + 1) if (i + 1) < branches else 0
ref = [[(i - 1) if (i - 1) >= 0 else (branches - 1)]
for i in range(branches)]
directed(
drv(t=TDin, seq=seq) | delay_rng(din_delay, din_delay),
f=demux(mapping=mapping, sim_cls=sim_cls),
delays=[delay_rng(dout_delay, dout_delay) for _ in range(branches)],
ref=ref)
sim()
@pytest.mark.parametrize('din_delay', [0, 1])
@pytest.mark.parametrize('dout_delay', [0, 1])
def test_mapped_default_directed(sim_cls, din_delay, dout_delay):
seq = [(i, i) for i in range(8)]
TDin = Union[tuple(Uint[i] for i in range(1, 8 + 1))]
mapping = {3: 0, 4: 0, 7: 1}
ref = [[3, 4], [7], [0, 1, 2, 5, 6]]
directed(drv(t=TDin, seq=seq) | delay_rng(din_delay, din_delay),
f=demux(mapping=mapping, sim_cls=sim_cls),
delays=[delay_rng(dout_delay, dout_delay) for _ in range(3)],
ref=ref)
sim()
@pytest.mark.parametrize('branches', [2, 3, 27])
@synth_check({'logic luts': 0, 'ffs': 0}, tool='yosys')
def test_mux_demux_redux_yosys(branches):
TDin = Union[tuple(Uint[i] for i in range(1, branches + 1))]
@gear
def test(din):
return demux_ctrl(din) | mux
test(Intf(TDin))
| none | 1 | 2.001707 | 2 | |
core/migrations/0008_auto_20200105_1723.py | IS-AgroSmart/MVP | 0 | 6614612 | # Generated by Django 3.0.1 on 2020-01-05 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20191229_2356'),
]
operations = [
migrations.AddField(
model_name='flight',
name='is_demo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='user',
name='demo_flights',
field=models.ManyToManyField(related_name='demo_users', to='core.Flight'),
),
]
| # Generated by Django 3.0.1 on 2020-01-05 17:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20191229_2356'),
]
operations = [
migrations.AddField(
model_name='flight',
name='is_demo',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='user',
name='demo_flights',
field=models.ManyToManyField(related_name='demo_users', to='core.Flight'),
),
]
| en | 0.826186 | # Generated by Django 3.0.1 on 2020-01-05 17:23 | 1.719671 | 2 |
ocelot/transformations/consequential/__init__.py | cmutel/Ocelot | 21 | 6614613 | <filename>ocelot/transformations/consequential/__init__.py
# -*- coding: utf-8 -*-
from ...collection import Collection
from ..identifying import add_unique_codes
from ..locations import (
actualize_activity_links,
add_suppliers_to_markets,
allocate_all_market_suppliers,
assign_fake_pv_to_confidential_datasets,
delete_whitelisted_zero_pv_market_datsets,
delete_suppliers_list,
link_consumers_to_markets,
log_and_delete_unlinked_exchanges,
relabel_global_to_row,
update_market_production_volumes,
)
from ..utils import label_reference_product
from .byproducts import ensure_byproducts_have_alternative_production
from .constrained_markets import (
delete_activity_links_to_constrained_markets,
handle_constrained_markets,
)
from .market_linking import prune_suppliers_by_technology_level
from .combined import split_combined_production
from functools import partial
link_markets_by_technology_level = Collection(
"Consequential market linking by technology level",
label_reference_product,
delete_whitelisted_zero_pv_market_datsets,
assign_fake_pv_to_confidential_datasets,
relabel_global_to_row,
add_unique_codes,
actualize_activity_links,
add_suppliers_to_markets,
prune_suppliers_by_technology_level,
update_market_production_volumes,
partial(add_suppliers_to_markets, from_type="market activity",
to_type="market group"),
partial(update_market_production_volumes, kind='market group'),
allocate_all_market_suppliers,
delete_suppliers_list,
# drop_zero_pv_row_datasets,
link_consumers_to_markets,
log_and_delete_unlinked_exchanges,
)
| <filename>ocelot/transformations/consequential/__init__.py
# -*- coding: utf-8 -*-
from ...collection import Collection
from ..identifying import add_unique_codes
from ..locations import (
actualize_activity_links,
add_suppliers_to_markets,
allocate_all_market_suppliers,
assign_fake_pv_to_confidential_datasets,
delete_whitelisted_zero_pv_market_datsets,
delete_suppliers_list,
link_consumers_to_markets,
log_and_delete_unlinked_exchanges,
relabel_global_to_row,
update_market_production_volumes,
)
from ..utils import label_reference_product
from .byproducts import ensure_byproducts_have_alternative_production
from .constrained_markets import (
delete_activity_links_to_constrained_markets,
handle_constrained_markets,
)
from .market_linking import prune_suppliers_by_technology_level
from .combined import split_combined_production
from functools import partial
link_markets_by_technology_level = Collection(
"Consequential market linking by technology level",
label_reference_product,
delete_whitelisted_zero_pv_market_datsets,
assign_fake_pv_to_confidential_datasets,
relabel_global_to_row,
add_unique_codes,
actualize_activity_links,
add_suppliers_to_markets,
prune_suppliers_by_technology_level,
update_market_production_volumes,
partial(add_suppliers_to_markets, from_type="market activity",
to_type="market group"),
partial(update_market_production_volumes, kind='market group'),
allocate_all_market_suppliers,
delete_suppliers_list,
# drop_zero_pv_row_datasets,
link_consumers_to_markets,
log_and_delete_unlinked_exchanges,
)
| en | 0.626947 | # -*- coding: utf-8 -*- # drop_zero_pv_row_datasets, | 1.667616 | 2 |
apps/geovinci/views/front.py | octaflop/geovinci | 4 | 6614614 | from django.shortcuts import render
def index(request):
ctx = {}
template_name = "geovinci/front/index.html"
return render(request, template_name, ctx)
| from django.shortcuts import render
def index(request):
ctx = {}
template_name = "geovinci/front/index.html"
return render(request, template_name, ctx)
| none | 1 | 1.44587 | 1 | |
workbooks/feature_selection.py | SirSharpest/RNA-Seq-Analysis | 0 | 6614615 | from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE, RFECV
from gprofiler import GProfiler
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
def read_xl(fn="/Users/aoife/PHD/Transcripts/Data/diff_from_col0:False_onlyDiff:False.xlsx"):
xl = pd.ExcelFile(fn)
sheet_names = xl.sheet_names
dfs = []
for s in sheet_names:
d = xl.parse(s)
d['sample'] = s.split("|")[0].replace(" ", "")
dfs.append(d)
DE = pd.concat(dfs)
DE = DE.rename_axis('gene').sort_values(by=['gene', 'log2FoldChange'],
ascending=[False, False])
return DE
def get_gene_names(geneList):
gp = GProfiler(return_dataframe=True)
df = gp.convert(organism='athaliana',
query=geneList)[['incoming', 'name', 'description']]
df['description'] = df.apply(lambda x: x['description'].split('[')[
0].split(';')[0], axis=1)
return df
counts = pd.read_csv(
"/Users/hughesn/PHD/Transcripts/Data/norml_count_data.csv", index_col=0)
counts[[c for c in counts.columns if 'cer_c' in c]].head(5)
# load data
DE_pairings_05hr = read_xl('./Data/pairings_05hr.xlsx')
sig = DE_pairings_05hr[DE_pairings_05hr['padj'] < 1]
sig = sig['log2FoldChange'].sort_values()
locs = sig.index
df = counts.loc[locs][[c for c in counts.columns if (
'05h' in c and ('col' in c or 'lym' in c or 'cer' in c))]].T
df = df.loc[:, ~df.columns.duplicated()]
df = df[[c for c in set(df.columns.values)]]
# Feature Extraction with RFE
X = df.values
y = [y.rsplit('_', 1)[0] for y in df.reset_index()['index']]
# feature extraction
model = LogisticRegression()
rfe = RFE(model, n_features_to_select=25)
fit = rfe.fit(X, y)
print("Num Features: {0}".format(fit.n_features_))
print("Selected Features: {0}".format(fit.support_))
print("Feature Ranking: {0}".format(fit.ranking_))
genes = []
for r, f in zip(fit.ranking_, df.columns.values):
if r == 1:
genes.append(f)
get_gene_names(genes)
rfe_forest = counts.loc[genes][[c for c in counts.columns if (
'05h' in c and ('col' in c or 'lym' in c))]].T
rfe_forest = rfe_forest.loc[:, ~rfe_forest.columns.duplicated()]
rfe_forest = rfe_forest[[c for c in set(rfe_forest.columns.values)]]
feat_labels = rfe_forest.columns.values
y = [d.rsplit('_', 1)[0] for d in rfe_forest.index.values]
X_train, X_test, y_train, y_test = train_test_split(
rfe_forest.values, y, test_size=1, random_state=42)
forest = RandomForestClassifier(n_estimators=20000, random_state=1, n_jobs=-1)
forest.fit(X_train, y_train)
res = {k: v for k, v in sorted(
zip(feat_labels, forest.feature_importances_), key=lambda x: x[1], reverse=True)}
res_df = pd.DataFrame(list(res.items()), columns=[
'gene', 'importance']).set_index('gene')
names = get_gene_names(list(res_df.index))
res_df = pd.merge(res_df, names, left_index=True, right_on='incoming').rename(
columns={'incoming': 'gene'}).set_index('gene').sort_values('importance', ascending=False)
res_df.to_csv('results.csv')
joblib.dump(forest, 'saved_model.pkl')
| from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE, RFECV
from gprofiler import GProfiler
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
def read_xl(fn="/Users/aoife/PHD/Transcripts/Data/diff_from_col0:False_onlyDiff:False.xlsx"):
xl = pd.ExcelFile(fn)
sheet_names = xl.sheet_names
dfs = []
for s in sheet_names:
d = xl.parse(s)
d['sample'] = s.split("|")[0].replace(" ", "")
dfs.append(d)
DE = pd.concat(dfs)
DE = DE.rename_axis('gene').sort_values(by=['gene', 'log2FoldChange'],
ascending=[False, False])
return DE
def get_gene_names(geneList):
gp = GProfiler(return_dataframe=True)
df = gp.convert(organism='athaliana',
query=geneList)[['incoming', 'name', 'description']]
df['description'] = df.apply(lambda x: x['description'].split('[')[
0].split(';')[0], axis=1)
return df
counts = pd.read_csv(
"/Users/hughesn/PHD/Transcripts/Data/norml_count_data.csv", index_col=0)
counts[[c for c in counts.columns if 'cer_c' in c]].head(5)
# load data
DE_pairings_05hr = read_xl('./Data/pairings_05hr.xlsx')
sig = DE_pairings_05hr[DE_pairings_05hr['padj'] < 1]
sig = sig['log2FoldChange'].sort_values()
locs = sig.index
df = counts.loc[locs][[c for c in counts.columns if (
'05h' in c and ('col' in c or 'lym' in c or 'cer' in c))]].T
df = df.loc[:, ~df.columns.duplicated()]
df = df[[c for c in set(df.columns.values)]]
# Feature Extraction with RFE
X = df.values
y = [y.rsplit('_', 1)[0] for y in df.reset_index()['index']]
# feature extraction
model = LogisticRegression()
rfe = RFE(model, n_features_to_select=25)
fit = rfe.fit(X, y)
print("Num Features: {0}".format(fit.n_features_))
print("Selected Features: {0}".format(fit.support_))
print("Feature Ranking: {0}".format(fit.ranking_))
genes = []
for r, f in zip(fit.ranking_, df.columns.values):
if r == 1:
genes.append(f)
get_gene_names(genes)
rfe_forest = counts.loc[genes][[c for c in counts.columns if (
'05h' in c and ('col' in c or 'lym' in c))]].T
rfe_forest = rfe_forest.loc[:, ~rfe_forest.columns.duplicated()]
rfe_forest = rfe_forest[[c for c in set(rfe_forest.columns.values)]]
feat_labels = rfe_forest.columns.values
y = [d.rsplit('_', 1)[0] for d in rfe_forest.index.values]
X_train, X_test, y_train, y_test = train_test_split(
rfe_forest.values, y, test_size=1, random_state=42)
forest = RandomForestClassifier(n_estimators=20000, random_state=1, n_jobs=-1)
forest.fit(X_train, y_train)
res = {k: v for k, v in sorted(
zip(feat_labels, forest.feature_importances_), key=lambda x: x[1], reverse=True)}
res_df = pd.DataFrame(list(res.items()), columns=[
'gene', 'importance']).set_index('gene')
names = get_gene_names(list(res_df.index))
res_df = pd.merge(res_df, names, left_index=True, right_on='incoming').rename(
columns={'incoming': 'gene'}).set_index('gene').sort_values('importance', ascending=False)
res_df.to_csv('results.csv')
joblib.dump(forest, 'saved_model.pkl')
| en | 0.804505 | # load data # Feature Extraction with RFE # feature extraction | 2.578689 | 3 |
forms.py | viniciustr/movelit | 2 | 6614616 | from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, EqualTo, Length
# Set your classes here.
class QuestionsForm(Form):
name = TextField(
'Username', validators=[DataRequired(), Length(min=6, max=25)]
)
| from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, EqualTo, Length
# Set your classes here.
class QuestionsForm(Form):
name = TextField(
'Username', validators=[DataRequired(), Length(min=6, max=25)]
)
| en | 0.905702 | # Set your classes here. | 2.873779 | 3 |
Data/batcher_unittest.py | shihui2010/continuous_cnn | 0 | 6614617 | import numpy as np
import unittest
from unittest import TestCase
from sinusoid_signal import SignalGen as SingleSine
from sinusoid_signal import DoubleSignal
from sinusoid_signal import PredSignal
from mnist.batcher import Batcher as MnistBatcher
from UCR.UCRBatcher import Batcher as UCRBatcher
from data_market.DMBatcher import Batcher as DMBatcher
from UCI.Batcher import Batcher as UCIBatcher
class TestAllBatchers(TestCase):
def setUp(self):
batchers = list()
names = list()
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": False}))
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": False,
"seq_level": True}))
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": False,
"seq_level": False}))
names.extend(
["SingleSine_Uniform_Seq", "SingleSine_Uniform_Sample",
"SingleSine_Nonuniform_Seq", "SingleSine_Nonuniform_Sample"])
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
names.extend(
["DoubleSine_Uniform_Seq", "DoubleSine_Uniform_Sample",
"DoubleSine_Nonuniform_Seq", "Doubleine_Nonuniform_Sample"])
batchers.append(PredSignal({"input_length": 100,
"interval": 1.0,
"uniform": True,
"freq_range": [2, 100],
"delta_t": 10}))
names.extend("SinePrediction")
batchers.append(MnistBatcher("mnist/1024_1d",
{"onehot": True}))
batchers.append(MnistBatcher("mnist/1024_1d",
{"onehot": False}))
names.extend(["MNIST_1D_OH", "MNIST_1D"])
batchers.append(UCRBatcher("StarLightCurves", input_length=10,
seq_level=True, prediction=True))
batchers.append(UCRBatcher("50words", input_length=10,
seq_level=True, prediction=False))
batchers.append(UCRBatcher("ScreenType", input_length=10,
seq_level=False, prediction=False))
names.extend(["UCR_SLC", "UCR_50W", "UCR_ST"])
batchers.append(DMBatcher("5.csv", 16))
names.append("DMBatcher")
batchers.append(UCIBatcher("gesture", 25, True, True))
batchers.append(UCIBatcher("gesture", 5, False, True))
batchers.append(UCIBatcher("gesture", 40, False, False))
names.extend(["UCI_Regression", "UCI_Seq_Class", "UCI_Sam_Class"])
self.batchers = batchers
self.names = names
def test_all(self):
for b, n in zip(self.batchers, self.names):
self.assertHasProperAttributes(b, n)
self.assertShapeCorrect(b, n)
def assertHasProperAttributes(self, batcher, name):
attributes = ["input_length", "output_length", "input_channel",
"output_channel"]
for att in attributes:
self.assertTrue(hasattr(batcher, att),
name + " has no property " + att)
def assertShapeCorrect(self, batcher, name):
"""
interval should be 2D, with shape [batch_size, input_length]
signal should be 3d, with shape [batch_size, input_length, input_channel]
label should be 3d, wish shape [batch_size, output_length, out_channel]
"""
batch_size = int(np.random.rand() * 100) + 1
for method in [batcher.next_train, batcher.next_test]:
interval, signal, label = method(batch_size)
interval = np.array(interval)
shape = (batch_size, batcher.input_length)
self.assertTupleEqual(
interval.shape, shape,
name + " Interval Dimension Error: " + str(interval.shape)
+ " vs. " + str(shape))
signal = np.array(signal)
shape = (batch_size, batcher.input_length, batcher.input_channel)
self.assertTupleEqual(
signal.shape, shape,
name + " Signal Dimension Error: " + str(signal.shape)
+ " vs. " + str(shape))
label = np.array(label)
shape = (batch_size, batcher.output_length, batcher.output_channel)
self.assertTupleEqual(
label.shape, shape,
name + " Label Dimension Error: " + str(label.shape)
+ " vs. " + str(shape))
if __name__ == "__main__":
unittest.main() | import numpy as np
import unittest
from unittest import TestCase
from sinusoid_signal import SignalGen as SingleSine
from sinusoid_signal import DoubleSignal
from sinusoid_signal import PredSignal
from mnist.batcher import Batcher as MnistBatcher
from UCR.UCRBatcher import Batcher as UCRBatcher
from data_market.DMBatcher import Batcher as DMBatcher
from UCI.Batcher import Batcher as UCIBatcher
class TestAllBatchers(TestCase):
def setUp(self):
batchers = list()
names = list()
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": False}))
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": False,
"seq_level": True}))
batchers.append(SingleSine({"input_length": 1000,
"interval": 2.0,
"uniform": False,
"seq_level": False}))
names.extend(
["SingleSine_Uniform_Seq", "SingleSine_Uniform_Sample",
"SingleSine_Nonuniform_Seq", "SingleSine_Nonuniform_Sample"])
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
batchers.append(DoubleSignal({"input_length": 1000,
"interval": 2.0,
"uniform": True,
"seq_level": True}))
names.extend(
["DoubleSine_Uniform_Seq", "DoubleSine_Uniform_Sample",
"DoubleSine_Nonuniform_Seq", "Doubleine_Nonuniform_Sample"])
batchers.append(PredSignal({"input_length": 100,
"interval": 1.0,
"uniform": True,
"freq_range": [2, 100],
"delta_t": 10}))
names.extend("SinePrediction")
batchers.append(MnistBatcher("mnist/1024_1d",
{"onehot": True}))
batchers.append(MnistBatcher("mnist/1024_1d",
{"onehot": False}))
names.extend(["MNIST_1D_OH", "MNIST_1D"])
batchers.append(UCRBatcher("StarLightCurves", input_length=10,
seq_level=True, prediction=True))
batchers.append(UCRBatcher("50words", input_length=10,
seq_level=True, prediction=False))
batchers.append(UCRBatcher("ScreenType", input_length=10,
seq_level=False, prediction=False))
names.extend(["UCR_SLC", "UCR_50W", "UCR_ST"])
batchers.append(DMBatcher("5.csv", 16))
names.append("DMBatcher")
batchers.append(UCIBatcher("gesture", 25, True, True))
batchers.append(UCIBatcher("gesture", 5, False, True))
batchers.append(UCIBatcher("gesture", 40, False, False))
names.extend(["UCI_Regression", "UCI_Seq_Class", "UCI_Sam_Class"])
self.batchers = batchers
self.names = names
def test_all(self):
for b, n in zip(self.batchers, self.names):
self.assertHasProperAttributes(b, n)
self.assertShapeCorrect(b, n)
def assertHasProperAttributes(self, batcher, name):
attributes = ["input_length", "output_length", "input_channel",
"output_channel"]
for att in attributes:
self.assertTrue(hasattr(batcher, att),
name + " has no property " + att)
def assertShapeCorrect(self, batcher, name):
"""
interval should be 2D, with shape [batch_size, input_length]
signal should be 3d, with shape [batch_size, input_length, input_channel]
label should be 3d, wish shape [batch_size, output_length, out_channel]
"""
batch_size = int(np.random.rand() * 100) + 1
for method in [batcher.next_train, batcher.next_test]:
interval, signal, label = method(batch_size)
interval = np.array(interval)
shape = (batch_size, batcher.input_length)
self.assertTupleEqual(
interval.shape, shape,
name + " Interval Dimension Error: " + str(interval.shape)
+ " vs. " + str(shape))
signal = np.array(signal)
shape = (batch_size, batcher.input_length, batcher.input_channel)
self.assertTupleEqual(
signal.shape, shape,
name + " Signal Dimension Error: " + str(signal.shape)
+ " vs. " + str(shape))
label = np.array(label)
shape = (batch_size, batcher.output_length, batcher.output_channel)
self.assertTupleEqual(
label.shape, shape,
name + " Label Dimension Error: " + str(label.shape)
+ " vs. " + str(shape))
if __name__ == "__main__":
unittest.main() | en | 0.718423 | interval should be 2D, with shape [batch_size, input_length] signal should be 3d, with shape [batch_size, input_length, input_channel] label should be 3d, wish shape [batch_size, output_length, out_channel] | 2.166092 | 2 |
poradnia/feedback_custom/forms.py | efefre/poradnia | 23 | 6614618 | from atom.ext.crispy_forms.forms import FormHorizontalMixin, SingleButtonMixin
from poradnia.tasty_feedback.forms import FeedbackForm
class CustomFeedbackForm(FormHorizontalMixin, SingleButtonMixin, FeedbackForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper.form_tag = False
| from atom.ext.crispy_forms.forms import FormHorizontalMixin, SingleButtonMixin
from poradnia.tasty_feedback.forms import FeedbackForm
class CustomFeedbackForm(FormHorizontalMixin, SingleButtonMixin, FeedbackForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper.form_tag = False
| none | 1 | 1.890207 | 2 | |
Tkinter/Manipulations_flat_objects/flat_objects_mod.py | bitcoineazy/Tkinter_apps | 0 | 6614619 | from tkinter import *
import math
import random
class Figure:
def __init__(self, canvas, **coords):
self.__dict__.update(coords)
self.canvas = canvas
self.rectangles = []
self.triangles = []
self.hexagons = []
self.ovals = []
self.strips = []
self.overlapping_1 = []
self.overlapping_2 = []
self.symmetric_1 = []
self.symmetric_2 = []
self.thy = []
self.rectangles_generated = False
self.triangles_generated = False
self.hexagons_generated = False
self.ovals_generated = False
self.overlapping_generated = False
self.symmetric_generated = False
self.thy_generated = False
def create_rectangles(self):
while not self.rectangles_generated:
for i in range(1000):
self.rectangles.append(self.canvas.create_rectangle(
self.x1 + (i*40), self.y1, self.x2 + (i*40), self.y2, fill='black'))
self.rectangles.append(self.canvas.create_rectangle(
self.x1 - (i*40), self.y1, self.x2 - (i*40), self.y2, fill='black'))
self.rectangles_generated = True
for each in self.rectangles:
self.canvas.move(each, -1, 0)
self.canvas.after(10, self.create_rectangles)
def create_triangles(self):
while not self.triangles_generated:
for i in range(1000):
self.triangles.append(self.canvas.create_polygon(
self.x1 + (i*40), self.y1, self.x2 + (i*40), self.y2, self.x3 + (i*40), self.y3
))
self.triangles.append(self.canvas.create_polygon(
self.x1 - (i*40), self.y1, self.x2 - (i*40), self.y2, self.x3 - (i*40), self.y3
))
self.triangles_generated = True
for each in self.triangles:
self.canvas.move(each, 1, 0)
self.canvas.after(10, self.create_triangles)
def create_hexagons(self):
while not self.hexagons_generated:
for i in range(1000):
self.hexagons.append(self.canvas.create_polygon(
self.x4 + (i*80), self.y4, self.x5 + (i*80), self.y5, self.x6 + (i*80), self.y6,
self.x1 + (i*80), self.y1, self.x2 + (i*80), self.y2, self.x3 + (i*80), self.y3,
))
self.hexagons.append(self.canvas.create_polygon(
self.x1 - (i*80), self.y1, self.x2 - (i*80), self.y2, self.x3 - (i*80), self.y3,
self.x4 - (i*80), self.y4, self.x5 - (i*80), self.y5, self.x6 - (i*80), self.y6,
))
self.hexagons_generated = True
for each in self.hexagons:
if self.hexagons.index(each) // 3 :
self.canvas.itemconfigure(each, fill='darkred')
self.canvas.move(each, 10, 0)
self.canvas.after(10, self.create_hexagons)
def create_custom(self, n, angle):
de = ("%02x" % random.randint(0, 255))
re = ("%02x" % random.randint(0, 255))
we = ("%02x" % random.randint(0, 255))
ge = "#"
random_color = ge + de + re + we
self.canvas.scale(self.canvas.create_polygon(
self.get_n_angles_coords(self.x1, self.y1, self.x2, self.y2, n, angle), fill=random_color), self.x1+15, self.y1+15, random.randint(1, 6), random.randint(1, 6))
def create_n(self, n, angle):
while not self.ovals_generated:
for i in range(1000):
self.ovals.append(self.canvas.create_polygon(
self.get_n_angles_coords(self.x1 + (i*60), self.y1, self.x2 + (i*60), self.y2,
n, angle)
))
self.ovals.append(self.canvas.create_polygon(
self.get_n_angles_coords(self.x1 - (i*60), self.y1, self.x2 - (i*60), self.y2,
n, angle)
))
self.ovals_generated = True
def get_n_angles_coords(self, x1, y1, x2, y2, n, angle):
rotation = angle * math.pi / 180.0
# Оси
a = (x2 - x1) / 2.0
b = (y2 - y1) / 2.0
# Центр
xc = x1 + a
yc = y1 + b
point_list = []
for i in range(n):
theta = (math.pi * 2) * (float(i) / n)
x1 = a * math.cos(theta)
y1 = b * math.sin(theta)
# Поворачиваем x, y
x = (x1 * math.cos(rotation)) + (y1 * math.sin(rotation))
y = (y1 * math.cos(rotation)) - (x1 * math.sin(rotation))
point_list.append(round(x + xc))
point_list.append(round(y + yc))
return point_list
def create_3_strips(self): # 4.1
for i in range(1000):
self.strips.append(self.canvas.create_rectangle(
self.x1 + (i*50), self.y1 + (i*50), self.x2 + (i*50), self.y2 + (i*50), fill='red'
))
self.strips.append(self.canvas.create_rectangle(
self.x1 - (i*50), self.y1 - (i*50), self.x2 - (i*50), self.y2 - (i*50), fill='red'
))
self.strips.append(self.canvas.create_rectangle(
self.x1-25 + (i*50), self.y1 + (i*50), self.x2-25 + (i*50), self.y2 + (i*50), fill='blue'
))
self.strips.append(self.canvas.create_rectangle(
self.x1-25 - (i*50), self.y1 - (i*50), self.x2-25 - (i*50), self.y2 - (i*50), fill='blue'
))
self.strips.append(self.canvas.create_rectangle(
self.x1+25 + (i*50), self.y1 + (i*50), self.x2+25 + (i*50), self.y2 + (i*50), fill='yellow'
))
self.strips.append(self.canvas.create_rectangle(
self.x1+25 - (i*50), self.y1 - (i*50), self.x2+25 - (i*50), self.y2 - (i*50), fill='yellow'
))
def create_overlapping(self): # 4.2
while not self.overlapping_generated:
for i in range(1000):
self.overlapping_1.append(self.canvas.create_rectangle(
self.x1 + (i*50), self.y1 + (i*25), self.x2 + (i*50), self.y2 + (i*25), fill='red'
))
self.overlapping_1.append(self.canvas.create_rectangle(
self.x1 - (i*50), self.y1 - (i*25), self.x2 - (i*50), self.y2 - (i*25), fill='red'
))
self.overlapping_2.append(self.canvas.create_polygon(
self.x1_2 + (i*40), self.y1_2, self.x2_2 + (i*40), self.y2_2, fill='blue'
))
self.overlapping_2.append(self.canvas.create_rectangle(
self.x1_2 - (i*40), self.y1_2, self.x2_2 - (i*40), self.y2_2, fill='blue'
))
self.overlapping_generated = True
for each in self.overlapping_1:
self.canvas.move(each, 4, 2)
for each in self.overlapping_2:
self.canvas.move(each, 5, 0)
self.canvas.after(10, self.create_overlapping)
def create_symmetric(self): #4.3
while not self.symmetric_generated:
for i in range(1000):
self.symmetric_1.append(self.canvas.create_polygon(
self.get_n_angles_coords(
self.x1 + (i*30), self.y1, self.x2 + (i*30), self.y2, n=3, angle=150)))
self.symmetric_1.append(self.canvas.create_polygon(
self.get_n_angles_coords(
self.x1 - (i*30), self.y1, self.x2 - (i*30), self.y2, n=3, angle=150)))
self.symmetric_2.append(self.canvas.create_polygon(
self.get_n_angles_coords(
self.x1 + (i*30), self.y1+30, self.x2 + (i*30), self.y2+30, n=3, angle=90)))
self.symmetric_2.append(self.canvas.create_polygon(
self.get_n_angles_coords(
self.x1 - (i*30), self.y1+30, self.x2 - (i*30), self.y2+30, n=3, angle=90)))
self.symmetric_generated = True
for each in self.symmetric_1:
self.canvas.move(each, -7, 0)
for each in self.symmetric_2:
self.canvas.move(each, 7, 0)
self.canvas.after(10, self.create_symmetric)
def create_thy(self): # 4.4
while not self.thy_generated:
for i in range(10):
for z in range(10):
self.thy.append(self.canvas.create_rectangle(375, 375, 375 + (i*20), 375 + (z*30), fill='black'))
self.thy.append(self.canvas.create_rectangle(375, 375, 375 - (i*20), 375 - (z*30), fill='white'))
self.thy_generated = True
self.canvas.after(10, self.create_thy)
class Objects(Frame):
def __init__(self, parent):
    """Build the main control window.

    :param parent: the Tk root window this Frame is packed into.
    """
    Frame.__init__(self, parent, background='white')
    self.parent = parent
    self.parent.title('Манипуляции с фигурами')
    self.pack(fill=BOTH, expand=1)
    self.centerWindow()
    self.initUI()
    self.all_figures = []  # NOTE(review): not read by any visible method — possibly vestigial
def initUI(self):
    """Create and grid all control buttons and entry fields.

    Entries kept as attributes (read later by handlers):
      rotating_angle — rotation angle in degrees for rotate()
      deltaxy        — 'dx,dy' translation vector for move()
      n_angle        — 'n,angle' polygon spec for gen_n_angles()/custom_figure()
    Buttons 4.1–4.4 trigger the strip/overlap/symmetry/homothety demos.
    """
    self.canvas_area = Button(
        self, text='Поле', width=10, command=self.make_canvas)
    self.rectangles = Button(
        self, text='gen_rectangle()',
        command=self.gen_rectangle, width=16)
    self.rectangles.grid(row=0, column=1)
    self.canvas_area.grid(row=0, column=0)
    triangles = Button(self, text='gen_triangles()', command=self.gen_triangle, width=16)
    hexagons = Button(self, text='gen_hexagons()', command=self.gen_hexagon, width=16)
    n_angles = Button(self, text='n_угольники()', command=self.gen_n_angles, width=16)
    rotating = Button(self, text='rotate()', command=self.rotate, width=16)
    strips_1 = Button(self, text='4.1', command=self.make_strips, width=2)
    strips_2 = Button(self, text='4.2', command=self.make_overlapping, width=2)
    symmetric = Button(self, text='4.3', command=self.make_symmetric, width=2)
    homotethy = Button(self, text='4.4', command=self.make_thy, width=2)
    operations = Button(self, text='Операции', command=self.operations, width=16)
    # The three small 4.x buttons share cell (1, 3), separated by sticky sides.
    strips_1.grid(row=1, column=3, sticky='w')
    strips_2.grid(row=1, column=3)
    symmetric.grid(row=1, column=3, sticky='e')
    homotethy.grid(row=2, column=3)
    self.rotating_angle = Entry(self, width=16)
    moving = Button(self, text='move()', command=self.move, width=16)
    self.deltaxy = Entry(self, width=16)
    n_angle_label = Label(self, width=20, text='Кол-во углов, поворот')
    self.n_angle = Entry(self, width=16)
    triangles.grid(row=0, column=2)
    hexagons.grid(row=0, column=3)
    rotating.grid(row=1, column=0)
    moving.grid(row=1, column=1)
    n_angles.grid(row=0, column=4)
    n_angle_label.grid(row=1, column=4)
    self.rotating_angle.grid(row=2, column=0)
    self.deltaxy.grid(row=2, column=1)
    self.n_angle.grid(row=2, column=4)
    operations.grid(row=1, column=2)
def make_canvas(self):
    """Open a Toplevel holding the 750x750 drawing canvas and draw the
    coordinate grid (axes plus tick labels every 100 px).

    The grid's item ids are snapshotted into ``self.canvas_grid`` so
    every later operation can separate fixed grid items from movable
    figures via set difference.
    """
    self.canvas_window = Toplevel(self)
    self.canvas = Canvas(self.canvas_window, width=750, height=750)
    self.canvas.grid(row=0, column=0)
    for i in range(1000):  # x, y axes (copies drawn far off-canvas as well)
        self.canvas.create_line(0 + (i*375), 375, 750 + (i*750), 375, width=2)
        self.canvas.create_line(0 - (i*375), 375, 750 - (i*750), 375, width=2)
        self.canvas.create_line(375, 750 + (i*750), 375, 0 + (i*750), width=2)
        self.canvas.create_line(375, 750 - (i*750), 375, 0 - (i*750), width=2)
    # Same values as [i for i in range(1000000) if i % 100 == 0] (the
    # original), but built directly instead of scanning a million ints.
    coords_grid = [i * 100 for i in range(10000)]
    # Start at 1: no label at the origin itself.
    for i in range(1, 1000):
        self.canvas.create_text(375 + (i*100), 385, text=f'{coords_grid[i]}')
        self.canvas.create_text(375 - (i*100), 385, text=f'-{coords_grid[i]}')
        self.canvas.create_text(395, 395 + (i*100), text=f'-{coords_grid[i]}')
        self.canvas.create_text(395, 395 - (i*100), text=f'{coords_grid[i]}')
    # Freeze the grid item ids so the coordinate grid is never moved later.
    self.canvas_grid = self.canvas.find_all()
def gen_rectangle(self):
    """Spawn the animated rectangle row (Figure.create_rectangles)."""
    all_rects = Figure(self.canvas, x1=250, y1=250, x2=275, y2=275)
    all_rects.create_rectangles()
def gen_triangle(self):
    """Spawn the animated triangle row (Figure.create_triangles)."""
    all_triangles = Figure(self.canvas, x1=300, y1=250, x2=285, y2=330, x3=265, y3=250)
    all_triangles.create_triangles()
def gen_hexagon(self):
    """Spawn the animated hexagon row (six vertices passed explicitly)."""
    all_hexagons = Figure(self.canvas, x1=235, y1=224, x2=265, y2=224, x3=280, y3=250,
                          x4=265, y4=276, x5=235, y5=276, x6=220, y6=250)
    all_hexagons.create_hexagons()
def gen_n_angles(self):
    """Spawn a row of regular n-gons; reads 'n,angle' from the n_angle entry."""
    all_n_angles = Figure(self.canvas, x1=250, y1=250, x2=290, y2=290)
    n, angle = self.n_angle.get().split(',')
    all_n_angles.create_n(int(n), int(angle))
def make_strips(self): # 4.1
    """Task 4.1: three-coloured diagonal strips."""
    strips = Figure(self.canvas, x1=375, y1=375, x2=395, y2=395)
    strips.create_3_strips()
def make_overlapping(self): # 4.2
    """Task 4.2: two shape families moving at different speeds so they overlap."""
    overlapping = Figure(self.canvas, x1=375, y1=375, x2=405, y2=405,
                         x1_2=500, y1_2=500, x2_2=530, y2_2=530)
    overlapping.create_overlapping()
def make_symmetric(self): # 4.3
    """Task 4.3: mirrored triangle rows moving toward each other."""
    symmetric = Figure(self.canvas, x1=400, y1=400, x2=430, y2=430)
    symmetric.create_symmetric()
def make_thy(self): # 4.4
    """Task 4.4: homothety — nested rectangles from the canvas center."""
    thy = Figure(self.canvas, x1=375, y1=375, x2=400, y2=400)
    thy.create_thy()
def move(self):
    """Parallel translation: shift every movable figure by the 'dx,dy'
    vector typed into the deltaxy entry, then reschedule itself, i.e.
    the figures keep drifting continuously.

    NOTE(review): deltax/deltay stay strings here — Tk coerces numeric
    strings, so canvas.move accepts them; confirm before refactoring.
    NOTE(review): every button press schedules another after()-loop, so
    repeated clicks compound the drift speed — presumably intentional.
    """
    deltax, deltay = self.deltaxy.get().split(',')
    all_figures = self.canvas.find_all()
    # Figures that may be moved (grid items excluded).
    movable_figures = list(set(all_figures) - set(self.canvas_grid))
    for each in movable_figures:
        self.canvas.move(each, deltax, deltay)
    self.canvas.after(10, self.move)
def rotate(self):
    """'Rotate' every movable figure by the angle (degrees) from the
    rotating_angle entry.

    Implementation note: this does not rotate the true vertices — it
    regenerates a regular n-gon inscribed in each figure's bounding box
    (same vertex count, new rotation) via Figure.get_n_angles_coords,
    so irregular shapes are approximated by regular ones.
    """
    angle = int(self.rotating_angle.get())
    all_figures = self.canvas.find_all()
    movable_figures = list(set(all_figures) - set(self.canvas_grid))
    box_coords = []
    real_coords = []
    for each in movable_figures:
        box_coords.append(self.canvas.bbox(each))
        real_coords.append(self.canvas.coords(each))
    new_coords = []
    rotator = Figure(self.canvas)
    for item in box_coords:
        x1 = item[0]
        y1 = item[1]
        x2 = item[2]
        y2 = item[3]
        n = len(real_coords[box_coords.index(item)]) // 2  # vertex count of the figure
        new_coords.append(rotator.get_n_angles_coords(x1, y1, x2, y2, n, angle))
    for figure in movable_figures:
        self.rotating(figure, new_coords[movable_figures.index(figure)])
def rotating(self, figure, *args):
    """Write a precomputed flat coordinate list (args[0]) onto *figure*."""
    self.canvas.coords(figure, [float(x) for x in args[0]])
def operations(self): # window with the operations from tasks 5 and 8
    """Open the secondary 'operations' window: geometry queries (area,
    perimeter, nearest vertex, min edge, max area) and figure filters.

    Entries kept as attributes (read by the handlers):
      area_filter_count    — area threshold for area_filter()
      min_len_filter_count — edge-length threshold for min_len_filter()
      find_location        — 'x,y' vertex for find_angle_location()
    Results are printed into the self.answer Text widget.
    """
    self.operations_window = Toplevel(self)
    count_area = Button(self.operations_window, text='Площадь', command=self.count_area, width=16)
    custom_figure = Button(self.operations_window, text='Фигура', command=self.custom_figure, width=8)
    resize = Button(self.operations_window, text='Размер', command=self.resize, width=16)
    ang = Button(self.operations_window, text='Ближайшая вершина', command=self.ang, width=16)
    min_edge = Button(self.operations_window, text='Минимальное ребро', command=self.long_s, width=16)
    perimeter = Button(self.operations_window, text='Периметр', command=self.perimeter, width=16)
    max_area = Button(self.operations_window, text='Максимальная S', command=self.max_ar, width=16)
    area_filtration = Button(self.operations_window, text='Фильтрация площади', command=self.area_filter, width=16)
    min_len_filter = Button(self.operations_window, text='Фил по мин ребру', command=self.min_len_filter, width=16)
    find_location_angle = Button(self.operations_window, text='Фил по вхож в коорд', command=self.find_angle_location, width=16)
    self.min_len_filter_count = Entry(self.operations_window, width=16)
    self.area_filter_count = Entry(self.operations_window, width=16)
    self.find_location = Entry(self.operations_window, width=16)
    count_area.grid(row=0, column=5)
    custom_figure.grid(row=0, column=0)
    resize.grid(row=0, column=1)
    ang.grid(row=0,column=2, sticky='w')
    min_edge.grid(row=0, column=3, sticky='w')
    perimeter.grid(row=0, column=4, sticky='w')
    max_area.grid(row=1, column=5)
    area_filtration.grid(row=1, column=1)
    min_len_filter.grid(row=1, column=2)
    find_location_angle.grid(row=1, column=3)
    self.area_filter_count.grid(row=2, column=1)
    self.min_len_filter_count.grid(row=2, column=2)
    self.find_location.grid(row=2, column=3)
    self.answer = Text(self.operations_window, width=50, height=4)
    self.answer.grid(row=1, column=0)
def resize(self): # change size
    """Scale every movable figure by 1.2x, anchored at the canvas
    origin (375, 375) — so shapes also drift away from the center."""
    all_figures = self.canvas.find_all()
    movable_figures = list(set(all_figures) - set(self.canvas_grid))
    for each in movable_figures:
        self.canvas.scale(each, 375, 375, 1.2, 1.2)
def area_filter(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
movable_figures_coords = self.get_figures()
areas = []
areas_indexes = []
for each in movable_figures_coords:
if self.area(each) < float(self.area_filter_count.get()):
areas_indexes.append(movable_figures_coords.index(each))
for each in areas:
if each < int(self.area_filter_count.get()):
areas_indexes.append(areas.index(each))
for i in areas_indexes:
self.canvas.delete(movable_figures[i])
def min_len_filter(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
movable_figures_coords = self.get_figures()
areas = []
min_indexes = []
for each in movable_figures_coords:
if self.long_s_py(each)[1] < int(self.min_len_filter_count.get()):
min_indexes.append(movable_figures_coords.index(each))
for i in min_indexes:
self.canvas.delete(movable_figures[i])
def find_angle_location(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
movable_figures_coords = self.get_figures()
location_indexes = []
location = self.find_location.get().split(',')
for each in movable_figures_coords:
if location in self.canvas.coords(each):
location_indexes.append(movable_figures_coords.index(each))
for i in location_indexes:
self.canvas.delete(movable_figures[i])
def custom_figure(self):
    """Create one n-gon at a random position; 'n,angle' comes from the
    n_angle entry.

    NOTE(review): y2 is set from x2 (not y1 + 30); since y1 == x1 the
    bounding square is the same either way — confirm intent.
    """
    n, angle = self.n_angle.get().split(',')
    x1 = random.randint(100, 600)
    y1 = x1
    x2 = x1 + 30
    y2 = x2
    figure = Figure(self.canvas, x1=x1, y1=y1, x2=x2, y2=y2)
    figure.create_custom(int(n), int(angle))
def get_figures(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
real_coords = []
for each in movable_figures:
real_coords.append(self.canvas.coords(each))
tuple_coords = []
buff = []
for item in real_coords:
for i in range(len(item) // 2):
buff.append([item[i], item[i+1]])
tuple_coords.append(buff)
buff = []
return tuple_coords
def count_area(self):
    """Report the area of the FIRST movable figure in the answer box.

    Only index 0 of get_figures() is examined; raises IndexError when
    no movable figure exists.
    """
    movable_figures_coords = self.get_figures()
    figure_coords = movable_figures_coords[0]
    area = self.area(figure_coords)
    self.answer.delete(0.0, END)
    self.answer.insert(1.0, f'Площадь фигуры: {area}')
def summ(self,cord=None): # 5 проверка на выпуклость
if cord is None: cord = self.cord # список в котором парами стоят координаты
for i in range(len(cord)+1):
if cord[i][0]*cord[(i+1)%len(cord)][1]-cord[(i+1)%len(cord)][0]*cord[i][1] < 0:
return -1 # не выпуклый
else:
return 1 # выпуклый
def area(self,cord): # площадь
#if cord is None: cord = self.cord
#cord = [[-13527, 250], [250, -13542], [-13542, 330]]
#print(cord)
sm = 0
for i in range(len(cord)):
sm += cord[i][0]*cord[(i+1)%len(cord)][1]-cord[i][1]*cord[(i+1)%len(cord)][0]
return abs(sm)/2
def ang(self): # reports the vertex nearest to the coordinate origin
    """Find the vertex of the first movable figure closest to the
    canvas origin (375, 375) and print it in the answer box."""
    movable_figures_coords = self.get_figures()
    cord = movable_figures_coords[0]
    lm = None
    coord = ()
    for i in cord:
        # Euclidean distance from the canvas-center origin (375, 375).
        if lm is None or lm>((i[0]-375)**2+(i[1]-375)**2)**0.5:
            lm = ((i[0]-375)**2+(i[1]-375)**2)**0.5
            coord = i
    self.answer.delete(0.0, END)
    self.answer.insert(1.0, f'Ближ к началу координат вершина: {coord}')
def long_s(self): # находит ребро с минимальной длиной
movable_figures_coords = self.get_figures()
cord = movable_figures_coords[0]
lm = None
coord = None
for i in range(len(cord)):
if lm is None or lm < ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5:
lm = ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5
coord = (cord[i],cord[(i+1)%len(cord)])
self.answer.delete(0.0, END)
self.answer.insert(1.0, f'Точки ребра с минимальной длиной: {coord}, \n'
f'Длина этого ребра: {round(lm, 3)}')
#return coord,round(lm,3) # возвращает точки ребра и длину этого ребра
def long_s_py(self, cord): # находит ребро с минимальной длиной
lm = None
coord = None
for i in range(len(cord)):
if lm is None or lm < ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5:
lm = ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5
coord = (cord[i],cord[(i+1)%len(cord)])
return cord, round(lm,3) # возвращает точки ребра и длину этого ребра
def perimeter(self): # computes the perimeter
    """Sum the edge lengths of the first movable figure and print the
    result (rounded to 2 decimals) in the answer box."""
    movable_figures_coords = self.get_figures()
    cord = movable_figures_coords[0]
    pr = 0
    for i in range(len(cord)):
        # Edge from vertex i to the next (wrapping back to vertex 0).
        pr += ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5
    self.answer.delete(0.0, END)
    self.answer.insert(1.0, f'Периметр фигуры ({len(movable_figures_coords[0])}-угольника): {round(pr, 2)}')
def max_ar(self): # finds the polygon with the maximal area
    """Find the movable figure with the largest area, paint it black,
    raise it to the top, and report its coordinates and area."""
    movable_figures_coords = self.get_figures()
    coord = movable_figures_coords # list of all figures (vertex sublists)
    sm = None
    fg = None
    all_figures = self.canvas.find_all()
    movable_figures = list(set(all_figures) - set(self.canvas_grid))
    for i in coord:
        if sm is None or sm < self.area(i):
            sm = self.area(i)
            fg = i
    # Recover the canvas item id of the max-area figure via the index
    # of its coordinates in the parallel list.
    figure = movable_figures[movable_figures_coords.index(fg)]
    # NOTE(review): passes two [x, y] lists to create_line — relies on
    # Tk flattening nested sequences; confirm this draws as intended.
    self.canvas.create_line(fg[0], fg[-1], fill='black')
    self.canvas.itemconfigure(figure, fill='black') # paint the max-area figure black
    self.canvas.tag_raise(figure) # bring the max-area figure to the foreground
    self.answer.delete(0.0, END)
    self.answer.insert(1.0, f'Координаты фигуры с макс площадью: {fg} \n'
                            f'Макс площадь: {round(sm, 2)}')
def incl_p(self,A,P=None):
if P is None: P = self.cord
def rotate(A, B, C):
return (B[0] - A[0]) * (C[1] - B[1]) - (B[1] - A[1]) * (C[0] - B[0])
def intersect(A, B, C, D):
return rotate(A, B, C) * rotate(A, B, D) <= 0 and rotate(C, D, A) * rotate(C, D, B) < 0
def pointloc(P, A):
n = len(P)
if rotate(P[0], P[1], A) < 0 or rotate(P[0], P[n - 1], A) > 0:
return False
p, r = 1, len(P) - 1
while r - p > 1:
q = (p + r) // 2
if rotate(P[0], P[q], A) < 0:
r = q
else:
p = q
return not intersect(P[0], A, P[p], P[r])
def centerWindow(self):
    """Size the main window to 880x96 and center it on the screen."""
    w = 880
    h = 96
    sw = self.parent.winfo_screenwidth()
    sh = self.parent.winfo_screenheight()
    x = (sw - w) / 2
    y = (sh - h) / 2
    self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def main():
    """Create the Tk root, build the Objects UI and enter the event loop."""
    root = Tk()
    ex = Objects(root)
    root.mainloop()


if __name__ == '__main__':
    main()
from tkinter import *
import math
import random
class Figure:
    """Canvas shape generator/animator.

    Arbitrary coordinate keyword arguments (x1, y1, ..., x1_2, y1_2, ...)
    are stored directly as instance attributes; each create_* method reads
    the subset it needs, builds a long row of shapes once (guarded by a
    *_generated flag) and then re-schedules itself with canvas.after to
    animate them.
    """
    def __init__(self, canvas, **coords):
        # Stash the coordinate kwargs as attributes (x1, y1, x1_2, ...).
        self.__dict__.update(coords)
        self.canvas = canvas
        # Item-id lists per shape family, plus one "created already" flag each.
        self.rectangles = []
        self.triangles = []
        self.hexagons = []
        self.ovals = []
        self.strips = []
        self.overlapping_1 = []
        self.overlapping_2 = []
        self.symmetric_1 = []
        self.symmetric_2 = []
        self.thy = []
        self.rectangles_generated = False
        self.triangles_generated = False
        self.hexagons_generated = False
        self.ovals_generated = False
        self.overlapping_generated = False
        self.symmetric_generated = False
        self.thy_generated = False
    def create_rectangles(self):
        """Draw a long row of black rectangles once, then slide them left."""
        while not self.rectangles_generated:
            for i in range(1000):
                self.rectangles.append(self.canvas.create_rectangle(
                    self.x1 + (i*40), self.y1, self.x2 + (i*40), self.y2, fill='black'))
                self.rectangles.append(self.canvas.create_rectangle(
                    self.x1 - (i*40), self.y1, self.x2 - (i*40), self.y2, fill='black'))
            self.rectangles_generated = True
        for each in self.rectangles:
            self.canvas.move(each, -1, 0)
        self.canvas.after(10, self.create_rectangles)
    def create_triangles(self):
        """Draw a row of triangles once, then slide them right."""
        while not self.triangles_generated:
            for i in range(1000):
                self.triangles.append(self.canvas.create_polygon(
                    self.x1 + (i*40), self.y1, self.x2 + (i*40), self.y2, self.x3 + (i*40), self.y3
                ))
                self.triangles.append(self.canvas.create_polygon(
                    self.x1 - (i*40), self.y1, self.x2 - (i*40), self.y2, self.x3 - (i*40), self.y3
                ))
            self.triangles_generated = True
        for each in self.triangles:
            self.canvas.move(each, 1, 0)
        self.canvas.after(10, self.create_triangles)
    def create_hexagons(self):
        """Draw a row of hexagons (six explicit vertices) and slide them right."""
        while not self.hexagons_generated:
            for i in range(1000):
                self.hexagons.append(self.canvas.create_polygon(
                    self.x4 + (i*80), self.y4, self.x5 + (i*80), self.y5, self.x6 + (i*80), self.y6,
                    self.x1 + (i*80), self.y1, self.x2 + (i*80), self.y2, self.x3 + (i*80), self.y3,
                ))
                self.hexagons.append(self.canvas.create_polygon(
                    self.x1 - (i*80), self.y1, self.x2 - (i*80), self.y2, self.x3 - (i*80), self.y3,
                    self.x4 - (i*80), self.y4, self.x5 - (i*80), self.y5, self.x6 - (i*80), self.y6,
                ))
            self.hexagons_generated = True
        for each in self.hexagons:
            # NOTE(review): `index(each) // 3` is truthy for every index >= 3,
            # so all but the first three hexagons are painted dark red each
            # tick — confirm this colouring is intentional.
            if self.hexagons.index(each) // 3 :
                self.canvas.itemconfigure(each, fill='darkred')
            self.canvas.move(each, 10, 0)
        self.canvas.after(10, self.create_hexagons)
    def create_custom(self, n, angle):
        """Draw one n-gon with a random colour and random scale factors."""
        # Random "#rrggbb" colour.  NOTE(review): the local `re` shadows the
        # stdlib re module name (harmless here, nothing else uses re).
        de = ("%02x" % random.randint(0, 255))
        re = ("%02x" % random.randint(0, 255))
        we = ("%02x" % random.randint(0, 255))
        ge = "#"
        random_color = ge + de + re + we
        self.canvas.scale(self.canvas.create_polygon(
            self.get_n_angles_coords(self.x1, self.y1, self.x2, self.y2, n, angle), fill=random_color), self.x1+15, self.y1+15, random.randint(1, 6), random.randint(1, 6))
    def create_n(self, n, angle):
        """Draw a row of regular n-gons rotated by *angle* degrees (static)."""
        while not self.ovals_generated:
            for i in range(1000):
                self.ovals.append(self.canvas.create_polygon(
                    self.get_n_angles_coords(self.x1 + (i*60), self.y1, self.x2 + (i*60), self.y2,
                                             n, angle)
                ))
                self.ovals.append(self.canvas.create_polygon(
                    self.get_n_angles_coords(self.x1 - (i*60), self.y1, self.x2 - (i*60), self.y2,
                                             n, angle)
                ))
            self.ovals_generated = True
    def get_n_angles_coords(self, x1, y1, x2, y2, n, angle):
        """Return the flat coordinate list of a regular n-gon inscribed in
        the (x1, y1)-(x2, y2) bounding box, rotated by *angle* degrees.

        :returns: [px1, py1, px2, py2, ...] with integer-rounded points.
        """
        rotation = angle * math.pi / 180.0
        # Semi-axes
        a = (x2 - x1) / 2.0
        b = (y2 - y1) / 2.0
        # Center
        xc = x1 + a
        yc = y1 + b
        point_list = []
        for i in range(n):
            theta = (math.pi * 2) * (float(i) / n)
            # NOTE: x1/y1 are deliberately reused here as the unrotated
            # point relative to the center (the originals are no longer
            # needed once a, b, xc, yc are computed).
            x1 = a * math.cos(theta)
            y1 = b * math.sin(theta)
            # Rotate x, y around the center.
            x = (x1 * math.cos(rotation)) + (y1 * math.sin(rotation))
            y = (y1 * math.cos(rotation)) - (x1 * math.sin(rotation))
            point_list.append(round(x + xc))
            point_list.append(round(y + yc))
        return point_list
    def create_3_strips(self): # 4.1
        """Task 4.1: three interleaved diagonal strips (red/blue/yellow),
        drawn once (no animation, hence no *_generated flag needed)."""
        for i in range(1000):
            self.strips.append(self.canvas.create_rectangle(
                self.x1 + (i*50), self.y1 + (i*50), self.x2 + (i*50), self.y2 + (i*50), fill='red'
            ))
            self.strips.append(self.canvas.create_rectangle(
                self.x1 - (i*50), self.y1 - (i*50), self.x2 - (i*50), self.y2 - (i*50), fill='red'
            ))
            self.strips.append(self.canvas.create_rectangle(
                self.x1-25 + (i*50), self.y1 + (i*50), self.x2-25 + (i*50), self.y2 + (i*50), fill='blue'
            ))
            self.strips.append(self.canvas.create_rectangle(
                self.x1-25 - (i*50), self.y1 - (i*50), self.x2-25 - (i*50), self.y2 - (i*50), fill='blue'
            ))
            self.strips.append(self.canvas.create_rectangle(
                self.x1+25 + (i*50), self.y1 + (i*50), self.x2+25 + (i*50), self.y2 + (i*50), fill='yellow'
            ))
            self.strips.append(self.canvas.create_rectangle(
                self.x1+25 - (i*50), self.y1 - (i*50), self.x2+25 - (i*50), self.y2 - (i*50), fill='yellow'
            ))
    def create_overlapping(self): # 4.2
        """Task 4.2: two shape families moving at different velocities so
        they repeatedly overlap.

        NOTE(review): the first blue shape is a create_polygon with only
        two points (degenerate) while its mirror twin is a rectangle —
        confirm whether the polygon was meant to be a rectangle too.
        """
        while not self.overlapping_generated:
            for i in range(1000):
                self.overlapping_1.append(self.canvas.create_rectangle(
                    self.x1 + (i*50), self.y1 + (i*25), self.x2 + (i*50), self.y2 + (i*25), fill='red'
                ))
                self.overlapping_1.append(self.canvas.create_rectangle(
                    self.x1 - (i*50), self.y1 - (i*25), self.x2 - (i*50), self.y2 - (i*25), fill='red'
                ))
                self.overlapping_2.append(self.canvas.create_polygon(
                    self.x1_2 + (i*40), self.y1_2, self.x2_2 + (i*40), self.y2_2, fill='blue'
                ))
                self.overlapping_2.append(self.canvas.create_rectangle(
                    self.x1_2 - (i*40), self.y1_2, self.x2_2 - (i*40), self.y2_2, fill='blue'
                ))
            self.overlapping_generated = True
        for each in self.overlapping_1:
            self.canvas.move(each, 4, 2)
        for each in self.overlapping_2:
            self.canvas.move(each, 5, 0)
        self.canvas.after(10, self.create_overlapping)
    def create_symmetric(self): #4.3
        """Task 4.3: two mirrored triangle rows moving toward each other."""
        while not self.symmetric_generated:
            for i in range(1000):
                self.symmetric_1.append(self.canvas.create_polygon(
                    self.get_n_angles_coords(
                        self.x1 + (i*30), self.y1, self.x2 + (i*30), self.y2, n=3, angle=150)))
                self.symmetric_1.append(self.canvas.create_polygon(
                    self.get_n_angles_coords(
                        self.x1 - (i*30), self.y1, self.x2 - (i*30), self.y2, n=3, angle=150)))
                self.symmetric_2.append(self.canvas.create_polygon(
                    self.get_n_angles_coords(
                        self.x1 + (i*30), self.y1+30, self.x2 + (i*30), self.y2+30, n=3, angle=90)))
                self.symmetric_2.append(self.canvas.create_polygon(
                    self.get_n_angles_coords(
                        self.x1 - (i*30), self.y1+30, self.x2 - (i*30), self.y2+30, n=3, angle=90)))
            self.symmetric_generated = True
        for each in self.symmetric_1:
            self.canvas.move(each, -7, 0)
        for each in self.symmetric_2:
            self.canvas.move(each, 7, 0)
        self.canvas.after(10, self.create_symmetric)
    def create_thy(self): # 4.4
        """Task 4.4: homothety — nested rectangles growing from (375, 375)."""
        while not self.thy_generated:
            for i in range(10):
                for z in range(10):
                    self.thy.append(self.canvas.create_rectangle(375, 375, 375 + (i*20), 375 + (z*30), fill='black'))
                    self.thy.append(self.canvas.create_rectangle(375, 375, 375 - (i*20), 375 - (z*30), fill='white'))
            self.thy_generated = True
        self.canvas.after(10, self.create_thy)
class Objects(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, background='white')
self.parent = parent
self.parent.title('Манипуляции с фигурами')
self.pack(fill=BOTH, expand=1)
self.centerWindow()
self.initUI()
self.all_figures = []
def initUI(self):
self.canvas_area = Button(
self, text='Поле', width=10, command=self.make_canvas)
self.rectangles = Button(
self, text='gen_rectangle()',
command=self.gen_rectangle, width=16)
self.rectangles.grid(row=0, column=1)
self.canvas_area.grid(row=0, column=0)
triangles = Button(self, text='gen_triangles()', command=self.gen_triangle, width=16)
hexagons = Button(self, text='gen_hexagons()', command=self.gen_hexagon, width=16)
n_angles = Button(self, text='n_угольники()', command=self.gen_n_angles, width=16)
rotating = Button(self, text='rotate()', command=self.rotate, width=16)
strips_1 = Button(self, text='4.1', command=self.make_strips, width=2)
strips_2 = Button(self, text='4.2', command=self.make_overlapping, width=2)
symmetric = Button(self, text='4.3', command=self.make_symmetric, width=2)
homotethy = Button(self, text='4.4', command=self.make_thy, width=2)
operations = Button(self, text='Операции', command=self.operations, width=16)
strips_1.grid(row=1, column=3, sticky='w')
strips_2.grid(row=1, column=3)
symmetric.grid(row=1, column=3, sticky='e')
homotethy.grid(row=2, column=3)
self.rotating_angle = Entry(self, width=16)
moving = Button(self, text='move()', command=self.move, width=16)
self.deltaxy = Entry(self, width=16)
n_angle_label = Label(self, width=20, text='Кол-во углов, поворот')
self.n_angle = Entry(self, width=16)
triangles.grid(row=0, column=2)
hexagons.grid(row=0, column=3)
rotating.grid(row=1, column=0)
moving.grid(row=1, column=1)
n_angles.grid(row=0, column=4)
n_angle_label.grid(row=1, column=4)
self.rotating_angle.grid(row=2, column=0)
self.deltaxy.grid(row=2, column=1)
self.n_angle.grid(row=2, column=4)
operations.grid(row=1, column=2)
def make_canvas(self):
self.canvas_window = Toplevel(self)
self.canvas = Canvas(self.canvas_window, width=750, height=750)
self.canvas.grid(row=0, column=0)
# TODO: frontend
for i in range(1000): # x,y axes
self.canvas.create_line(0 + (i*375), 375, 750 + (i*750), 375, width=2)
self.canvas.create_line(0 - (i*375), 375, 750 - (i*750), 375, width=2)
self.canvas.create_line(375, 750 + (i*750), 375, 0 + (i*750), width=2)
self.canvas.create_line(375, 750 - (i*750), 375, 0 - (i*750), width=2)
coords_grid = [i for i in range(1000000) if i % 100 == 0]
for i in range(1000):
if i > 0:
self.canvas.create_text(375 + (i*100), 385, text=f'{coords_grid[i]}')
self.canvas.create_text(375 - (i*100), 385, text=f'-{coords_grid[i]}')
self.canvas.create_text(395, 395 + (i*100), text=f'-{coords_grid[i]}')
self.canvas.create_text(395, 395 - (i*100), text=f'{coords_grid[i]}')
# Фиксация элементов координатной сетки, чтобы в дальнейшем её не двигать
self.canvas_grid = self.canvas.find_all()
def gen_rectangle(self):
all_rects = Figure(self.canvas, x1=250, y1=250, x2=275, y2=275)
all_rects.create_rectangles()
def gen_triangle(self):
all_triangles = Figure(self.canvas, x1=300, y1=250, x2=285, y2=330, x3=265, y3=250)
all_triangles.create_triangles()
def gen_hexagon(self):
all_hexagons = Figure(self.canvas, x1=235, y1=224, x2=265, y2=224, x3=280, y3=250,
x4=265, y4=276, x5=235, y5=276, x6=220, y6=250)
all_hexagons.create_hexagons()
def gen_n_angles(self):
all_n_angles = Figure(self.canvas, x1=250, y1=250, x2=290, y2=290)
n, angle = self.n_angle.get().split(',')
all_n_angles.create_n(int(n), int(angle))
def make_strips(self): # 4.1
strips = Figure(self.canvas, x1=375, y1=375, x2=395, y2=395)
strips.create_3_strips()
def make_overlapping(self): # 4.2
overlapping = Figure(self.canvas, x1=375, y1=375, x2=405, y2=405,
x1_2=500, y1_2=500, x2_2=530, y2_2=530)
overlapping.create_overlapping()
def make_symmetric(self): # 4.3
symmetric = Figure(self.canvas, x1=400, y1=400, x2=430, y2=430)
symmetric.create_symmetric()
def make_thy(self): # 4.4
thy = Figure(self.canvas, x1=375, y1=375, x2=400, y2=400)
thy.create_thy()
def move(self):
"""Параллельный перенос"""
deltax, deltay = self.deltaxy.get().split(',')
all_figures = self.canvas.find_all()
# Фигуры, которые можно двигать (без поля)
movable_figures = list(set(all_figures) - set(self.canvas_grid))
for each in movable_figures:
self.canvas.move(each, deltax, deltay)
self.canvas.after(10, self.move)
def rotate(self):
angle = int(self.rotating_angle.get())
#rotation = angle * math.pi / 180.0
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
box_coords = []
real_coords = []
for each in movable_figures:
box_coords.append(self.canvas.bbox(each))
real_coords.append(self.canvas.coords(each))
#print(box_coords)
new_coords = []
rotator = Figure(self.canvas)
for item in box_coords:
x1 = item[0]
y1 = item[1]
x2 = item[2]
y2 = item[3]
n = len(real_coords[box_coords.index(item)]) // 2 # кол-во углов у фигуры
new_coords.append(rotator.get_n_angles_coords(x1, y1, x2, y2, n, angle))
#print(new_coords)
#print(real_coords)
for figure in movable_figures:
self.rotating(figure, new_coords[movable_figures.index(figure)])
def rotating(self, figure, *args):
self.canvas.coords(figure, [float(x) for x in args[0]])
def operations(self): # Окно с операциями из 5,8 заданий
self.operations_window = Toplevel(self)
count_area = Button(self.operations_window, text='Площадь', command=self.count_area, width=16)
custom_figure = Button(self.operations_window, text='Фигура', command=self.custom_figure, width=8)
resize = Button(self.operations_window, text='Размер', command=self.resize, width=16)
ang = Button(self.operations_window, text='Ближайшая вершина', command=self.ang, width=16)
min_edge = Button(self.operations_window, text='Минимальное ребро', command=self.long_s, width=16)
perimeter = Button(self.operations_window, text='Периметр', command=self.perimeter, width=16)
max_area = Button(self.operations_window, text='Максимальная S', command=self.max_ar, width=16)
area_filtration = Button(self.operations_window, text='Фильтрация площади', command=self.area_filter, width=16)
min_len_filter = Button(self.operations_window, text='Фил по мин ребру', command=self.min_len_filter, width=16)
find_location_angle = Button(self.operations_window, text='Фил по вхож в коорд', command=self.find_angle_location, width=16)
self.min_len_filter_count = Entry(self.operations_window, width=16)
self.area_filter_count = Entry(self.operations_window, width=16)
self.find_location = Entry(self.operations_window, width=16)
count_area.grid(row=0, column=5)
custom_figure.grid(row=0, column=0)
resize.grid(row=0, column=1)
ang.grid(row=0,column=2, sticky='w')
min_edge.grid(row=0, column=3, sticky='w')
perimeter.grid(row=0, column=4, sticky='w')
max_area.grid(row=1, column=5)
area_filtration.grid(row=1, column=1)
min_len_filter.grid(row=1, column=2)
find_location_angle.grid(row=1, column=3)
self.area_filter_count.grid(row=2, column=1)
self.min_len_filter_count.grid(row=2, column=2)
self.find_location.grid(row=2, column=3)
self.answer = Text(self.operations_window, width=50, height=4)
self.answer.grid(row=1, column=0)
def resize(self): # Изменить размер
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
for each in movable_figures:
self.canvas.scale(each, 375, 375, 1.2, 1.2)
def area_filter(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
movable_figures_coords = self.get_figures()
areas = []
areas_indexes = []
for each in movable_figures_coords:
if self.area(each) < float(self.area_filter_count.get()):
areas_indexes.append(movable_figures_coords.index(each))
for each in areas:
if each < int(self.area_filter_count.get()):
areas_indexes.append(areas.index(each))
for i in areas_indexes:
self.canvas.delete(movable_figures[i])
def min_len_filter(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
movable_figures_coords = self.get_figures()
areas = []
min_indexes = []
for each in movable_figures_coords:
if self.long_s_py(each)[1] < int(self.min_len_filter_count.get()):
min_indexes.append(movable_figures_coords.index(each))
for i in min_indexes:
self.canvas.delete(movable_figures[i])
def find_angle_location(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
movable_figures_coords = self.get_figures()
location_indexes = []
location = self.find_location.get().split(',')
for each in movable_figures_coords:
if location in self.canvas.coords(each):
location_indexes.append(movable_figures_coords.index(each))
for i in location_indexes:
self.canvas.delete(movable_figures[i])
def custom_figure(self):
n, angle = self.n_angle.get().split(',')
x1 = random.randint(100, 600)
y1 = x1
x2 = x1 + 30
y2 = x2
figure = Figure(self.canvas, x1=x1, y1=y1, x2=x2, y2=y2)
figure.create_custom(int(n), int(angle))
def get_figures(self):
all_figures = self.canvas.find_all()
movable_figures = list(set(all_figures) - set(self.canvas_grid))
real_coords = []
for each in movable_figures:
real_coords.append(self.canvas.coords(each))
tuple_coords = []
buff = []
for item in real_coords:
for i in range(len(item) // 2):
buff.append([item[i], item[i+1]])
tuple_coords.append(buff)
buff = []
return tuple_coords
def count_area(self):
movable_figures_coords = self.get_figures()
figure_coords = movable_figures_coords[0]
area = self.area(figure_coords)
self.answer.delete(0.0, END)
self.answer.insert(1.0, f'Площадь фигуры: {area}')
#print(figure_coords)
#print(f'Area: {area}')
def summ(self,cord=None): # 5 проверка на выпуклость
if cord is None: cord = self.cord # список в котором парами стоят координаты
for i in range(len(cord)+1):
if cord[i][0]*cord[(i+1)%len(cord)][1]-cord[(i+1)%len(cord)][0]*cord[i][1] < 0:
return -1 # не выпуклый
else:
return 1 # выпуклый
def area(self,cord): # площадь
#if cord is None: cord = self.cord
#cord = [[-13527, 250], [250, -13542], [-13542, 330]]
#print(cord)
sm = 0
for i in range(len(cord)):
sm += cord[i][0]*cord[(i+1)%len(cord)][1]-cord[i][1]*cord[(i+1)%len(cord)][0]
return abs(sm)/2
def ang(self): # возвращает ближ к началу координат вершину
movable_figures_coords = self.get_figures()
cord = movable_figures_coords[0]
lm = None
coord = ()
for i in cord:
if lm is None or lm>((i[0]-375)**2+(i[1]-375)**2)**0.5:
lm = ((i[0]-375)**2+(i[1]-375)**2)**0.5
coord = i
self.answer.delete(0.0, END)
self.answer.insert(1.0, f'Ближ к началу координат вершина: {coord}')
#return coord
def long_s(self): # находит ребро с минимальной длиной
movable_figures_coords = self.get_figures()
cord = movable_figures_coords[0]
lm = None
coord = None
for i in range(len(cord)):
if lm is None or lm < ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5:
lm = ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5
coord = (cord[i],cord[(i+1)%len(cord)])
self.answer.delete(0.0, END)
self.answer.insert(1.0, f'Точки ребра с минимальной длиной: {coord}, \n'
f'Длина этого ребра: {round(lm, 3)}')
#return coord,round(lm,3) # возвращает точки ребра и длину этого ребра
def long_s_py(self, cord): # находит ребро с минимальной длиной
lm = None
coord = None
for i in range(len(cord)):
if lm is None or lm < ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5:
lm = ((cord[(i+1)%len(cord)][0]-cord[i][0]) ** 2 + (cord[(i+1)%len(cord)][1]-cord[i][1]) ** 2) ** 0.5
coord = (cord[i],cord[(i+1)%len(cord)])
return cord, round(lm,3) # возвращает точки ребра и длину этого ребра
def perimeter(self):  # compute and display the perimeter
    """Compute the perimeter of the first movable figure and display it."""
    movable_figures_coords = self.get_figures()
    cord = movable_figures_coords[0]  # vertices of the first movable figure
    n = len(cord)
    # Sum the Euclidean lengths of all edges of the closed polygon.
    pr = sum(
        ((cord[(i + 1) % n][0] - cord[i][0]) ** 2
         + (cord[(i + 1) % n][1] - cord[i][1]) ** 2) ** 0.5
        for i in range(n)
    )
    self.answer.delete(0.0, END)
    self.answer.insert(1.0, f'Периметр фигуры ({len(movable_figures_coords[0])}-угольника): {round(pr, 2)}')
def max_ar(self):  # find the polygon with the maximum area and highlight it
    """Find the movable figure with the largest area, paint it black,
    raise it to the front, and report its coordinates and area.

    Fix: the original built ``movable_figures`` with
    ``list(set(all_figures) - set(self.canvas_grid))``; set difference has
    arbitrary iteration order, so indexing it with
    ``movable_figures_coords.index(fg)`` could highlight the *wrong*
    canvas item.  The list is now built order-preservingly from
    ``find_all()``.

    NOTE(review): this assumes ``get_figures()`` lists coordinate lists in
    canvas creation order (the order of ``find_all()``) -- TODO confirm.
    """
    movable_figures_coords = self.get_figures()  # one vertex list per figure
    sm = None  # largest area found so far
    fg = None  # vertex list of the figure achieving that area
    all_figures = self.canvas.find_all()
    grid_items = set(self.canvas_grid)
    # Canvas items minus the grid items = the movable figures, kept in
    # canvas order so the index lookup below stays valid.
    movable_figures = [item for item in all_figures if item not in grid_items]
    for figure_coords in movable_figures_coords:
        figure_area = self.area(figure_coords)  # hoisted: compute once
        if sm is None or sm < figure_area:
            sm = figure_area
            fg = figure_coords
    # Canvas tag of the winning figure, looked up by coordinate index.
    figure = movable_figures[movable_figures_coords.index(fg)]
    self.canvas.create_line(fg[0], fg[-1], fill='black')  # closing first->last edge
    self.canvas.itemconfigure(figure, fill='black')  # paint the winner black
    self.canvas.tag_raise(figure)  # bring the winner to the front
    self.answer.delete(0.0, END)
    self.answer.insert(1.0, f'Координаты фигуры с макс площадью: {fg} \n'
                            f'Макс площадь: {round(sm, 2)}')
def incl_p(self, A, P=None):
    """Return True if point *A* lies inside the convex polygon *P*.

    Uses the classic O(log n) fan test: reject points outside the angular
    sector at ``P[0]``, binary-search for the triangle-fan wedge containing
    *A*, then decide with a segment-intersection check.

    Fix: the original defined the helpers ``rotate``/``intersect``/
    ``pointloc`` but never invoked them, so it always fell off the end and
    returned ``None``; the missing ``return pointloc(P, A)`` is added.

    NOTE(review): the algorithm assumes *P* is convex with vertices in
    counter-clockwise order starting from ``P[0]`` -- confirm against the
    callers' vertex ordering.
    """
    if P is None:
        P = self.cord  # default to this object's own vertex list

    def rotate(A, B, C):
        # z-component of the cross product (B - A) x (C - B)
        return (B[0] - A[0]) * (C[1] - B[1]) - (B[1] - A[1]) * (C[0] - B[0])

    def intersect(A, B, C, D):
        # True when segments AB and CD intersect
        return rotate(A, B, C) * rotate(A, B, D) <= 0 and rotate(C, D, A) * rotate(C, D, B) < 0

    def pointloc(P, A):
        n = len(P)
        # Reject points outside the angular sector spanned at P[0].
        if rotate(P[0], P[1], A) < 0 or rotate(P[0], P[n - 1], A) > 0:
            return False
        # Binary search for the fan wedge (P[0], P[p], P[r]) containing A.
        p, r = 1, len(P) - 1
        while r - p > 1:
            q = (p + r) // 2
            if rotate(P[0], P[q], A) < 0:
                r = q
            else:
                p = q
        return not intersect(P[0], A, P[p], P[r])

    return pointloc(P, A)  # fixed: actually run the test and return its result
def centerWindow(self):
    """Centre the application window (fixed 880x96) on the screen."""
    width, height = 880, 96  # fixed window dimensions
    screen_w = self.parent.winfo_screenwidth()
    screen_h = self.parent.winfo_screenheight()
    # Top-left corner that centres a width x height window on the screen.
    offset_x = (screen_w - width) / 2
    offset_y = (screen_h - height) / 2
    self.parent.geometry('%dx%d+%d+%d' % (width, height, offset_x, offset_y))
def main():
    """Entry point: build the Tk root window and start the event loop."""
    root = Tk()
    app = Objects(root)  # keep a reference so the widget tree stays alive
    root.mainloop()


if __name__ == '__main__':
    main()
| ru | 0.967322 | # Оси # Центр # Поворачиваем x, y # 4.1 # 4.2 #4.3 # 4.4 # TODO: frontend # x,y axes # Фиксация элементов координатной сетки, чтобы в дальнейшем её не двигать # 4.1 # 4.2 # 4.3 # 4.4 Параллельный перенос # Фигуры, которые можно двигать (без поля) #rotation = angle * math.pi / 180.0 #print(box_coords) # кол-во углов у фигуры #print(new_coords) #print(real_coords) # Окно с операциями из 5,8 заданий # Изменить размер #print(figure_coords) #print(f'Area: {area}') # 5 проверка на выпуклость # список в котором парами стоят координаты # не выпуклый # выпуклый # площадь #if cord is None: cord = self.cord #cord = [[-13527, 250], [250, -13542], [-13542, 330]] #print(cord) # возвращает ближ к началу координат вершину #return coord # находит ребро с минимальной длиной #return coord,round(lm,3) # возвращает точки ребра и длину этого ребра # находит ребро с минимальной длиной # возвращает точки ребра и длину этого ребра # находит периметр #return round(pr,2) # возвращает периметр # находит многоульник с макс площадью # список со всеми фигурами(подсписками) # получаем тэг фигуры для манипуляций с помощью индекса координат(фигуры с макс пл) # покрасить фигуру с макс пл в черный # поднять фигуру с макс площадью на передний план #return round(sm,2), fg # максимальная площадь и координаты фигуры | 3.220697 | 3 |
Attack_models/Random_failure.py | utkarsh4499/Identifying-precursors-of-tipping-points | 2 | 6614620 | import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import itertools
import random
import operator
import math
import pickle
import os
from tqdm import tqdm
import sys
# Random failures
# contingencysize is the number of nodes to be removed in one step
# graph is the original network
def robustness(contingencysize, graph):
original_graph = graph.copy()
largestcluster = max(nx.connected_component_subgraphs(original_graph), key=len)
nodes = []
for q in range(0,original_graph.number_of_nodes()):
nodes.append(q)
sizeratio = []
sizeratio.append([0.0,1.0])
for m in range(contingencysize,original_graph.number_of_nodes(),contingencysize):
iterable = []
for j in range(0,100):
iterable.append(list(np.random.choice(nodes, contingencysize, replace=False))) # contingencysize is r in nCr
emptylist = []
for i in iterable:
G = graph.copy()
G.remove_nodes_from(i)
Numberconnectedcomponents = max(nx.connected_component_subgraphs(G), key=len)
emptylist.append([i,len(Numberconnectedcomponents)/len(largestcluster)])
G = graph.copy()
d = min(list(j for i,j in emptylist)) # d is the minimum value of SCF
sizeratio.append([m/contingencysize, d])
b = [x for x,y in emptylist if y==d][0] # b is the corressponding of d
graph.remove_nodes_from(b) # remove the nodes that cause maximum damage and update the graph
for k in b:
nodes.remove(k)
sizeratio.append([math.ceil(original_graph.number_of_nodes()/contingencysize),0.0])
return sizeratio
| import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import itertools
import random
import operator
import math
import pickle
import os
from tqdm import tqdm
import sys
# Random failures
# contingencysize is the number of nodes to be removed in one step
# graph is the original network
def robustness(contingencysize, graph):
original_graph = graph.copy()
largestcluster = max(nx.connected_component_subgraphs(original_graph), key=len)
nodes = []
for q in range(0,original_graph.number_of_nodes()):
nodes.append(q)
sizeratio = []
sizeratio.append([0.0,1.0])
for m in range(contingencysize,original_graph.number_of_nodes(),contingencysize):
iterable = []
for j in range(0,100):
iterable.append(list(np.random.choice(nodes, contingencysize, replace=False))) # contingencysize is r in nCr
emptylist = []
for i in iterable:
G = graph.copy()
G.remove_nodes_from(i)
Numberconnectedcomponents = max(nx.connected_component_subgraphs(G), key=len)
emptylist.append([i,len(Numberconnectedcomponents)/len(largestcluster)])
G = graph.copy()
d = min(list(j for i,j in emptylist)) # d is the minimum value of SCF
sizeratio.append([m/contingencysize, d])
b = [x for x,y in emptylist if y==d][0] # b is the corressponding of d
graph.remove_nodes_from(b) # remove the nodes that cause maximum damage and update the graph
for k in b:
nodes.remove(k)
sizeratio.append([math.ceil(original_graph.number_of_nodes()/contingencysize),0.0])
return sizeratio
| en | 0.90178 | # Random failures # contingencysize is the number of nodes to be removed in one step # graph is the original network # contingencysize is r in nCr # d is the minimum value of SCF # b is the corressponding of d # remove the nodes that cause maximum damage and update the graph | 2.812906 | 3 |
src/scrape_reddit_bigger_list.py | TheShadow29/subreddit-classification-dataset | 2 | 6614621 | <filename>src/scrape_reddit_bigger_list.py
"""
Main scraper file. Would include helper functions
Author: <NAME>
"""
# import praw
from datetime import datetime
from pathlib import Path
from tqdm import tqdm
import pandas as pd
from reddit_crawler import get_default_reddit_inst
class Crawl:
"""
A simple wrapper for convenient data scraping
"""
def __init__(self, crawler=None):
if crawler is None:
self.crawler = get_default_reddit_inst()
else:
self.crawler = crawler
self.header_lst = ['is_archived', 'num_gilded', 'is_duplicate', 'is_meta', 'is_self',
'perm_link', 'is_stickied', 'score', 'ups', 'downs', 'subreddit_name',
'title', 'text', 'create_time', 'captured_time']
def scrape(self, subreddit, num_to_scrape=10, scrape_from='hot', out_list=None):
"""
subreddit is either a string or
belongs to subreddit class from praw
num_to_scrape: number of submissions to scrape
scrape_from which mode to scrape from
out_list to be returned
"""
# If string convert to subreddit type which
# be used for querying
if isinstance(subreddit, str):
subreddit = self.crawler.subreddit(subreddit)
# Get the following fields:
# archived, gilded, duplicates exist,
# is_meta, is_self, permalink,
# stickied, score, ups, downs,
# subreddit name, title, selftext
# created_at_utc, captured_at_utc
# Need the scrape_from to belong to one of the
# given categories
assert hasattr(subreddit, scrape_from)
subm_generator = getattr(subreddit, scrape_from)
if out_list is None:
out_list = []
for subm in tqdm(subm_generator(limit=num_to_scrape), total=num_to_scrape):
tmp_dct = subm.__dict__
out_list.append(tmp_dct)
return out_list
if __name__ == '__main__':
small_sr_list = list(pd.read_csv(
'./low_filtered_strict.csv')['subreddit_name'])
cr = Crawl()
for scrape_from in ['top', 'controversial', 'hot']:
tdir = Path(f'./subr_big_csvs_{scrape_from}')
tdir.mkdir(exist_ok=True)
for i, sr in enumerate(small_sr_list):
try:
if (tdir / f'{sr}.csv').exists():
continue
olist = cr.scrape(sr, num_to_scrape=1000,
scrape_from=scrape_from)
df_out = pd.DataFrame(olist, columns=cr.header_lst)
df_out.to_csv(tdir / f'{sr}.csv', index=False, header=True)
print(i, f'Finished {sr}_{scrape_from} subreddit')
except Exception:
pass
# continue
| <filename>src/scrape_reddit_bigger_list.py
"""
Main scraper file. Would include helper functions
Author: <NAME>
"""
# import praw
from datetime import datetime
from pathlib import Path
from tqdm import tqdm
import pandas as pd
from reddit_crawler import get_default_reddit_inst
class Crawl:
"""
A simple wrapper for convenient data scraping
"""
def __init__(self, crawler=None):
if crawler is None:
self.crawler = get_default_reddit_inst()
else:
self.crawler = crawler
self.header_lst = ['is_archived', 'num_gilded', 'is_duplicate', 'is_meta', 'is_self',
'perm_link', 'is_stickied', 'score', 'ups', 'downs', 'subreddit_name',
'title', 'text', 'create_time', 'captured_time']
def scrape(self, subreddit, num_to_scrape=10, scrape_from='hot', out_list=None):
"""
subreddit is either a string or
belongs to subreddit class from praw
num_to_scrape: number of submissions to scrape
scrape_from which mode to scrape from
out_list to be returned
"""
# If string convert to subreddit type which
# be used for querying
if isinstance(subreddit, str):
subreddit = self.crawler.subreddit(subreddit)
# Get the following fields:
# archived, gilded, duplicates exist,
# is_meta, is_self, permalink,
# stickied, score, ups, downs,
# subreddit name, title, selftext
# created_at_utc, captured_at_utc
# Need the scrape_from to belong to one of the
# given categories
assert hasattr(subreddit, scrape_from)
subm_generator = getattr(subreddit, scrape_from)
if out_list is None:
out_list = []
for subm in tqdm(subm_generator(limit=num_to_scrape), total=num_to_scrape):
tmp_dct = subm.__dict__
out_list.append(tmp_dct)
return out_list
if __name__ == '__main__':
small_sr_list = list(pd.read_csv(
'./low_filtered_strict.csv')['subreddit_name'])
cr = Crawl()
for scrape_from in ['top', 'controversial', 'hot']:
tdir = Path(f'./subr_big_csvs_{scrape_from}')
tdir.mkdir(exist_ok=True)
for i, sr in enumerate(small_sr_list):
try:
if (tdir / f'{sr}.csv').exists():
continue
olist = cr.scrape(sr, num_to_scrape=1000,
scrape_from=scrape_from)
df_out = pd.DataFrame(olist, columns=cr.header_lst)
df_out.to_csv(tdir / f'{sr}.csv', index=False, header=True)
print(i, f'Finished {sr}_{scrape_from} subreddit')
except Exception:
pass
# continue
| en | 0.715328 | Main scraper file. Would include helper functions Author: <NAME> # import praw A simple wrapper for convenient data scraping subreddit is either a string or belongs to subreddit class from praw num_to_scrape: number of submissions to scrape scrape_from which mode to scrape from out_list to be returned # If string convert to subreddit type which # be used for querying # Get the following fields: # archived, gilded, duplicates exist, # is_meta, is_self, permalink, # stickied, score, ups, downs, # subreddit name, title, selftext # created_at_utc, captured_at_utc # Need the scrape_from to belong to one of the # given categories # continue | 3.217042 | 3 |
piafedit/model/source/raw_data_source.py | flegac/piaf-edit | 0 | 6614622 | import logging
import uuid
from piafedit.model.libs.operator import Buffer
from piafedit.model.source.data_source import DataSource
from piafedit.model.source.source_infos import SourceInfos
from piafedit.model.source.window import Window
log = logging.getLogger()
class RawDataSource(DataSource):
def __init__(self, data: Buffer):
super().__init__()
self.data = data
self._infos = SourceInfos(
name=str(uuid.uuid4()),
dtype=str(data.dtype),
shape=data.shape
)
def infos(self) -> SourceInfos:
return self._infos
def write(self, buffer: Buffer, window: Window = None):
data = self.update_window(window).crop(self.data)
data[...] = buffer
def read(self, window: Window = None) -> Buffer:
import cv2
data = self.update_window(window).window.crop(self.data)
if window.size:
data = cv2.resize(data, dsize=window.size.raw(), interpolation=cv2.INTER_CUBIC)
return data
| import logging
import uuid
from piafedit.model.libs.operator import Buffer
from piafedit.model.source.data_source import DataSource
from piafedit.model.source.source_infos import SourceInfos
from piafedit.model.source.window import Window
log = logging.getLogger()
class RawDataSource(DataSource):
def __init__(self, data: Buffer):
super().__init__()
self.data = data
self._infos = SourceInfos(
name=str(uuid.uuid4()),
dtype=str(data.dtype),
shape=data.shape
)
def infos(self) -> SourceInfos:
return self._infos
def write(self, buffer: Buffer, window: Window = None):
data = self.update_window(window).crop(self.data)
data[...] = buffer
def read(self, window: Window = None) -> Buffer:
import cv2
data = self.update_window(window).window.crop(self.data)
if window.size:
data = cv2.resize(data, dsize=window.size.raw(), interpolation=cv2.INTER_CUBIC)
return data
| none | 1 | 2.357101 | 2 | |
library/openshift_v1_route.py | ansible/ansible-kubernetes-modules- | 91 | 6614623 | <reponame>ansible/ansible-kubernetes-modules-
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.openshift_common import OpenShiftAnsibleModule, OpenShiftAnsibleException
DOCUMENTATION = '''
module: openshift_v1_route
short_description: OpenShift Route
description:
- Manage the lifecycle of a route object. Supports check mode, and attempts to to
be idempotent.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
annotations:
description:
- Annotations is an unstructured key value map stored with a resource that may
be set by external tools to store and retrieve arbitrary metadata. They are
not queryable and should be preserved when modifying objects.
type: dict
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will updated,
and lists will be replaced, rather than merged.
default: false
type: bool
host:
description:
- Provide a URL for acessing the Kubernetes API.
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
labels:
description:
- Map of string keys and values that can be used to organize and categorize (scope
and select) objects. May match selectors of replication controllers and services.
type: dict
name:
description:
- Name must be unique within a namespace. Is required when creating resources,
although some resources may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation idempotence and
configuration definition. Cannot be updated.
namespace:
description:
- Namespace defines the space within each name must be unique. An empty namespace
is equivalent to the "default" namespace, but "default" is the canonical representation.
Not all objects are required to be scoped to a namespace - the value of this
field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated.
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
resource_definition:
description:
- Provide the YAML definition for the object, bypassing any modules parameters
intended to define object attributes.
type: dict
spec_alternate_backends:
description:
- alternateBackends allows up to 3 additional backends to be assigned to the route.
Only the Service kind is allowed, and it will be defaulted to Service. Use the
weight field in RouteTargetReference object to specify relative preference.
aliases:
- alternate_backends
type: list
spec_host:
description:
- host is an alias/DNS that points to the service. Optional. If not specified
a route name will typically be automatically chosen. Must follow DNS952 subdomain
conventions.
spec_path:
description:
- Path that the router watches for, to route traffic for to the service. Optional
aliases:
- path
spec_port_target_port:
description:
- The target port on pods selected by the service this route points to. If this
is a string, it will be looked up as a named port in the target endpoints port
list. Required
aliases:
- port_target_port
type: object
spec_tls_ca_certificate:
description:
- caCertificate provides the cert authority certificate contents
aliases:
- tls_ca_certificate
spec_tls_certificate:
description:
- certificate provides certificate contents
aliases:
- tls_certificate
spec_tls_destination_ca_certificate:
description:
- destinationCACertificate provides the contents of the ca certificate of the
final destination. When using reencrypt termination this file should be provided
in order to have routers use it for health checks on the secure connection.
If this field is not specified, the router may provide its own destination CA
and perform hostname validation using the short service name (service.namespace.svc),
which allows infrastructure generated certificates to automatically verify.
aliases:
- tls_destination_ca_certificate
spec_tls_insecure_edge_termination_policy:
description:
- insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections
to a route. While each router may make its own decisions on which ports to expose,
this is normally port 80. * Allow - traffic is sent to the server on the insecure
port (default) * Disable - no traffic is allowed on the insecure port. * Redirect
- clients are redirected to the secure port.
aliases:
- tls_insecure_edge_termination_policy
spec_tls_key:
description:
- key provides key file contents
aliases:
- tls_key
spec_tls_termination:
description:
- termination indicates termination type.
aliases:
- tls_termination
spec_to_kind:
description:
- The kind of target that the route is referring to. Currently, only 'Service'
is allowed
aliases:
- to_kind
spec_to_name:
description:
- name of the service/target that is being referred to. e.g. name of the service
aliases:
- to_name
spec_to_weight:
description:
- weight as an integer between 0 and 256, default 1, that specifies the target's
relative weight against other target reference objects. 0 suppresses requests
to this backend.
aliases:
- to_weight
type: int
spec_wildcard_policy:
description:
- Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is
allowed.
aliases:
- wildcard_policy
src:
description:
- Provide a path to a file containing the YAML definition of the object. Mutually
exclusive with I(resource_definition).
type: path
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
state:
description:
- Determines if an object should be created, patched, or deleted. When set to
C(present), the object will be created, if it does not exist, or patched, if
parameter values differ from the existing object's attributes, and deleted,
if set to C(absent). A patch operation results in merging lists and updating
dictionaries, with lists being merged into a unique set of values. If a list
contains a dictionary with a I(name) or I(type) attribute, a strategic merge
is performed, where individual elements with a matching I(name_) or I(type)
are merged. To force the replacement of lists, set the I(force) option to C(True).
default: present
choices:
- present
- absent
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- openshift == 0.4.0.a1
'''
EXAMPLES = '''
- name: Create route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: present
host: www.example.com
spec_to_kind: Service
spec_to_name: service-name
tls_termination: edge
tls_key: |-
-----BEGIN PRIVATE KEY-----
key_file_contents
-----END PRIVATE KEY-----
tls_certificate: |-
-----BEGIN CERTIFICATE-----
certificate contents
-----END CERTIFICATE-----
tls_ca_certificate: |-
-----BEGIN CERTIFICATE-----
ca_certificate_contents
-----END CERTIFICATE-----
- name: Patch route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: present
host: www.example.com
tls_termination: reencrypt
spec_to_kind: Service
spec_to_name: other-service-name
- name: Replace route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: replaced
host: www.example.com
path: /foo/bar/baz.html
spec_to_kind: Service
spec_to_name: whimsy-name
tls_termination: edge
- name: Remove route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: absent
'''
RETURN = '''
api_version:
description: Requested API version
type: string
route:
type: complex
returned: when I(state) = C(present)
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- Standard object metadata.
type: complex
spec:
description:
- spec is the desired state of the route
type: complex
status:
description:
- status is the current state of the route
type: complex
'''
def main():
try:
module = OpenShiftAnsibleModule('route', 'v1')
except OpenShiftAnsibleException as exc:
# The helper failed to init, so there is no module object. All we can do is raise the error.
raise Exception(exc.message)
try:
module.execute_module()
except OpenShiftAnsibleException as exc:
module.fail_json(msg="Module failed!", error=str(exc))
if __name__ == '__main__':
main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.openshift_common import OpenShiftAnsibleModule, OpenShiftAnsibleException
DOCUMENTATION = '''
module: openshift_v1_route
short_description: OpenShift Route
description:
- Manage the lifecycle of a route object. Supports check mode, and attempts to to
be idempotent.
version_added: 2.3.0
author: OpenShift (@openshift)
options:
annotations:
description:
- Annotations is an unstructured key value map stored with a resource that may
be set by external tools to store and retrieve arbitrary metadata. They are
not queryable and should be preserved when modifying objects.
type: dict
api_key:
description:
- Token used to connect to the API.
cert_file:
description:
- Path to a certificate used to authenticate with the API.
type: path
context:
description:
- The name of a context found in the Kubernetes config file.
debug:
description:
- Enable debug output from the OpenShift helper. Logging info is written to KubeObjHelper.log
default: false
type: bool
force:
description:
- If set to C(True), and I(state) is C(present), an existing object will updated,
and lists will be replaced, rather than merged.
default: false
type: bool
host:
description:
- Provide a URL for acessing the Kubernetes API.
key_file:
description:
- Path to a key file used to authenticate with the API.
type: path
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the openshift client will attempt to load the default
configuration file from I(~/.kube/config.json).
type: path
labels:
description:
- Map of string keys and values that can be used to organize and categorize (scope
and select) objects. May match selectors of replication controllers and services.
type: dict
name:
description:
- Name must be unique within a namespace. Is required when creating resources,
although some resources may allow a client to request the generation of an appropriate
name automatically. Name is primarily intended for creation idempotence and
configuration definition. Cannot be updated.
namespace:
description:
- Namespace defines the space within each name must be unique. An empty namespace
is equivalent to the "default" namespace, but "default" is the canonical representation.
Not all objects are required to be scoped to a namespace - the value of this
field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated.
password:
description:
- Provide a password for connecting to the API. Use in conjunction with I(username).
resource_definition:
description:
- Provide the YAML definition for the object, bypassing any modules parameters
intended to define object attributes.
type: dict
spec_alternate_backends:
description:
- alternateBackends allows up to 3 additional backends to be assigned to the route.
Only the Service kind is allowed, and it will be defaulted to Service. Use the
weight field in RouteTargetReference object to specify relative preference.
aliases:
- alternate_backends
type: list
spec_host:
description:
- host is an alias/DNS that points to the service. Optional. If not specified
a route name will typically be automatically chosen. Must follow DNS952 subdomain
conventions.
spec_path:
description:
- Path that the router watches for, to route traffic for to the service. Optional
aliases:
- path
spec_port_target_port:
description:
- The target port on pods selected by the service this route points to. If this
is a string, it will be looked up as a named port in the target endpoints port
list. Required
aliases:
- port_target_port
type: object
spec_tls_ca_certificate:
description:
- caCertificate provides the cert authority certificate contents
aliases:
- tls_ca_certificate
spec_tls_certificate:
description:
- certificate provides certificate contents
aliases:
- tls_certificate
spec_tls_destination_ca_certificate:
description:
- destinationCACertificate provides the contents of the ca certificate of the
final destination. When using reencrypt termination this file should be provided
in order to have routers use it for health checks on the secure connection.
If this field is not specified, the router may provide its own destination CA
and perform hostname validation using the short service name (service.namespace.svc),
which allows infrastructure generated certificates to automatically verify.
aliases:
- tls_destination_ca_certificate
spec_tls_insecure_edge_termination_policy:
description:
- insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections
to a route. While each router may make its own decisions on which ports to expose,
this is normally port 80. * Allow - traffic is sent to the server on the insecure
port (default) * Disable - no traffic is allowed on the insecure port. * Redirect
- clients are redirected to the secure port.
aliases:
- tls_insecure_edge_termination_policy
spec_tls_key:
description:
- key provides key file contents
aliases:
- tls_key
spec_tls_termination:
description:
- termination indicates termination type.
aliases:
- tls_termination
spec_to_kind:
description:
- The kind of target that the route is referring to. Currently, only 'Service'
is allowed
aliases:
- to_kind
spec_to_name:
description:
- name of the service/target that is being referred to. e.g. name of the service
aliases:
- to_name
spec_to_weight:
description:
- weight as an integer between 0 and 256, default 1, that specifies the target's
relative weight against other target reference objects. 0 suppresses requests
to this backend.
aliases:
- to_weight
type: int
spec_wildcard_policy:
description:
- Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is
allowed.
aliases:
- wildcard_policy
src:
description:
- Provide a path to a file containing the YAML definition of the object. Mutually
exclusive with I(resource_definition).
type: path
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
type: path
state:
description:
- Determines if an object should be created, patched, or deleted. When set to
C(present), the object will be created, if it does not exist, or patched, if
parameter values differ from the existing object's attributes, and deleted,
if set to C(absent). A patch operation results in merging lists and updating
dictionaries, with lists being merged into a unique set of values. If a list
contains a dictionary with a I(name) or I(type) attribute, a strategic merge
is performed, where individual elements with a matching I(name_) or I(type)
are merged. To force the replacement of lists, set the I(force) option to C(True).
default: present
choices:
- present
- absent
username:
description:
- Provide a username for connecting to the API.
verify_ssl:
description:
- Whether or not to verify the API server's SSL certificates.
type: bool
requirements:
- openshift == 0.4.0.a1
'''
EXAMPLES = '''
- name: Create route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: present
host: www.example.com
spec_to_kind: Service
spec_to_name: service-name
tls_termination: edge
tls_key: |-
-----BEGIN PRIVATE KEY-----
key_file_contents
-----END PRIVATE KEY-----
tls_certificate: |-
-----BEGIN CERTIFICATE-----
certificate contents
-----END CERTIFICATE-----
tls_ca_certificate: |-
-----BEGIN CERTIFICATE-----
ca_certificate_contents
-----END CERTIFICATE-----
- name: Patch route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: present
host: www.example.com
tls_termination: reencrypt
spec_to_kind: Service
spec_to_name: other-service-name
- name: Replace route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: replaced
host: www.example.com
path: /foo/bar/baz.html
spec_to_kind: Service
spec_to_name: whimsy-name
tls_termination: edge
- name: Remove route
openshift_v1_route.yml:
name: myroute
namespace: k8s-project
state: absent
'''
RETURN = '''
api_version:
description: Requested API version
type: string
route:
type: complex
returned: when I(state) = C(present)
contains:
api_version:
description:
- APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
type: str
kind:
description:
- Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot
be updated. In CamelCase.
type: str
metadata:
description:
- Standard object metadata.
type: complex
spec:
description:
- spec is the desired state of the route
type: complex
status:
description:
- status is the current state of the route
type: complex
'''
def main():
    """Entry point: construct the OpenShift 'route' helper and run it."""
    try:
        module = OpenShiftAnsibleModule('route', 'v1')
    except OpenShiftAnsibleException as init_err:
        # No module object exists yet, so fail_json is unavailable — re-raise.
        raise Exception(init_err.message)
    try:
        module.execute_module()
    except OpenShiftAnsibleException as run_err:
        module.fail_json(msg="Module failed!", error=str(run_err))


if __name__ == '__main__':
    main()
Cannot be updated. namespace: description: - Namespace defines the space within each name must be unique. An empty namespace is equivalent to the "default" namespace, but "default" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. Must be a DNS_LABEL. Cannot be updated. password: description: - Provide a password for connecting to the API. Use in conjunction with I(username). resource_definition: description: - Provide the YAML definition for the object, bypassing any modules parameters intended to define object attributes. type: dict spec_alternate_backends: description: - alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. aliases: - alternate_backends type: list spec_host: description: - host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. spec_path: description: - Path that the router watches for, to route traffic for to the service. Optional aliases: - path spec_port_target_port: description: - The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required aliases: - port_target_port type: object spec_tls_ca_certificate: description: - caCertificate provides the cert authority certificate contents aliases: - tls_ca_certificate spec_tls_certificate: description: - certificate provides certificate contents aliases: - tls_certificate spec_tls_destination_ca_certificate: description: - destinationCACertificate provides the contents of the ca certificate of the final destination. 
When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. aliases: - tls_destination_ca_certificate spec_tls_insecure_edge_termination_policy: description: - insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. * Allow - traffic is sent to the server on the insecure port (default) * Disable - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port. aliases: - tls_insecure_edge_termination_policy spec_tls_key: description: - key provides key file contents aliases: - tls_key spec_tls_termination: description: - termination indicates termination type. aliases: - tls_termination spec_to_kind: description: - The kind of target that the route is referring to. Currently, only 'Service' is allowed aliases: - to_kind spec_to_name: description: - name of the service/target that is being referred to. e.g. name of the service aliases: - to_name spec_to_weight: description: - weight as an integer between 0 and 256, default 1, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. aliases: - to_weight type: int spec_wildcard_policy: description: - Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed. aliases: - wildcard_policy src: description: - Provide a path to a file containing the YAML definition of the object. Mutually exclusive with I(resource_definition). type: path ssl_ca_cert: description: - Path to a CA certificate used to authenticate with the API. 
type: path state: description: - Determines if an object should be created, patched, or deleted. When set to C(present), the object will be created, if it does not exist, or patched, if parameter values differ from the existing object's attributes, and deleted, if set to C(absent). A patch operation results in merging lists and updating dictionaries, with lists being merged into a unique set of values. If a list contains a dictionary with a I(name) or I(type) attribute, a strategic merge is performed, where individual elements with a matching I(name_) or I(type) are merged. To force the replacement of lists, set the I(force) option to C(True). default: present choices: - present - absent username: description: - Provide a username for connecting to the API. verify_ssl: description: - Whether or not to verify the API server's SSL certificates. type: bool requirements: - openshift == 0.4.0.a1 - name: Create route openshift_v1_route.yml: name: myroute namespace: k8s-project state: present host: www.example.com spec_to_kind: Service spec_to_name: service-name tls_termination: edge tls_key: |- -----BEGIN PRIVATE KEY----- key_file_contents -----END PRIVATE KEY----- tls_certificate: |- -----BEGIN CERTIFICATE----- certificate contents -----END CERTIFICATE----- tls_ca_certificate: |- -----BEGIN CERTIFICATE----- ca_certificate_contents -----END CERTIFICATE----- - name: Patch route openshift_v1_route.yml: name: myroute namespace: k8s-project state: present host: www.example.com tls_termination: reencrypt spec_to_kind: Service spec_to_name: other-service-name - name: Replace route openshift_v1_route.yml: name: myroute namespace: k8s-project state: replaced host: www.example.com path: /foo/bar/baz.html spec_to_kind: Service spec_to_name: whimsy-name tls_termination: edge - name: Remove route openshift_v1_route.yml: name: myroute namespace: k8s-project state: absent api_version: description: Requested API version type: string route: type: complex returned: when I(state) = 
C(present) contains: api_version: description: - APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. type: str kind: description: - Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. type: str metadata: description: - Standard object metadata. type: complex spec: description: - spec is the desired state of the route type: complex status: description: - status is the current state of the route type: complex # The helper failed to init, so there is no module object. All we can do is raise the error. | 1.857555 | 2 |
perso/settings/prod.py | arthurio/site_heroku | 0 | 6614624 | from perso.settings import *
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| from perso.settings import *
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
| en | 0.488755 | # Parse database configuration from $DATABASE_URL # Honor the 'X-Forwarded-Proto' header for request.is_secure() | 1.59164 | 2 |
tests/unit/pypyr/utils/expressions_test.py | FooBarQuaxx/pypyr | 0 | 6614625 | <filename>tests/unit/pypyr/utils/expressions_test.py
"""expressions.py unit tests."""
from math import sqrt
import pytest
from pypyr.context import Context
import pypyr.utils.expressions as expressions
def test_simple_expr_none_dict():
    """Arithmetic evaluates with no locals namespace at all."""
    result = expressions.eval_string('1+1', None)
    assert result == 2
def test_simple_expr_empty_dict():
    """A comparison evaluates to a real bool against an empty namespace."""
    result = expressions.eval_string('len("123456") < 5', {})
    assert isinstance(result, bool)
    assert result is False
def test_simple_expr_locals_dict():
    """A callable supplied in the namespace is usable inside the expression."""
    namespace = {'sqrt': sqrt}
    assert expressions.eval_string('sqrt(4)', namespace) == 2
def test_expr_dict_vars():
    """Plain variables resolve straight from the supplied namespace."""
    namespace = {'k1': 2, 'k2': 3}
    assert expressions.eval_string('(k1 + k2)*2==10', namespace)
def test_expr_dict_nested_vars():
    """Nested container lookups resolve against namespace values."""
    namespace = {'k1': 1,
                 'k2': [0, 1, {'k2.2': 1.23}]}
    assert expressions.eval_string('k2[2]["k2.2"] == 1.23', namespace)
def test_expr_evals_bool():
    """A bare name evaluates to its (boolean) namespace value."""
    result = expressions.eval_string("a", {'a': True})
    assert isinstance(result, bool)
    assert result is True
def test_expr_evals_complex():
    """Dict literals in the expression compare equal to namespace dicts."""
    namespace = {'c': {'a': 'b'}}
    assert expressions.eval_string('{"a": "b"} == c', namespace)
def test_expr_runtime_error():
    """Runtime failures propagate with their original exception type."""
    bad_expr = '1/0'
    with pytest.raises(ZeroDivisionError):
        expressions.eval_string(bad_expr, None)
def test_expr_invalid_syntax():
    """Malformed input surfaces as a SyntaxError."""
    bad_expr = 'invalid code here'
    with pytest.raises(SyntaxError):
        expressions.eval_string(bad_expr, None)
def test_expr_var_doesnt_exist():
    """Referencing a name missing from the namespace raises NameError."""
    namespace = {'b': True}
    with pytest.raises(NameError):
        expressions.eval_string('a', namespace)
def test_expr_func_when_context_as_locals():
    """Built-ins stay available when a Context object is the namespace."""
    ctx = Context({'k1': 'v1'})
    assert expressions.eval_string('len([0,1,2])', ctx) == 3
| <filename>tests/unit/pypyr/utils/expressions_test.py
"""expressions.py unit tests."""
from math import sqrt
import pytest
from pypyr.context import Context
import pypyr.utils.expressions as expressions
def test_simple_expr_none_dict():
"""Simple expression passes, with no locals dict."""
assert expressions.eval_string('1+1', None) == 2
def test_simple_expr_empty_dict():
"""Simple expression passes, with no locals dict."""
out = expressions.eval_string('len("123456") < 5', {})
assert isinstance(out, bool)
assert not out
def test_simple_expr_locals_dict():
"""Simple expression passes, with locals dict."""
assert expressions.eval_string('sqrt(4)', {'sqrt': sqrt}) == 2
def test_expr_dict_vars():
"""Expression uses vars from input dict."""
assert expressions.eval_string('(k1 + k2)*2==10', {'k1': 2, 'k2': 3})
def test_expr_dict_nested_vars():
"""Expression uses nested vars from input dict."""
assert expressions.eval_string('k2[2]["k2.2"] == 1.23',
{'k1': 1,
'k2': [0,
1,
{'k2.2': 1.23}
]
}
)
def test_expr_evals_bool():
"""Expression can work as a boolean type."""
out = expressions.eval_string("a", {'a': True})
assert isinstance(out, bool)
assert out
def test_expr_evals_complex():
"""Expression evaluates complex types."""
assert expressions.eval_string('{"a": "b"} == c', {'c': {'a': 'b'}})
def test_expr_runtime_error():
"""Expression raises expected type during runtime error."""
with pytest.raises(ZeroDivisionError):
expressions.eval_string('1/0', None)
def test_expr_invalid_syntax():
"""Expression raises when invalid sytntax on input."""
with pytest.raises(SyntaxError):
expressions.eval_string('invalid code here', None)
def test_expr_var_doesnt_exist():
"""Expression raises when variable not found in namespace."""
with pytest.raises(NameError):
expressions.eval_string('a', {'b': True})
def test_expr_func_when_context_as_locals():
"""Expression should use built-in function when Context used as locals."""
assert expressions.eval_string('len([0,1,2])', Context({'k1': 'v1'})) == 3
| en | 0.843617 | expressions.py unit tests. Simple expression passes, with no locals dict. Simple expression passes, with no locals dict. Simple expression passes, with locals dict. Expression uses vars from input dict. Expression uses nested vars from input dict. Expression can work as a boolean type. Expression evaluates complex types. Expression raises expected type during runtime error. Expression raises when invalid sytntax on input. Expression raises when variable not found in namespace. Expression should use built-in function when Context used as locals. | 2.936225 | 3 |
quiz_master/accounts/views.py | DiyanKalaydzhiev23/quiz_master | 0 | 6614626 | from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse_lazy
from quiz_master.accounts.forms import CreateProfileForm, EditProfileForm, EditUserForm
from django.views import generic as views
from django.contrib.auth import views as auth_views, get_user_model
from quiz_master.accounts.models import Profile
from quiz_master.common.quizzes_get_view_and_context import get
UserModel = get_user_model()
@login_required()
def profile_view(request, pk=None, quiz_name=None):
    """Render the profile page; on POST the quiz name comes from form data."""
    template = 'accounts/profile.html'
    name = quiz_name if request.method == 'GET' else request.POST.get('quiz_name')
    return get(request, pk, name, template)
class EditProfile(LoginRequiredMixin, views.UpdateView):
    """Edit a profile: a combined user form (username/email) plus a profile
    form (names, picture), validated and saved together."""
    model = Profile
    template_name = 'accounts/edit_profile.html'

    def get(self, *args, **kwargs):
        # Pre-fill both forms with the current user and profile instances.
        context = {
            'user_form': EditUserForm(instance=UserModel.objects.get(pk=self.get_object().user_id)),
            'profile_form': EditProfileForm(instance=self.get_object()),
        }
        return self.render_to_response(context)

    def post(self, *args, **kwargs):
        user_data = {
            'username': self.request.POST.get('username'),
            'email': self.request.POST.get('email'),
        }
        profile_data = {
            'first_name': self.request.POST.get('first_name'),
            'last_name': self.request.POST.get('last_name'),
        }
        user_form = EditUserForm(
            data=user_data,
            instance=UserModel.objects.get(pk=self.get_object().user_id),
        )
        profile_form = EditProfileForm(
            data=profile_data,
            files=self.request.FILES,
            instance=self.get_object(),
        )
        # Save only when BOTH forms validate, then return to the profile page.
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            return redirect('profile', self.get_object().user_id)
        context = {
            'user_form': user_form,
            'profile_form': profile_form,
            # BUGFIX: dict.update() returns None, so the original
            # `user_form.errors.update(profile_form.errors.items())` always
            # put None into the template context; merge the error dicts.
            'form_errors': {**user_form.errors, **profile_form.errors},
        }
        return self.render_to_response(context)
class DeleteUserView(LoginRequiredMixin, views.DeleteView):
    """Confirm and delete the user account, then redirect to the home page."""
    model = UserModel
    template_name = 'pages/delete_profile.html'
    success_url = reverse_lazy('home')
class UserRegisterView(views.CreateView):
    """Sign-up view: creates a user + profile, then sends them to login."""
    form_class = CreateProfileForm
    template_name = 'accounts/signup.html'
    success_url = reverse_lazy('login')
class UserLoginView(auth_views.LoginView):
    """Sign-in view; successful logins land on the quizzes page."""
    template_name = 'accounts/signin.html'
    success_url = reverse_lazy('quizzes')

    def get_success_url(self):
        # Fall back to Django's default (?next= / LOGIN_REDIRECT_URL)
        # when no explicit success_url is configured.
        return self.success_url or super().get_success_url()
class UserLogoutView(LoginRequiredMixin, auth_views.LogoutView):
    """Log the user out and send them back to the login page."""
    success_url = reverse_lazy('login')
| from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse_lazy
from quiz_master.accounts.forms import CreateProfileForm, EditProfileForm, EditUserForm
from django.views import generic as views
from django.contrib.auth import views as auth_views, get_user_model
from quiz_master.accounts.models import Profile
from quiz_master.common.quizzes_get_view_and_context import get
UserModel = get_user_model()
@login_required()
def profile_view(request, pk=None, quiz_name=None):
if request.method == 'GET':
return get(request, pk, quiz_name, 'accounts/profile.html')
else:
return get(request, pk, request.POST.get('quiz_name'), 'accounts/profile.html')
class EditProfile(LoginRequiredMixin, views.UpdateView):
model = Profile
template_name = 'accounts/edit_profile.html'
def get(self, *args, **kwargs):
context = {
'user_form': EditUserForm(instance=UserModel.objects.get(pk=self.get_object().user_id)),
'profile_form': EditProfileForm(instance=self.get_object()),
}
return self.render_to_response(context)
def post(self, *args, **kwargs):
user_data = {
'username': self.request.POST.get('username'),
'email': self.request.POST.get('email'),
}
profile_data = {
'first_name': self.request.POST.get('first_name'),
'last_name': self.request.POST.get('last_name'),
}
user_form = EditUserForm(
data=user_data,
instance=UserModel.objects.get(pk=self.get_object().user_id),
)
profile_form = EditProfileForm(
data=profile_data,
files=self.request.FILES,
instance=self.get_object()
)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
return redirect('profile', self.get_object().user_id)
context = {
'user_form': user_form,
'profile_form': profile_form,
'form_errors': user_form.errors.update(profile_form.errors.items()),
}
return self.render_to_response(context)
class DeleteUserView(LoginRequiredMixin, views.DeleteView):
model = UserModel
template_name = 'pages/delete_profile.html'
success_url = reverse_lazy('home')
class UserRegisterView(views.CreateView):
form_class = CreateProfileForm
template_name = 'accounts/signup.html'
success_url = reverse_lazy('login')
class UserLoginView(auth_views.LoginView):
template_name = 'accounts/signin.html'
success_url = reverse_lazy('quizzes')
def get_success_url(self):
if self.success_url:
return self.success_url
return super().get_success_url()
class UserLogoutView(LoginRequiredMixin, auth_views.LogoutView):
success_url = reverse_lazy('login')
| none | 1 | 2.142669 | 2 | |
find-second-maximum-number-in-a-list.py | 0xecho/HackerRankSolutions | 0 | 6614627 | <gh_stars>0
def second_largest(scores):
    """Return the runner-up (second distinct maximum) of ``scores``.

    Generalizes the original hard-coded ``-101`` sentinel (which silently
    failed for values below -101 or all-equal input): works for any integer
    range and raises ValueError when no second distinct value exists.
    """
    distinct = sorted(set(scores))
    if len(distinct) < 2:
        raise ValueError("need at least two distinct values")
    return distinct[-2]


if __name__ == '__main__':
    n = int(input())
    arr = list(map(int, input().split()))
    print(second_largest(arr))
| if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
a = -101
b = max(arr)
for i in arr:
if i>a and i<b:
a=i
print(a) | none | 1 | 3.396359 | 3 | |
cliffwalking_exp/model/AGENT.py | shengzhang37/Statistical-Inference-of-the-Value-Function-for-Reinforcement-Learning-in-Infinite-Horizon-Settings | 2 | 6614628 | <reponame>shengzhang37/Statistical-Inference-of-the-Value-Function-for-Reinforcement-Learning-in-Infinite-Horizon-Settings
from .simulator import *
from .agent_utility import *
import operator
from itertools import product
from itertools import accumulate
import numpy as np
import random
import pickle
import os.path
import time
from scipy.interpolate import BSpline
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from numpy.linalg import inv
from functools import reduce
from scipy.stats import norm
from scipy import integrate
from scipy.stats import norm
from tqdm import tqdm
"""
Totally tailed to cliff walking
1. modify the Action space (xi dimension)
2.
"""
class Agent(object):
    """Base RL agent: rolls out trajectories from an environment and stores
    them for later policy evaluation.

    Attributes
    ----------
    env : gym-style environment exposing step/reset/action_space.
    n : number of trajectories (patients/objects).
    gamma : reward discount factor.
    buffer : dict, trajectory index -> [S_traj, A_traj, U_traj, T].
    obs_policy : behaviour policy (uniform random by default).
    nums_action : number of discrete actions.
    dims_state : dimension of the state vector.
    last_obs : per-trajectory last observation, kept so an appended block can
        start exactly where the stored trajectory ended.
    """

    def __init__(self, env, n=50, reward_dicount=0.5):
        self.env = env
        self.n = n
        self.gamma = reward_dicount
        self.buffer = {}
        # uniform random behaviour policy (ignores the state)
        self.obs_policy = lambda S: self.env.action_space.sample()
        self.nums_action = self.env.action_space.n
        self.dims_state = 1
        self.last_obs = np.random.normal(0, 1, self.dims_state * self.n).reshape(self.n, self.dims_state)

    #################################
    ###### move one step forward ####
    #################################
    def step_env(self, A):
        """Take action ``A``; return (next state, utility, done flag)."""
        S_next, U, done, _ = self.env.step(A)
        return S_next, U, done

    #################################
    #### generate one trajectory ####
    #################################
    def gen_traj(self, evaluation=False, policy=None, seed=None, S_init=None, burn_in=None):
        """Roll out one full episode.

        Returns [S_traj, A_traj, U_traj, T]; when ``burn_in`` is given, the
        first ``burn_in`` transitions are discarded from every component.
        """
        if policy is None:
            policy = self.obs_policy
        # initialise the start state: seeded reset takes priority, then a
        # fixed S_init, otherwise a plain reset
        if seed is not None:
            self.env.seed(seed)
            S = self.env.reset()
        elif S_init is not None:
            S = self.env.reset(S_init)
        else:
            S = self.env.reset()
        S_traj, A_traj, U_traj = [S], [], []
        done = False
        while not done:
            A = policy(S)
            S_next, U, done = self.step_env(A)
            S_traj.append(S_next)
            A_traj.append(A)
            U_traj.append(U)
            S = S_next
        T = len(U_traj)
        if burn_in is None:
            return [S_traj, A_traj, U_traj, T]
        return [S_traj[burn_in:], A_traj[burn_in:], U_traj[burn_in:], T - burn_in]

    ####################################
    #### Store multiple trajectories ###
    ####################################
    def gen_buffer(self, policy=None, n=None, S_init=None, burn_in=None, total_N=None):
        """Populate ``self.buffer`` with trajectories.

        If ``total_N`` is None, generate ``n`` (default self.n) trajectories;
        otherwise keep generating until the total number of transitions
        reaches ``total_N``, recording the final self.n and self.total_N.
        """
        if total_N is None:
            if n is None:
                n = self.n
            for i in range(n):
                self.buffer[i] = self.gen_traj(policy=policy, burn_in=burn_in, S_init=S_init)
            return
        count = 0
        i = 0
        while count < total_N:
            self.buffer[i] = self.gen_traj(policy=policy, burn_in=burn_in, S_init=S_init)
            count += self.buffer[i][3]
            i += 1
        self.n = i
        self.total_N = count

    #############################
    #### evaluate given policy###
    #############################
    def evaluate_policy(self, policy, n=20, seed=None, S_init=None, lower_b=None, upper_b=None):
        """Monte-Carlo evaluation of ``policy`` over ``n`` episodes.

        Returns (discounted returns, mean action per episode, placeholder
        values); when BOTH ``lower_b`` and ``upper_b`` are given, also the
        fraction of returns inside [lower_b, upper_b] (empirical coverage).
        """
        # BUGFIX: the original test `lower_b or upper_b is not None` parsed as
        # `lower_b or (upper_b is not None)`, so a falsy lower bound or a
        # single bound mis-selected the branch; coverage needs both bounds.
        check_bounds = lower_b is not None and upper_b is not None
        output = []
        A_percent = []
        value = []
        count = 0
        for i in tqdm(range(n)):
            S, A, U, T = self.gen_traj(policy=policy, seed=seed, S_init=S_init)
            est_Value = sum(map(operator.mul,
                                [self.gamma ** j for j in range(T)],
                                U))
            output.append(est_Value)
            A_percent.append(np.mean(A))
            value.append(0)  # placeholder; subclasses may record Q(S0, A0)
            if check_bounds and lower_b <= est_Value <= upper_b:
                count += 1
        if check_bounds:
            return output, A_percent, value, count / n
        return output, A_percent, value
"""
our SAVE method
"""
class simulation(Agent):
def __init__(self, env, n = 50, reward_dicount = 0.5, scale = "NormCdf", product_tensor = True, DR = False):
super().__init__(env, n, reward_dicount)
self.current_block_idx = [0,1] ## [n,t]
if scale == "NormCdf":
self.scaler = normcdf()
elif scale == "Identity":
self.scaler = iden()
elif scale == "Maxmin":
self.scaler = maxmin()
elif scale == "Cliffwalk_noise":
self.scaler = cliffwalk_noise()
elif scale == "Cliffwalk":
self.scaler = cliffwalk()
self.knot = None
self.para_dim = None
self.product_tensor = product_tensor
self.DR = DR
####################################
#### generate next block ###########
####################################
    def buffer_next_block(self, n_min, T_min, T, n = None, policy = None):
        """Generate the next data block and stage it in ``self.next_block``.

        The (n, T) grid is split into K_n x K_T blocks of size roughly
        (n_min, T_min); ``self.current_block_idx`` tracks the position and
        ``get_next_block_idx`` / ``get_idx_pos`` (from agent_utility) supply
        the next position and its trajectory index range.  Each staged
        trajectory restarts from ``self.last_obs`` so it continues the one
        already stored in the buffer.
        """
        #### store the next block in next_block
        if n is None:
            n = self.n
        self.K_n = n//n_min
        self.K_T = T//T_min
        # already at the final block: nothing left to generate
        if self.current_block_idx[0] == self.K_n and self.current_block_idx[1] == self.K_T:
            self.next_block = {}
        else:
            self.next_block_idx = get_next_block_idx(self.current_block_idx, self.K_n, self.K_T)
            self.next_block = {}
            start_i, end_i, T_block = get_idx_pos(self.next_block_idx, n, T, n_min, T_min)
            # truncate the environment horizon to this block's length
            self.env.T = T_block
            for k in range(start_i, end_i):
                # restart trajectory k from where it previously stopped
                if policy is None:
                    self.next_block[k] = self.gen_traj(S_init = self.last_obs[k].copy())
                else:
                    self.next_block[k] = self.gen_traj(S_init = self.last_obs[k].copy(), policy = policy)
                self.last_obs[k] = self.env.last_ob
##################################################
#### append next block to current block ##########
##################################################
def append_next_block_to_buffer(self):
if len(self.next_block) > 0:
## update current block idx
self.current_block_idx = self.next_block_idx.copy()
self.next_block_idx = get_next_block_idx(self.current_block_idx, self.K_n, self.K_T)
## append self.next_block to self.buffer:
for key, value in self.next_block.items():
if self.buffer.get(key) is None:
self.buffer[key] = value
else:
S, A, U, t = value
self.buffer[key][0].extend(S[1:])
self.buffer[key][1].extend(A)
self.buffer[key][2].extend(U)
self.buffer[key][3] += t
#################################
#### Construct Basis Spline #####
#################################
    def B_spline(self, L = 10, d = 3):
        """Build degree-``d`` B-spline bases with ``L`` knot intervals.

        Knots are placed at empirical quantiles of all (scaled) states in the
        buffer; each state dimension gets L - d basis functions, combined as a
        product tensor (if self.product_tensor) or additively.  Per-action
        parameter vectors ``self.para`` are initialised to zero, with a second
        copy ``self.para_2`` for double Q-learning.

        NOTE(review): ``self.knot`` holds a single quantile array but is
        indexed by dimension below — this only works when dims_state == 1;
        confirm before using multi-dimensional states.
        """
        # pool every observed state across all trajectories
        data = []
        for i in range( len(self.buffer)):
            data.extend(self.buffer[i][0])
        scale_data = (self.scaler.transform(data))
        self.knot = [np.quantile(scale_data, np.linspace(0,1,L + 1), axis=0)]
        print("printing knot for bspline", self.knot)
        self.bspline = []
        self.para_dim = [1 if self.product_tensor else 0][0] ################ if dimension of state is more than 2, we use additive tensor ############
        for i in range(self.dims_state):
            tmp = []
            for j in range(L - d):
                # j-th basis: a B-spline with a one-hot coefficient vector
                cof = [0] * (L - d)
                cof[j] = 1
                spf = BSpline(self.knot[i], cof, d)
                tmp.append(spf)
            self.bspline.append(tmp)
            ############### if dimension of state is more than 2, we use additive tensor ############
            if self.product_tensor:
                self.para_dim *= len(self.bspline[i])
            else:
                self.para_dim += len(self.bspline[i])
            ########################################################################################
            print("Building %d-th basis spline (total %d state dimemsion) which has %d basis " %(i, self.dims_state,len(self.bspline[i]) ))
        self.para = {}
        for i in range(self.nums_action):
            # normal with zero mean and zero sd == all-zero initial parameters
            self.para[i] = np.random.normal(0,0,self.para_dim)
        self.para_2 = self.para.copy()  # second parameter set reserved for double Q-learning
    def B_spline_degrade(self):
        """Replace smooth splines with bin-indicator ("histogram") bases.

        Builds 37 indicator functions per state dimension: basis j fires when
        the normalised state lies within 1/48 of j/47 — presumably one bin per
        non-terminal cell of the cliff-walking grid; confirm against the
        environment's state encoding.  Parameters start at zero, with a second
        copy for double Q-learning.
        """
        data = []
        for i in range( len(self.buffer)):
            data.extend(self.buffer[i][0])
        # scale_data is retained from the spline version but unused here
        scale_data = (self.scaler.transform(data))
        # self.knot = [np.quantile(scale_data, np.linspace(0,1,L + 1), axis=0)]
        # print("printing knot for bspline", self.knot)
        self.bspline = []
        self.para_dim = [1 if self.product_tensor else 0][0] ################ if dimension of state is more than 2, we use additive tensor ############
        for i in range(self.dims_state):
            tmp = []
            for j in range(37):
                # bind j as a default argument so each indicator keeps its own bin
                def spf(x, j = j):
                    return (x < (j / 47) + (1/48)) * (x > (j / 47) - (1/48)) ## note: The x has been normalized
                tmp.append(spf)
            self.bspline.append(tmp)
            ############### if dimension of state is more than 2, we use additive tensor ############
            if self.product_tensor:
                self.para_dim *= len(self.bspline[i])
            else:
                self.para_dim += len(self.bspline[i])
            ########################################################################################
            print("Building %d-th basis spline (total %d state dimemsion) which has %d basis " %(i, self.dims_state,len(self.bspline[i]) ))
        self.para = {}
        for i in range(self.nums_action):
            self.para[i] = np.random.normal(0,0,self.para_dim)
        self.para_2 = self.para.copy()  # second parameter set reserved for double Q-learning
        # for j in range(48):
        #     print(j , self.bspline[0][j](j / 47))
##############################
###### calculate Q function ##
##############################
def Q(self, S, A, predictor = False, double = False):
## input state is original
S = [self.scaler.transform(S)]
## compute Q function
# it is used for linear regression as a predictor
############### if dimension of state is more than 2, we use additive tensor ############
## us np.prod to get the product tensor of result
if self.product_tensor:
output = list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1))))
else:
output = list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)]))
########################################################################################
if predictor:
return output
# it is used for caculating
else:
if double:
return sum(map(operator.mul, output, self.para_2[int(A)])) ## <- apply double Q!
else:
return sum(map(operator.mul, output, self.para[int(A)]))
def V(self, S, policy):
## todo sum over outside
return self.Q(S, policy(S))
def V_int(self, policy, MC_N = None):
#return integrate.dblquad(f, np.NINF, np.Inf, lambda x: np.NINF, lambda x: np.Inf)
if MC_N is None:
f = lambda y,x : self.V(policy = policy, S = (x,y)) * norm.pdf(y) * norm.pdf(x)
return integrate.dblquad(f, -5, 5, lambda x: -5, lambda x: 5)[0]
else:
# if not self.DR:
# x_list = [np.random.normal(size = MC_N) for _ in range(self.dims_state)]
# else:
# print("calculationg value for DR")
# x_list = [np.random.normal(0.5, 0.2, MC_N) for _ in range(self.dims_state)]
x_list = [36] * MC_N
f = lambda x : self.V(policy = policy, S = x)
return np.mean([f(x_list[i]) for i in range(MC_N)])
# f = lambda y,x : self.V(policy = policy, S = (x,y))
# x = np.random.normal(size = MC_N)
# y = np.random.normal(size = MC_N)
# return np.mean([f(y[i],x[i]) for i in range(MC_N)])
##################################
######## update the para #########
##################################
def update_op(self, shuffle = False, batch = None, double = True, Lasso = False):
## obtain predictor and reponse
## target and and predictor(f) in Q learning which is used for for linear prediction
target = {}
f = {}
for i in range(self.nums_action):
target[i] = []
f[i] = []
## shuffle the buffer: if true shuffle the order, other wise don't and apply linear regression to all
if shuffle:
1
else:
print("doing UPdate")
for k in tqdm(range(len(self.buffer))):
#S_scale = self.scaler.transform(self.buffer[k][0])
S = self.buffer[k][0]
A = self.buffer[k][1]
Y = self.buffer[k][2]
T = self.buffer[k][3]
for i in range(T):
if i < T - 1:
a_star = np.argmax([self.Q(S[i + 1], j, predictor = False, double = double)
for j in range(self.nums_action)]) ## use double Q learning..
target[int(A[i])].append(Y[i] +
self.gamma * self.Q(S[i + 1], a_star, predictor = False) )
# max([self.Q(S[i + 1], i, predictor = False)
# for i in range(self.nums_action)]))
else:
target[int(A[i])].append(Y[i])
f[int(A[i])].append(self.Q(S[i],A[i], predictor = True))
## use target and f to update the parameters
self.para_2 = self.para.copy()
for i in range(self.nums_action):
if Lasso:
reg = linear_model.Lasso(alpha=0.1, fit_intercept = False)
else:
reg = LinearRegression(fit_intercept = False)
reg.fit(np.array(f[i]), np.array(target[i]))
self.para[i] = reg.coef_
def update_op_policy(self, policy, shuffle = False, batch = None):
## obtain predictor and reponse
## target and and predictor(f) in Q learning which is used for for linear prediction
target = {}
f = {}
for i in range(self.nums_action):
target[i] = []
f[i] = []
## shuffle the buffer: if true shuffle the order, other wise don't and apply linear regression to all
if shuffle:
1
else:
print("doing UPdate")
for k in tqdm(range(self.n)):
#S_scale = self.scaler.transform(self.buffer[k][0])
S = self.buffer[k][0]
A = self.buffer[k][1]
Y = self.buffer[k][2]
T = self.buffer[k][3]
for i in range(T):
if i < T - 1:
target[int(A[i])].append(Y[i] +
self.gamma * self.Q(S[i + 1], policy(S[i + 1]), predictor = False) )
# max([self.Q(S[i + 1], i, predictor = False)
# for i in range(self.nums_action)]))
else:
target[int(A[i])].append(Y[i])
f[int(A[i])].append(self.Q(S[i],A[i], predictor = True))
## use target and f to update the parameters
self.para_2 = self.para.copy()
for i in range(self.nums_action):
reg = LinearRegression(fit_intercept = False)
reg.fit(np.array(f[i]), np.array(target[i]))
self.para[i] = reg.coef_
########################################
######### obtain the optimal policy ####
########################################
def opt_policy(self, S, epsilon = 0.0):
# output Action
if np.random.uniform(0,1) < epsilon:
return self.obs_policy(S)
else:
return np.argmax([self.Q(S,i, predictor = False ) for i in range(self.nums_action)])
def _stretch_para(self):
self.all_para = []
for i in self.para.values():
self.all_para.extend(i)
self.all_para = np.array(self.all_para)
#############################################################################################
########################## make inference on beta ###########################################
#############################################################################################
def _Xi(self, S, A):
S = [self.scaler.transform(S)]
if A == 0:
############### if dimension of state is more than 2, we use additive tensor ############
if self.product_tensor:
return np.array(list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 3 * self.para_dim).reshape(-1,1)
else:
return np.array(list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 3 * self.para_dim).reshape(-1,1)
elif A == 1:
if self.product_tensor:
return np.array([0] * self.para_dim + list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 2 * self.para_dim).reshape(-1,1)
else:
return np.array([0] * self.para_dim + list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 2 * self.para_dim).reshape(-1,1)
elif A == 2:
if self.product_tensor:
return np.array([0] * 2 * self.para_dim + list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 1 * self.para_dim).reshape(-1,1)
else:
return np.array([0] * 2 * self.para_dim + list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 1 * self.para_dim).reshape(-1,1)
elif A == 3:
if self.product_tensor:
return np.array([0] * 3 * self.para_dim + list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 0 * self.para_dim).reshape(-1,1)
else:
return np.array([0] * 3 * self.para_dim + list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 0 * self.para_dim).reshape(-1,1)
#############################################################################################
def _U(self, S, policy):
## todo: need to change to random
return self._Xi(S, policy(S))
    def _Sigma(self, policy, block = False):
        """Accumulate the LSTD moment matrix ``Sigma_hat`` and the
        reward-feature vector ``self.vector`` over all transitions.

        Sigma_hat = (1/T_total) sum_t xi_t (xi_t - gamma * U(S_{t+1}))'
        vector    = (1/T_total) sum_t Y_t xi_t
        With ``block=True`` the staged ``next_block`` data is used instead of
        the full buffer (the SAVE blockwise-inference path).  Also sets
        ``self.total_T``.
        """
        output = np.zeros((self.para_dim * self.nums_action, self.para_dim * self.nums_action))
        output_2 = np.zeros((self.para_dim * self.nums_action, 1))
        total_T = 0
        if not block:
            for i in tqdm(self.buffer.keys()):
                T = self.buffer[i][3]
                total_T += T
                for j in range(T):
                    S = self.buffer[i][0][j]
                    # S_traj has T+1 entries, so j+1 is always valid
                    S_next = self.buffer[i][0][j + 1]
                    A = self.buffer[i][1][j]
                    Y = self.buffer[i][2][j]
                    if Y < -10:
                        ## deal with terminate state which Y == -100: no bootstrap term
                        output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A)).T))
                    else:
                        output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A) - self.gamma * self._U(S_next, policy = policy)).T))
                    # output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A) - self.gamma * self._U(S_next, policy = policy)).T))
                    output_2 += Y * self._Xi(S,A)
        else:
            for i in self.next_block.keys():
                T = self.next_block[i][3]
                total_T += T
                for j in range(T):
                    S = self.next_block[i][0][j] ## do the inference on the next_block (SAVE!)
                    S_next = self.next_block[i][0][j + 1]
                    A = self.next_block[i][1][j]
                    Y = self.next_block[i][2][j]
                    # NOTE(review): this branch has no Y < -10 terminal-state
                    # guard, unlike the buffer branch above — confirm intended
                    output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A) - self.gamma * self._U(S_next, policy = policy)).T))
                    #output_2 += Y * self._Xi(S,A) !!
                    output_2 += Y * self._Xi(S,A)
        self.total_T = total_T
        self.Sigma_hat = output / total_T
        #if not block: !!
        # self.vector = output_2 / total_T
        self.vector = output_2 / total_T
def _beta_hat(self, policy, block = False):
self._Sigma(policy, block = block)
self.inv_Sigma_hat = inv(self.Sigma_hat)
#if not block: !!
# self.est_beta = np.matmul(self.inv_Sigma_hat, self.vector)
self.est_beta = np.matmul(self.inv_Sigma_hat, self.vector)
## store the estimated beta in self.para
def _store_para(self, est_beta):
for i in range(self.nums_action):
self.para[i] = self.est_beta[ i * self.para_dim : (i + 1)* self.para_dim].reshape(-1)
    def _Omega_hat(self, policy, block = False):
        """Estimate the middle matrix of the sandwich variance:
        Omega = (1/T_total) sum_t (TD-error_t)^2 * xi_t xi_t'.

        First re-estimates beta (``_beta_hat``) and writes it into
        ``self.para`` so that Q/V below use the LSTD solution.  With
        ``block=True`` only the staged ``next_block`` data is used.
        """
        self._beta_hat(policy, block = block)
        self._store_para(self.est_beta)
        output = np.zeros((self.para_dim * self.nums_action, self.para_dim * self.nums_action))
        if not block:
            for i in self.buffer.keys():
                T = self.buffer[i][3]
                for j in range(T - 1):
                    S = self.buffer[i][0][j]
                    S_next = self.buffer[i][0][j + 1]
                    A = self.buffer[i][1][j]
                    U = self.buffer[i][2][j]
                    Xi = self._Xi(S,A)
                    if U < -10:
                        # terminal transition (U == -100): no bootstrap term in the TD error
                        output += ((U - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
                    else:
                        output += ((U + self.gamma * (self.V(S_next, policy)) - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
        else:
            ## if block is true, we use the data in next_block to obtain CI
            for i in self.next_block.keys():
                T = self.next_block[i][3]
                for j in range(T - 1):
                    S = self.next_block[i][0][j]
                    S_next = self.next_block[i][0][j + 1]
                    A = self.next_block[i][1][j]
                    U = self.next_block[i][2][j]
                    Xi = self._Xi(S, A)
                    if U < -10:
                        output += ((U - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
                    else:
                        output += ((U + self.gamma * (self.V(S_next, policy)) - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
        # normalized by total_T set in _Sigma (via _beta_hat above)
        self.Omega = output / self.total_T
#### for S_init individual
def _sigma(self, policy, S, block = False):
self._Omega_hat(policy, block = block)
self.sigma2 = reduce(np.matmul, [self._U(S, policy).T, self.inv_Sigma_hat, self.Omega, self.inv_Sigma_hat.T, self._U(S, policy)])
def inference(self, policy, S, alpha = 0.05, block = False):
self._sigma(policy, S, block = block) ## estimate the beta
V = self.V(S, policy)
return V - norm.ppf(1 - alpha/2) * self.sigma2 ** 0.5 / (self.total_T ** 0.5), V + norm.ppf(1 - alpha/2) * self.sigma2 ** 0.5 / (self.total_T ** 0.5)
#################################################################################################
##### for S_init with integration (S init is a distribution other than a fixed point) ###########
#################################################################################################
    def _sigma_int(self, policy, block = False, U_int_store = "U_int_store", MC_N = None):
        """Sandwich variance for the initial-state-INTEGRATED value.

        The feature vector U is either averaged by Monte Carlo
        (``U_int_store is None``, requires ``MC_N``; here the cliff-walking
        start state is always 36) or loaded from a pickle file keyed by
        sqrt(para_dim).  Sets ``self.sigma2``.
        """
        print("start calculating Omega....")
        self._Omega_hat(policy, block = block)
        print("start extracting U....")
        ## get U int from pickle file!
        if U_int_store is None:
            if MC_N is None:
                raise ValueError("NEED MC_N is NOT None..")
            U_int = []
            # x = np.random.normal(size = MC_N)
            # y = np.random.normal(size = MC_N)
            # print(self.DR)
            # if not self.DR:
            #     x_list = [np.random.normal(size = MC_N) for _ in range(self.dims_state)]
            # else:
            #     print("calculationg sigma for DR")
            #     x_list = [np.random.normal(0.5, 0.2, MC_N) for _ in range(self.dims_state)]
            print("initial is always 36 for cliffwalk")
            x_list = [36] * MC_N
            f = lambda x : self._U(policy = policy, S = x)
            for ele in range(self.para_dim * self.nums_action):
                print("integrating para %d, total number of parameters is %d*%d"% (ele, self.nums_action, self.para_dim))
                U_int.append(np.mean([f(x_list[i])[ele] for i in range(MC_N)]))
            # NOTE(review): U_int is left 1-D here, whereas the file branch
            # below reshapes to (-1, 1); both work with np.matmul — confirm
            U_int = np.array(U_int)
        else:
            filename = U_int_store
            outfile = open(filename,'rb')
            # pickle holds U vectors keyed by sqrt(para_dim)
            U_int = np.array(pickle.load(outfile)[int(self.para_dim**0.5)]).reshape(-1,1)
            outfile.close()
        ## get sigma2 via the sandwich formula
        print("start obtaining sigma2....")
        self.sigma2 = reduce(np.matmul, [U_int.T, self.inv_Sigma_hat, self.Omega, self.inv_Sigma_hat.T, U_int])
        print("sigma2", self.sigma2)
        # print(U_int.T, self.inv_Sigma_hat, self.Omega, self.inv_Sigma_hat.T, U_int)
    def inference_int(self, policy, alpha = 0.05, U_int_store = None, block = False, MC_N = 10000, fitted_Q = False):
        """Two-sided (1 - alpha) CI for the initial-state-integrated value.

        ``fitted_Q`` is accepted but never read (deprecated per the note below).
        """
        ############################################################################################################
        ##### Note 1 : MC_N = None   : we use built-in function to get numerical integration for V
        #####          MC_N = 10000  : we use MC to get numerical integration for V
        ##### Note 2 : U_int_store = "U_int_store" : we use stored U to calculate U
        #####          U_int_store = None : we use MC to get numerical integration for U <-- it need MC is not None
        ##### Note 3 : fitted_Q = False : we use LSE to re-calculate the self.para
        #####          fitted_Q = True : we use current stored self.para (according to the main_est*, it is fitted-Q).
        #####                            <-- wrong!! fitted_Q should always be False ! depreciated!!
        ############################################################################################################
        self._sigma_int(policy, U_int_store = U_int_store, block = block, MC_N = MC_N)
        print("start getting V value (slow.. need to numerical integration)....")
        start = time.time()
        V = self.V_int(policy, MC_N)
        print("Finshed! cost %d time" % (time.time() - start))
        return V - norm.ppf(1 - alpha/2) * (self.sigma2 ** 0.5) / (self.total_T ** 0.5), V + norm.ppf(1 - alpha/2) * (self.sigma2 ** 0.5) / (self.total_T ** 0.5)
| from .simulator import *
from .agent_utility import *
import operator
from itertools import product
from itertools import accumulate
import numpy as np
import random
import pickle
import os.path
import time
from scipy.interpolate import BSpline
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
from numpy.linalg import inv
from functools import reduce
from scipy.stats import norm
from scipy import integrate
from scipy.stats import norm
from tqdm import tqdm
"""
Totally tailored to the cliff-walking environment:
1. modifies the action space (and hence the xi feature dimension)
2. (reserved for further cliff-walking-specific changes)
"""
class Agent(object):
def __init__(self, env, n = 50, reward_dicount = 0.5):
#############################################################################
#############################################################################
### self.env : store the dynamic environment
### self.n : store the number of patients(objects)
### self.gamma : store the discount
### self.buffer : store the data buffer
### self.obs_policy : uniformly sample (by default)
### self.nums_action : store the number of discrete actions that can be chosen
### self.dims_state : store the dimension of the state
#############################################################################
### self.last_obs : store the last observation which is particularly designed for append block to make
### sure that the append block's first state can match the last state in current buffer
### self.current_block_idx : store the current position of the block
#############################################################################
### self.scaler : store the scaler which should be applied to bound the state into [0,1]
#############################################################################
### self.knot : store the quantile knots for basis spline
### self.para : store the the dimension of parameter built in basis spline
#############################################################################
self.env = env
self.n = n
self.gamma = reward_dicount
self.buffer = {}
self.obs_policy = lambda S : self.env.action_space.sample()
self.nums_action = self.env.action_space.n
self.dims_state = 1
self.last_obs = np.random.normal(0,1,self.dims_state * self.n).reshape(self.n,self.dims_state)
#################################
###### move one step forward ####
#################################
def step_env(self, A):
S_next, U, done, _ = self.env.step(A)
return S_next, U, done
#################################
#### generate one trajectory ####
#################################
def gen_traj(self, evaluation = False, policy = None, seed = None, S_init = None, burn_in = None):
#############################################################################
######### OUTPUT: state, action, utility trajectory and T ###################
#############################################################################
if policy is None:
policy = self.obs_policy
## initialize the state
if seed is None and S_init is None:
S = self.env.reset()
elif seed is not None:
#np.random.seed(seed)
#random.seed(seed)
self.env.seed(seed)
S = self.env.reset()
elif S_init is not None:
S = self.env.reset(S_init)
S_traj = [S]
A_traj = []
U_traj = []
done = False
while not done:
A = policy(S)
S_next, U, done = self.step_env(A)
S_traj.append(S_next)
A_traj.append(A)
U_traj.append(U)
S = S_next # update current S as S_next
T = len(U_traj)
## output state, action, utility trajectory and T
if burn_in is None:
return [S_traj, A_traj, U_traj, T]
else:
return [S_traj[burn_in:], A_traj[burn_in:], U_traj[burn_in:], T - burn_in]
####################################
#### Store multiple trajectories ###
####################################
def gen_buffer(self, policy = None, n = None, S_init = None, burn_in = None, total_N = None): # Get observations
if total_N is None:
if n is None:
n = self.n
for i in range(n):
#self.buffer[(i)] = None
self.buffer[(i)] = self.gen_traj(policy = policy, burn_in = burn_in, S_init = S_init)
else:
count = 0
i = 0
while count < total_N:
self.buffer[(i)] = self.gen_traj(policy = policy, burn_in = burn_in, S_init = S_init)
count += self.buffer[(i)][3]
i += 1
self.n = i
self.total_N = count
#############################
#### evaluate given policy###
#############################
def evaluate_policy(self, policy, n = 20, seed = None, S_init = None, lower_b = None, upper_b = None):
output = []
A_percent = []
value = []
count = 0
for i in tqdm(range(n)): ## evaluation on n people
S, A, U, T = self.gen_traj(policy = policy, seed = seed, S_init = S_init)
est_Value = sum(map(operator.mul,
[self.gamma ** j for j in range(T)],
U))
output.append(est_Value)
A_percent.append(np.mean(A))
#value.append(np.mean(self.Q(S[0],A[0])))
value.append(0)
if lower_b or upper_b is not None:
if est_Value >= lower_b and est_Value <= upper_b:
count += 1
if lower_b or upper_b is not None:
return output, A_percent, value, count / n
else:
return output, A_percent, value
"""
our SAVE method
"""
class simulation(Agent):
    def __init__(self, env, n = 50, reward_dicount = 0.5, scale = "NormCdf", product_tensor = True, DR = False):
        """SAVE simulation agent.

        ``scale`` selects the state-scaling transform (constructors imported
        from agent_utility); ``product_tensor`` chooses tensor-product vs
        additive spline basis; ``DR`` flags the doubly-robust variant.
        """
        super().__init__(env, n, reward_dicount)
        self.current_block_idx = [0,1] ## [n,t]
        # pick the scaler that maps raw states into [0, 1]
        if scale == "NormCdf":
            self.scaler = normcdf()
        elif scale == "Identity":
            self.scaler = iden()
        elif scale == "Maxmin":
            self.scaler = maxmin()
        elif scale == "Cliffwalk_noise":
            self.scaler = cliffwalk_noise()
        elif scale == "Cliffwalk":
            self.scaler = cliffwalk()
        # spline knots / parameter dimension are set later by B_spline()
        self.knot = None
        self.para_dim = None
        self.product_tensor = product_tensor
        self.DR = DR
####################################
#### generate next block ###########
####################################
    def buffer_next_block(self, n_min, T_min, T, n = None, policy = None):
        """Generate the next data block and stage it in ``self.next_block``.

        The n x T data grid is split into K_n x K_T blocks of size at least
        (n_min, T_min).  New trajectories restart from ``self.last_obs`` so the
        block continues the existing data; when the current block index is the
        last one, ``next_block`` is left empty.  Uses ``get_next_block_idx`` /
        ``get_idx_pos`` from agent_utility.
        """
        #### store the next block in next_block
        if n is None:
            n = self.n
        self.K_n = n//n_min
        self.K_T = T//T_min
        if self.current_block_idx[0] == self.K_n and self.current_block_idx[1] == self.K_T:
            # already at the final block: nothing left to generate
            self.next_block = {}
        else:
            self.next_block_idx = get_next_block_idx(self.current_block_idx, self.K_n, self.K_T)
            self.next_block = {}
            start_i, end_i, T_block = get_idx_pos(self.next_block_idx, n, T, n_min, T_min)
            # truncate the environment horizon to this block's length
            self.env.T = T_block
            for k in range(start_i, end_i):
                if policy is None:
                    self.next_block[k] = self.gen_traj(S_init = self.last_obs[k].copy())
                else:
                    self.next_block[k] = self.gen_traj(S_init = self.last_obs[k].copy(), policy = policy)
                # remember where trajectory k stopped for the next block
                self.last_obs[k] = self.env.last_ob
##################################################
#### append next block to current block ##########
##################################################
    def append_next_block_to_buffer(self):
        """Merge the staged ``next_block`` into ``self.buffer`` and advance the
        block index (no-op when ``next_block`` is empty)."""
        if len(self.next_block) > 0:
            ## update current block idx
            self.current_block_idx = self.next_block_idx.copy()
            self.next_block_idx = get_next_block_idx(self.current_block_idx, self.K_n, self.K_T)
            ## append self.next_block to self.buffer:
            for key, value in self.next_block.items():
                if self.buffer.get(key) is None:
                    self.buffer[key] = value
                else:
                    S, A, U, t = value
                    # drop the first state: it duplicates the buffer's last state
                    self.buffer[key][0].extend(S[1:])
                    self.buffer[key][1].extend(A)
                    self.buffer[key][2].extend(U)
                    self.buffer[key][3] += t
#################################
#### Construct Basis Spline #####
#################################
    def B_spline(self, L = 10, d = 3):
        """Build the per-dimension B-spline basis from the buffered states.

        Knots are the empirical quantiles of the SCALED states (L + 1 of
        them); each dimension gets L - d basis functions of degree ``d``.
        Sets ``self.bspline``, ``self.para_dim`` (product or additive, per
        ``self.product_tensor``) and zero-initializes ``self.para`` /
        ``self.para_2``.
        """
        data = []
        for i in range( len(self.buffer)):
            data.extend(self.buffer[i][0])
        scale_data = (self.scaler.transform(data))
        # quantile knots over the scaled data
        self.knot = [np.quantile(scale_data, np.linspace(0,1,L + 1), axis=0)]
        print("printing knot for bspline", self.knot)
        self.bspline = []
        self.para_dim = [1 if self.product_tensor else 0][0] ################ if dimension of state is more than 2, we use additive tensor ############
        for i in range(self.dims_state):
            tmp = []
            for j in range(L - d):
                # indicator coefficient vector picks out the j-th basis function
                cof = [0] * (L - d)
                cof[j] = 1
                spf = BSpline(self.knot[i], cof, d)
                tmp.append(spf)
            self.bspline.append(tmp)
            ############### if dimension of state is more than 2, we use additive tensor ############
            if self.product_tensor:
                self.para_dim *= len(self.bspline[i])
            else:
                self.para_dim += len(self.bspline[i])
            ########################################################################################
            print("Building %d-th basis spline (total %d state dimemsion) which has %d basis " %(i, self.dims_state,len(self.bspline[i]) ))
        self.para = {}
        for i in range(self.nums_action):
            # np.random.normal(0, 0, ...) is an all-zeros init of the right shape
            self.para[i] = np.random.normal(0,0,self.para_dim)
        self.para_2 = self.para.copy() ### reserve a copy for double Q-learning
    def B_spline_degrade(self):
        """Degraded (piecewise-indicator) basis: one bump indicator per grid
        cell instead of smooth B-splines.

        NOTE(review): 37 indicators are built over a /47 grid while a nearby
        comment mentions 48 cells — confirm the intended grid size.
        """
        data = []
        for i in range( len(self.buffer)):
            data.extend(self.buffer[i][0])
        scale_data = (self.scaler.transform(data))
        # self.knot = [np.quantile(scale_data, np.linspace(0,1,L + 1), axis=0)]
        # print("printing knot for bspline", self.knot)
        self.bspline = []
        self.para_dim = [1 if self.product_tensor else 0][0] ################ if dimension of state is more than 2, we use additive tensor ############
        for i in range(self.dims_state):
            tmp = []
            for j in range(37):
                # default arg j=j binds the CURRENT j (avoids the late-binding closure pitfall)
                def spf(x, j = j):
                    return (x < (j / 47) + (1/48)) * (x > (j / 47) - (1/48)) ## note: The x has been normalized
                tmp.append(spf)
            self.bspline.append(tmp)
            ############### if dimension of state is more than 2, we use additive tensor ############
            if self.product_tensor:
                self.para_dim *= len(self.bspline[i])
            else:
                self.para_dim += len(self.bspline[i])
            ########################################################################################
            print("Building %d-th basis spline (total %d state dimemsion) which has %d basis " %(i, self.dims_state,len(self.bspline[i]) ))
        self.para = {}
        for i in range(self.nums_action):
            # all-zeros initialization of the right shape
            self.para[i] = np.random.normal(0,0,self.para_dim)
        self.para_2 = self.para.copy() ### reserve a copy for double Q-learning
# for j in range(48):
# print(j , self.bspline[0][j](j / 47))
##############################
###### calculate Q function ##
##############################
    def Q(self, S, A, predictor = False, double = False):
        """Evaluate the spline feature vector at ``S`` and, unless ``predictor``,
        the linear Q-value for action ``A``.

        ``S`` is in the ORIGINAL state space and is scaled first.  With
        ``predictor=True`` the raw feature vector is returned (used as the
        design row for the regression in ``update_op``).  With ``double=True``
        the second parameter set ``self.para_2`` is used (double Q-learning
        evaluation network); otherwise ``self.para``.
        """
        ## input state is original; scale it before applying the basis
        S = [self.scaler.transform(S)]
        ## compute Q function
        # it is used for linear regression as a predictor
        ############### if dimension of state is more than 2, we use additive tensor ############
        ## use np.prod to get the product tensor of result
        if self.product_tensor:
            # full tensor-product basis: cross-product of every per-dimension basis value
            output = list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1))))
        else:
            # additive basis: concatenate per-dimension basis evaluations
            output = list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)]))
        ########################################################################################
        if predictor:
            return output
        # it is used for calculating the Q-value itself
        else:
            if double:
                return sum(map(operator.mul, output, self.para_2[int(A)])) ## <- apply double Q!
            else:
                return sum(map(operator.mul, output, self.para[int(A)]))
def V(self, S, policy):
## todo sum over outside
return self.Q(S, policy(S))
    def V_int(self, policy, MC_N = None):
        """Initial-state-integrated value of ``policy``.

        ``MC_N is None``  -> 2-D numerical quadrature over N(0,1) x N(0,1)
        initial states.  NOTE(review): this branch assumes a 2-dimensional
        state; with ``dims_state == 1`` (as set in ``Agent.__init__``) pass
        ``MC_N`` instead — confirm before using the quadrature path.
        ``MC_N`` given -> Monte-Carlo average; the initial state here is the
        fixed cliff-walking start state 36, so this reduces to V(36, policy).
        """
        #return integrate.dblquad(f, np.NINF, np.Inf, lambda x: np.NINF, lambda x: np.Inf)
        if MC_N is None:
            f = lambda y,x : self.V(policy = policy, S = (x,y)) * norm.pdf(y) * norm.pdf(x)
            return integrate.dblquad(f, -5, 5, lambda x: -5, lambda x: 5)[0]
        else:
            # if not self.DR:
            #     x_list = [np.random.normal(size = MC_N) for _ in range(self.dims_state)]
            # else:
            #     print("calculationg value for DR")
            #     x_list = [np.random.normal(0.5, 0.2, MC_N) for _ in range(self.dims_state)]
            # cliff-walking start state is always 36
            x_list = [36] * MC_N
            f = lambda x : self.V(policy = policy, S = x)
            return np.mean([f(x_list[i]) for i in range(MC_N)])
            # f = lambda y,x : self.V(policy = policy, S = (x,y))
            # x = np.random.normal(size = MC_N)
            # y = np.random.normal(size = MC_N)
            # return np.mean([f(y[i],x[i]) for i in range(MC_N)])
##################################
######## update the para #########
##################################
def update_op(self, shuffle = False, batch = None, double = True, Lasso = False):
## obtain predictor and reponse
## target and and predictor(f) in Q learning which is used for for linear prediction
target = {}
f = {}
for i in range(self.nums_action):
target[i] = []
f[i] = []
## shuffle the buffer: if true shuffle the order, other wise don't and apply linear regression to all
if shuffle:
1
else:
print("doing UPdate")
for k in tqdm(range(len(self.buffer))):
#S_scale = self.scaler.transform(self.buffer[k][0])
S = self.buffer[k][0]
A = self.buffer[k][1]
Y = self.buffer[k][2]
T = self.buffer[k][3]
for i in range(T):
if i < T - 1:
a_star = np.argmax([self.Q(S[i + 1], j, predictor = False, double = double)
for j in range(self.nums_action)]) ## use double Q learning..
target[int(A[i])].append(Y[i] +
self.gamma * self.Q(S[i + 1], a_star, predictor = False) )
# max([self.Q(S[i + 1], i, predictor = False)
# for i in range(self.nums_action)]))
else:
target[int(A[i])].append(Y[i])
f[int(A[i])].append(self.Q(S[i],A[i], predictor = True))
## use target and f to update the parameters
self.para_2 = self.para.copy()
for i in range(self.nums_action):
if Lasso:
reg = linear_model.Lasso(alpha=0.1, fit_intercept = False)
else:
reg = LinearRegression(fit_intercept = False)
reg.fit(np.array(f[i]), np.array(target[i]))
self.para[i] = reg.coef_
def update_op_policy(self, policy, shuffle = False, batch = None):
## obtain predictor and reponse
## target and and predictor(f) in Q learning which is used for for linear prediction
target = {}
f = {}
for i in range(self.nums_action):
target[i] = []
f[i] = []
## shuffle the buffer: if true shuffle the order, other wise don't and apply linear regression to all
if shuffle:
1
else:
print("doing UPdate")
for k in tqdm(range(self.n)):
#S_scale = self.scaler.transform(self.buffer[k][0])
S = self.buffer[k][0]
A = self.buffer[k][1]
Y = self.buffer[k][2]
T = self.buffer[k][3]
for i in range(T):
if i < T - 1:
target[int(A[i])].append(Y[i] +
self.gamma * self.Q(S[i + 1], policy(S[i + 1]), predictor = False) )
# max([self.Q(S[i + 1], i, predictor = False)
# for i in range(self.nums_action)]))
else:
target[int(A[i])].append(Y[i])
f[int(A[i])].append(self.Q(S[i],A[i], predictor = True))
## use target and f to update the parameters
self.para_2 = self.para.copy()
for i in range(self.nums_action):
reg = LinearRegression(fit_intercept = False)
reg.fit(np.array(f[i]), np.array(target[i]))
self.para[i] = reg.coef_
########################################
######### obtain the optimal policy ####
########################################
def opt_policy(self, S, epsilon = 0.0):
# output Action
if np.random.uniform(0,1) < epsilon:
return self.obs_policy(S)
else:
return np.argmax([self.Q(S,i, predictor = False ) for i in range(self.nums_action)])
def _stretch_para(self):
self.all_para = []
for i in self.para.values():
self.all_para.extend(i)
self.all_para = np.array(self.all_para)
#############################################################################################
########################## make inference on beta ###########################################
#############################################################################################
def _Xi(self, S, A):
S = [self.scaler.transform(S)]
if A == 0:
############### if dimension of state is more than 2, we use additive tensor ############
if self.product_tensor:
return np.array(list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 3 * self.para_dim).reshape(-1,1)
else:
return np.array(list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 3 * self.para_dim).reshape(-1,1)
elif A == 1:
if self.product_tensor:
return np.array([0] * self.para_dim + list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 2 * self.para_dim).reshape(-1,1)
else:
return np.array([0] * self.para_dim + list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 2 * self.para_dim).reshape(-1,1)
elif A == 2:
if self.product_tensor:
return np.array([0] * 2 * self.para_dim + list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 1 * self.para_dim).reshape(-1,1)
else:
return np.array([0] * 2 * self.para_dim + list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 1 * self.para_dim).reshape(-1,1)
elif A == 3:
if self.product_tensor:
return np.array([0] * 3 * self.para_dim + list(map(np.prod,(product(*[np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)],repeat=1)))) + [0] * 0 * self.para_dim).reshape(-1,1)
else:
return np.array([0] * 3 * self.para_dim + list(np.concatenate([np.array([func(s) for func in f]) for f,s in zip(self.bspline, S)])) + [0] * 0 * self.para_dim).reshape(-1,1)
#############################################################################################
def _U(self, S, policy):
## todo: need to change to random
return self._Xi(S, policy(S))
    def _Sigma(self, policy, block = False):
        """Accumulate the LSTD moment matrix ``Sigma_hat`` and the
        reward-feature vector ``self.vector`` over all transitions.

        Sigma_hat = (1/T_total) sum_t xi_t (xi_t - gamma * U(S_{t+1}))'
        vector    = (1/T_total) sum_t Y_t xi_t
        With ``block=True`` the staged ``next_block`` data is used instead of
        the full buffer (the SAVE blockwise-inference path).  Also sets
        ``self.total_T``.
        """
        output = np.zeros((self.para_dim * self.nums_action, self.para_dim * self.nums_action))
        output_2 = np.zeros((self.para_dim * self.nums_action, 1))
        total_T = 0
        if not block:
            for i in tqdm(self.buffer.keys()):
                T = self.buffer[i][3]
                total_T += T
                for j in range(T):
                    S = self.buffer[i][0][j]
                    # S_traj has T+1 entries, so j+1 is always valid
                    S_next = self.buffer[i][0][j + 1]
                    A = self.buffer[i][1][j]
                    Y = self.buffer[i][2][j]
                    if Y < -10:
                        ## deal with terminate state which Y == -100: no bootstrap term
                        output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A)).T))
                    else:
                        output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A) - self.gamma * self._U(S_next, policy = policy)).T))
                    # output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A) - self.gamma * self._U(S_next, policy = policy)).T))
                    output_2 += Y * self._Xi(S,A)
        else:
            for i in self.next_block.keys():
                T = self.next_block[i][3]
                total_T += T
                for j in range(T):
                    S = self.next_block[i][0][j] ## do the inference on the next_block (SAVE!)
                    S_next = self.next_block[i][0][j + 1]
                    A = self.next_block[i][1][j]
                    Y = self.next_block[i][2][j]
                    # NOTE(review): this branch has no Y < -10 terminal-state
                    # guard, unlike the buffer branch above — confirm intended
                    output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A) - self.gamma * self._U(S_next, policy = policy)).T))
                    #output_2 += Y * self._Xi(S,A) !!
                    output_2 += Y * self._Xi(S,A)
        self.total_T = total_T
        self.Sigma_hat = output / total_T
        #if not block: !!
        # self.vector = output_2 / total_T
        self.vector = output_2 / total_T
def _beta_hat(self, policy, block = False):
self._Sigma(policy, block = block)
self.inv_Sigma_hat = inv(self.Sigma_hat)
#if not block: !!
# self.est_beta = np.matmul(self.inv_Sigma_hat, self.vector)
self.est_beta = np.matmul(self.inv_Sigma_hat, self.vector)
## store the estimated beta in self.para
def _store_para(self, est_beta):
for i in range(self.nums_action):
self.para[i] = self.est_beta[ i * self.para_dim : (i + 1)* self.para_dim].reshape(-1)
    def _Omega_hat(self, policy, block = False):
        """Estimate the middle matrix of the sandwich variance:
        Omega = (1/T_total) sum_t (TD-error_t)^2 * xi_t xi_t'.

        First re-estimates beta (``_beta_hat``) and writes it into
        ``self.para`` so that Q/V below use the LSTD solution.  With
        ``block=True`` only the staged ``next_block`` data is used.
        """
        self._beta_hat(policy, block = block)
        self._store_para(self.est_beta)
        output = np.zeros((self.para_dim * self.nums_action, self.para_dim * self.nums_action))
        if not block:
            for i in self.buffer.keys():
                T = self.buffer[i][3]
                for j in range(T - 1):
                    S = self.buffer[i][0][j]
                    S_next = self.buffer[i][0][j + 1]
                    A = self.buffer[i][1][j]
                    U = self.buffer[i][2][j]
                    Xi = self._Xi(S,A)
                    if U < -10:
                        # terminal transition (U == -100): no bootstrap term in the TD error
                        output += ((U - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
                    else:
                        output += ((U + self.gamma * (self.V(S_next, policy)) - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
        else:
            ## if block is true, we use the data in next_block to obtain CI
            for i in self.next_block.keys():
                T = self.next_block[i][3]
                for j in range(T - 1):
                    S = self.next_block[i][0][j]
                    S_next = self.next_block[i][0][j + 1]
                    A = self.next_block[i][1][j]
                    U = self.next_block[i][2][j]
                    Xi = self._Xi(S, A)
                    if U < -10:
                        output += ((U - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
                    else:
                        output += ((U + self.gamma * (self.V(S_next, policy)) - (self.Q(S, A)))**2) * np.matmul(Xi, Xi.T)
        # normalized by total_T set in _Sigma (via _beta_hat above)
        self.Omega = output / self.total_T
#### for S_init individual
def _sigma(self, policy, S, block = False):
self._Omega_hat(policy, block = block)
self.sigma2 = reduce(np.matmul, [self._U(S, policy).T, self.inv_Sigma_hat, self.Omega, self.inv_Sigma_hat.T, self._U(S, policy)])
def inference(self, policy, S, alpha = 0.05, block = False):
self._sigma(policy, S, block = block) ## estimate the beta
V = self.V(S, policy)
return V - norm.ppf(1 - alpha/2) * self.sigma2 ** 0.5 / (self.total_T ** 0.5), V + norm.ppf(1 - alpha/2) * self.sigma2 ** 0.5 / (self.total_T ** 0.5)
#################################################################################################
##### for S_init with integration (S init is a distribution other than a fixed point) ###########
#################################################################################################
    def _sigma_int(self, policy, block = False, U_int_store = "U_int_store", MC_N = None):
        """Estimate ``sigma^2`` when the initial state follows a distribution.

        The feature vector ``U`` is integrated over the initial-state
        distribution, either by Monte Carlo (``U_int_store is None``, which
        requires ``MC_N``) or by loading a pre-computed vector from the pickle
        file named ``U_int_store``.

        Args:
            policy: policy to evaluate.
            block: forwarded to ``_Omega_hat`` (use held-out block data).
            U_int_store: pickle filename holding a pre-computed integrated U,
                or None to integrate by Monte Carlo.
            MC_N: number of Monte Carlo draws; required when U_int_store is None.

        Side effects:
            sets ``self.sigma2`` (and ``self.Omega`` via ``_Omega_hat``).
        """
        print("start calculating Omega....")
        self._Omega_hat(policy, block = block)
        print("start extracting U....")
        ## get U int from pickle file!
        if U_int_store is None:
            if MC_N is None:
                raise ValueError("NEED MC_N is NOT None..")
            U_int = []
            # x = np.random.normal(size = MC_N)
            # y = np.random.normal(size = MC_N)
            # print(self.DR)
            # if not self.DR:
            # x_list = [np.random.normal(size = MC_N) for _ in range(self.dims_state)]
            # else:
            # print("calculationg sigma for DR")
            # x_list = [np.random.normal(0.5, 0.2, MC_N) for _ in range(self.dims_state)]
            print("initial is always 36 for cliffwalk")
            # The initial state is hard-coded to 36 (cliffwalk start cell, per
            # the print above), so the "Monte Carlo" average is degenerate.
            x_list = [36] * MC_N
            f = lambda x : self._U(policy = policy, S = x)
            for ele in range(self.para_dim * self.nums_action):
                print("integrating para %d, total number of parameters is %d*%d"% (ele, self.nums_action, self.para_dim))
                U_int.append(np.mean([f(x_list[i])[ele] for i in range(MC_N)]))
            U_int = np.array(U_int)
        else:
            filename = U_int_store
            outfile = open(filename,'rb')
            # NOTE(review): the stored object appears to be indexed by
            # sqrt(para_dim) -- confirm against the writer of this pickle.
            U_int = np.array(pickle.load(outfile)[int(self.para_dim**0.5)]).reshape(-1,1)
            outfile.close()
        ## get sigma2: sandwich estimator with the integrated U vector.
        print("start obtaining sigma2....")
        self.sigma2 = reduce(np.matmul, [U_int.T, self.inv_Sigma_hat, self.Omega, self.inv_Sigma_hat.T, U_int])
        print("sigma2", self.sigma2)
        # print(U_int.T, self.inv_Sigma_hat, self.Omega, self.inv_Sigma_hat.T, U_int)
    def inference_int(self, policy, alpha = 0.05, U_int_store = None, block = False, MC_N = 10000, fitted_Q = False):
        """(1 - alpha) CI for the integrated value E[V(S_init)] under ``policy``.

        NOTE(review): ``fitted_Q`` is never used in this body -- deprecated per
        the banner below; kept only for call-site compatibility.
        """
        ############################################################################################################
        ##### Note 1 : MC_N = None : we use built-in function to get numerical integration for V
        #####          MC_N = 10000 : we use MC to get numerical integration for V
        ##### Note 2 : U_int_store = "U_int_store" : we use stored U to calculate U
        #####          U_int_store = None : we use MC to get numerical integration for U <-- it need MC is not None
        ##### Note 3 : fitted_Q = False : we use LSE to re-calculate the self.para
        #####          fitted_Q = True : we use current stored self.para (according to the main_est*, it is fitted-Q).
        #####                    <-- wrong!! fitted_Q should always be False ! depreciated!!
        ############################################################################################################
        self._sigma_int(policy, U_int_store = U_int_store, block = block, MC_N = MC_N)
        print("start getting V value (slow.. need to numerical integration)....")
        start = time.time()
        V = self.V_int(policy, MC_N)
        print("Finshed! cost %d time" % (time.time() - start))
        return V - norm.ppf(1 - alpha/2) * (self.sigma2 ** 0.5) / (self.total_T ** 0.5), V + norm.ppf(1 - alpha/2) * (self.sigma2 ** 0.5) / (self.total_T ** 0.5)
initialize the state #np.random.seed(seed) #random.seed(seed) # update current S as S_next ## output state, action, utility trajectory and T #################################### #### Store multiple trajectories ### #################################### # Get observations #self.buffer[(i)] = None ############################# #### evaluate given policy### ############################# ## evaluation on n people #value.append(np.mean(self.Q(S[0],A[0]))) our SAVE method ## [n,t] #################################### #### generate next block ########### #################################### #### store the next block in next_block ################################################## #### append next block to current block ########## ################################################## ## update current block idx ## append self.next_block to self.buffer: ################################# #### Construct Basis Spline ##### ################################# ################ if dimension of state is more than 2, we use additive tensor ############ ############### if dimension of state is more than 2, we use additive tensor ############ ######################################################################################## ### 留个位置给double # self.knot = [np.quantile(scale_data, np.linspace(0,1,L + 1), axis=0)] # print("printing knot for bspline", self.knot) ################ if dimension of state is more than 2, we use additive tensor ############ ## note: The x has been normalized ############### if dimension of state is more than 2, we use additive tensor ############ ######################################################################################## ### 留个位置给double # for j in range(48): # print(j , self.bspline[0][j](j / 47)) ############################## ###### calculate Q function ## ############################## ## input state is original ## compute Q function # it is used for linear regression as a predictor ############### if dimension of state is more than 2, we use 
additive tensor ############ ## us np.prod to get the product tensor of result ######################################################################################## # it is used for caculating ## <- apply double Q! ## todo sum over outside #return integrate.dblquad(f, np.NINF, np.Inf, lambda x: np.NINF, lambda x: np.Inf) # if not self.DR: # x_list = [np.random.normal(size = MC_N) for _ in range(self.dims_state)] # else: # print("calculationg value for DR") # x_list = [np.random.normal(0.5, 0.2, MC_N) for _ in range(self.dims_state)] # f = lambda y,x : self.V(policy = policy, S = (x,y)) # x = np.random.normal(size = MC_N) # y = np.random.normal(size = MC_N) # return np.mean([f(y[i],x[i]) for i in range(MC_N)]) ################################## ######## update the para ######### ################################## ## obtain predictor and reponse ## target and and predictor(f) in Q learning which is used for for linear prediction ## shuffle the buffer: if true shuffle the order, other wise don't and apply linear regression to all #S_scale = self.scaler.transform(self.buffer[k][0]) ## use double Q learning.. 
# max([self.Q(S[i + 1], i, predictor = False) # for i in range(self.nums_action)])) ## use target and f to update the parameters ## obtain predictor and reponse ## target and and predictor(f) in Q learning which is used for for linear prediction ## shuffle the buffer: if true shuffle the order, other wise don't and apply linear regression to all #S_scale = self.scaler.transform(self.buffer[k][0]) # max([self.Q(S[i + 1], i, predictor = False) # for i in range(self.nums_action)])) ## use target and f to update the parameters ######################################## ######### obtain the optimal policy #### ######################################## # output Action ############################################################################################# ########################## make inference on beta ########################################### ############################################################################################# ############### if dimension of state is more than 2, we use additive tensor ############ ############################################################################################# ## todo: need to change to random ## deal with terminate state which Y == -100 # output += (np.matmul( self._Xi(S, A) , (self._Xi(S, A) - self.gamma * self._U(S_next, policy = policy)).T)) ## do the inference on the next_block (SAVE!) #output_2 += Y * self._Xi(S,A) !! #if not block: !! # self.vector = output_2 / total_T #if not block: !! 
# self.est_beta = np.matmul(self.inv_Sigma_hat, self.vector) ## store the estimated beta in self.para ## if block is true, we use the data in next_block to obtain CI #### for S_init individual ## estimate the beta ################################################################################################# ##### for S_init with integration (S init is a distribution other than a fixed point) ########### ################################################################################################# ## get U int from pickle file! # x = np.random.normal(size = MC_N) # y = np.random.normal(size = MC_N) # print(self.DR) # if not self.DR: # x_list = [np.random.normal(size = MC_N) for _ in range(self.dims_state)] # else: # print("calculationg sigma for DR") # x_list = [np.random.normal(0.5, 0.2, MC_N) for _ in range(self.dims_state)] ## get sigma2 # print(U_int.T, self.inv_Sigma_hat, self.Omega, self.inv_Sigma_hat.T, U_int) ############################################################################################################ ##### Note 1 : MC_N = None : we use built-in function to get numerical integration for V ##### MC_N = 10000 : we use MC to get numerical integration for V ##### Note 2 : U_int_store = "U_int_store" : we use stored U to calculate U ##### U_int_store = None : we use MC to get numerical integration for U <-- it need MC is not None ##### Note 3 : fitted_Q = False : we use LSE to re-calculate the self.para ##### fitted_Q = True : we use current stored self.para (according to the main_est*, it is fitted-Q). ##### <-- wrong!! fitted_Q should always be False ! depreciated!! ############################################################################################################ | 2.412136 | 2 |
ariadne/__init__.py | microns-ariadne/ariadne-pipeline-test-harness | 2 | 6614629 | <filename>ariadne/__init__.py
# init file for the ariadne package.
import tools
import plugin
import plugingen
import pipeline
import deftools
import luigitools
| <filename>ariadne/__init__.py
# init file for the ariadne package.
import tools
import plugin
import plugingen
import pipeline
import deftools
import luigitools
| en | 0.634119 | # init file for the ariadne package. | 1.205548 | 1 |
webserver/json_encoder.py | Maveo/Spark | 2 | 6614630 | <gh_stars>1-10
import json
from typing import TYPE_CHECKING
import discord
from flask.json import JSONEncoder
from discord import Member, ClientUser, User, Guild, Invite, TextChannel, VoiceChannel, Message, Permissions
from helpers.db import InventoryItemType, WheelspinProbability
from helpers.dummys import RoleDummy, MemberDummy
from datetime import datetime
from imagestack_svg.imageresolve import ImageStackResolveString
from helpers.spark_module import SparkModule
if TYPE_CHECKING:
from bot import DiscordBot
def create_json_encoder(bot: 'DiscordBot'):
    """Build a Flask ``JSONEncoder`` subclass that serializes Discord and
    Spark objects.

    Args:
        bot: the running bot; only used to resolve a guild's activated
            modules when a ``Guild`` is serialized.

    Returns:
        The ``DiscordJSONEncoder`` class itself (not an instance), suitable
        for assignment as the Flask app's JSON encoder.
    """
    class DiscordJSONEncoder(JSONEncoder):
        def default(self, o):
            # Datetimes are serialized as a unix timestamp in milliseconds.
            if isinstance(o, datetime):
                return datetime.timestamp(o)*1000
            if isinstance(o, Member) or isinstance(o, MemberDummy):
                return {
                    'id': str(o.id),
                    'tag': str(o.discriminator),
                    'nick': str(o.display_name),
                    'name': str(o.name),
                    'avatar_url': str(o.display_avatar),
                    'top_role': str(o.top_role.name),
                }
            if isinstance(o, ClientUser) or isinstance(o, User):
                return {
                    'id': str(o.id),
                    'nick': str(o.display_name),
                    'name': str(o.name),
                    'avatar_url': str(o.display_avatar),
                }
            if isinstance(o, Guild):
                # Guild icons are optional; keep None instead of 'None'.
                icon_url = o.icon
                if icon_url is not None:
                    icon_url = str(icon_url)
                return {
                    'id': str(o.id),
                    'name': str(o.name),
                    'icon_url': icon_url,
                    'active_modules': bot.module_manager.get_activated_modules(o.id)
                }
            if isinstance(o, Invite):
                return {
                    'channel': o.channel,
                    'code': o.code,
                    'inviter': o.inviter,
                    'max_age': o.max_age,
                    'max_uses': o.max_uses,
                    'revoked': o.revoked,
                    'temporary': o.temporary,
                    'url': o.url,
                    'uses': o.uses,
                }
            if isinstance(o, Permissions):
                # Expand the permission bitfield into {name: bool}.
                return {perm: getattr(o, perm) for perm in discord.Permissions.VALID_FLAGS.keys()}
            if isinstance(o, TextChannel) or isinstance(o, VoiceChannel):
                return {
                    'id': str(o.id),
                    'name': str(o.name),
                }
            if isinstance(o, Message):
                return {
                    'id': str(o.id),
                    'author': o.author,
                    'content': str(o.clean_content),
                    'created_at': o.created_at,
                }
            if isinstance(o, ImageStackResolveString):
                return str(o)
            # NOTE: an identical, unreachable duplicate of this SparkModule
            # branch used to follow it; the dead copy has been removed.
            if isinstance(o, SparkModule):
                return {
                    'name': o.name,
                    'title': o.title,
                    'description': o.description,
                    'dependencies': o.dependencies,
                    'dependency_for': o.dependency_for,
                    'is_optional': o.optional
                }
            if isinstance(o, InventoryItemType):
                return {
                    'id': o.id,
                    'name': o.name,
                    'rarity_id': o.rarity_id,
                    'always_visible': o.always_visible,
                    'tradable': o.tradable,
                    'equippable': o.equippable,
                    'useable': o.useable,
                    'actions': json.loads(o.actions),
                }
            if isinstance(o, WheelspinProbability):
                return {
                    'id': o.id,
                    'item_type_id': o.item_type_id,
                    'probability': o.probability,
                    'amount': o.amount,
                    'sound': o.sound,
                }
            # Fall back to the base implementation (raises TypeError for
            # unsupported types).
            return JSONEncoder.default(self, o)
    return DiscordJSONEncoder
| import json
from typing import TYPE_CHECKING
import discord
from flask.json import JSONEncoder
from discord import Member, ClientUser, User, Guild, Invite, TextChannel, VoiceChannel, Message, Permissions
from helpers.db import InventoryItemType, WheelspinProbability
from helpers.dummys import RoleDummy, MemberDummy
from datetime import datetime
from imagestack_svg.imageresolve import ImageStackResolveString
from helpers.spark_module import SparkModule
if TYPE_CHECKING:
from bot import DiscordBot
def create_json_encoder(bot: 'DiscordBot'):
    """Build a Flask ``JSONEncoder`` subclass that serializes Discord and
    Spark objects.

    Args:
        bot: the running bot; only used to resolve a guild's activated
            modules when a ``Guild`` is serialized.

    Returns:
        The ``DiscordJSONEncoder`` class itself (not an instance), suitable
        for assignment as the Flask app's JSON encoder.
    """
    class DiscordJSONEncoder(JSONEncoder):
        def default(self, o):
            # Datetimes are serialized as a unix timestamp in milliseconds.
            if isinstance(o, datetime):
                return datetime.timestamp(o)*1000
            if isinstance(o, Member) or isinstance(o, MemberDummy):
                return {
                    'id': str(o.id),
                    'tag': str(o.discriminator),
                    'nick': str(o.display_name),
                    'name': str(o.name),
                    'avatar_url': str(o.display_avatar),
                    'top_role': str(o.top_role.name),
                }
            if isinstance(o, ClientUser) or isinstance(o, User):
                return {
                    'id': str(o.id),
                    'nick': str(o.display_name),
                    'name': str(o.name),
                    'avatar_url': str(o.display_avatar),
                }
            if isinstance(o, Guild):
                # Guild icons are optional; keep None instead of 'None'.
                icon_url = o.icon
                if icon_url is not None:
                    icon_url = str(icon_url)
                return {
                    'id': str(o.id),
                    'name': str(o.name),
                    'icon_url': icon_url,
                    'active_modules': bot.module_manager.get_activated_modules(o.id)
                }
            if isinstance(o, Invite):
                return {
                    'channel': o.channel,
                    'code': o.code,
                    'inviter': o.inviter,
                    'max_age': o.max_age,
                    'max_uses': o.max_uses,
                    'revoked': o.revoked,
                    'temporary': o.temporary,
                    'url': o.url,
                    'uses': o.uses,
                }
            if isinstance(o, Permissions):
                # Expand the permission bitfield into {name: bool}.
                return {perm: getattr(o, perm) for perm in discord.Permissions.VALID_FLAGS.keys()}
            if isinstance(o, TextChannel) or isinstance(o, VoiceChannel):
                return {
                    'id': str(o.id),
                    'name': str(o.name),
                }
            if isinstance(o, Message):
                return {
                    'id': str(o.id),
                    'author': o.author,
                    'content': str(o.clean_content),
                    'created_at': o.created_at,
                }
            if isinstance(o, ImageStackResolveString):
                return str(o)
            # NOTE: an identical, unreachable duplicate of this SparkModule
            # branch used to follow it; the dead copy has been removed.
            if isinstance(o, SparkModule):
                return {
                    'name': o.name,
                    'title': o.title,
                    'description': o.description,
                    'dependencies': o.dependencies,
                    'dependency_for': o.dependency_for,
                    'is_optional': o.optional
                }
            if isinstance(o, InventoryItemType):
                return {
                    'id': o.id,
                    'name': o.name,
                    'rarity_id': o.rarity_id,
                    'always_visible': o.always_visible,
                    'tradable': o.tradable,
                    'equippable': o.equippable,
                    'useable': o.useable,
                    'actions': json.loads(o.actions),
                }
            if isinstance(o, WheelspinProbability):
                return {
                    'id': o.id,
                    'item_type_id': o.item_type_id,
                    'probability': o.probability,
                    'amount': o.amount,
                    'sound': o.sound,
                }
            # Fall back to the base implementation (raises TypeError for
            # unsupported types).
            return JSONEncoder.default(self, o)
    return DiscordJSONEncoder
optimizer/penkit_optimize/vrp_solver.py | ling1729/penkit | 1 | 6614631 | from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
from time import time
def vrp_solver(path_graph, initial_solution=None, runtime_seconds=60):
    """Solve a path using or-tools' Vehicle Routing Problem solver.

    Params:
        path_graph          the PathGraph representing the problem
        initial_solution    a solution to start with (list of indices, not
                            including the origin)
        runtime_seconds     how long to search before returning

    Returns: an ordered list of indices in the graph representing a
    solution.

    Raises:
        RuntimeError: if or-tools rejects the initial routes or finds no
            solution; or-tools signals these by returning None instead of
            raising, which would otherwise surface as an AttributeError.
    """
    # Create the VRP routing model. The 1 means we are only looking
    # for a single path.
    manager = pywrapcp.RoutingIndexManager(path_graph.num_nodes(), 1, path_graph.ORIGIN)
    routing = pywrapcp.RoutingModel(manager)

    # For every path node, add a disjunction so that we do not also
    # draw its reverse.
    for disjunction in path_graph.iter_disjunctions():
        routing.AddDisjunction(disjunction)

    # Wrap the distance function so that it converts to an integer,
    # as or-tools requires. Values are multiplied by COST_MULTIPLIER
    # prior to conversion to reduce the loss of precision.
    COST_MULTIPLIER = 1e4

    def distance(i, j):
        from_node = manager.IndexToNode(i)
        to_node = manager.IndexToNode(j)
        return int(path_graph.cost(from_node, to_node) * COST_MULTIPLIER)

    transit_callback_index = routing.RegisterTransitCallback(distance)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    start_time = time()

    def found_solution():
        # Progress reporting only; or-tools invokes this each time the
        # search improves on the incumbent solution.
        t = time() - start_time
        cost = routing.CostVar().Max() / COST_MULTIPLIER
        print('\rBest solution at {} seconds has cost {} '.format(
            int(t), cost), end='')

    routing.AddAtSolutionCallback(found_solution)

    # If we weren't supplied with a solution initially, construct one by taking
    # all of the paths in their original direction, in their original order.
    if not initial_solution:
        initial_solution = [i for i, _ in path_graph.iter_disjunctions()]

    # Compute the cost of the initial solution. This is the number we hope to
    # improve on.
    initial_assignment = routing.ReadAssignmentFromRoutes([initial_solution],
                                                          True)
    if initial_assignment is None:
        raise RuntimeError('initial solution was rejected by the routing model')
    print('Initial distance:',
          initial_assignment.ObjectiveValue() / COST_MULTIPLIER)

    # Set the parameters of the search.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.time_limit.seconds = runtime_seconds
    search_parameters.local_search_metaheuristic = (
        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)

    # Run the optimizer and report the final distance.
    assignment = routing.SolveFromAssignmentWithParameters(initial_assignment, search_parameters)
    if assignment is None:
        raise RuntimeError('or-tools found no solution within the time limit')
    print('Final distance:', assignment.ObjectiveValue() / COST_MULTIPLIER)

    # Iterate over the result to produce a list to return as the solution.
    solution = []
    index = routing.Start(0)
    while not routing.IsEnd(index):
        index = assignment.Value(routing.NextVar(index))
        node = manager.IndexToNode(index)
        if node != 0:
            # For compatibility with the greedy solution, exclude the origin.
            # NOTE(review): assumes path_graph.ORIGIN == 0 -- confirm.
            solution.append(node)
    return solution
| from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2
from time import time
def vrp_solver(path_graph, initial_solution=None, runtime_seconds=60):
    """Solve a path using or-tools' Vehicle Routing Problem solver.

    Params:
        path_graph          the PathGraph representing the problem
        initial_solution    a solution to start with (list of indices, not
                            including the origin)
        runtime_seconds     how long to search before returning

    Returns: an ordered list of indices in the graph representing a
    solution.

    Raises:
        RuntimeError: if or-tools rejects the initial routes or finds no
            solution; or-tools signals these by returning None instead of
            raising, which would otherwise surface as an AttributeError.
    """
    # Create the VRP routing model. The 1 means we are only looking
    # for a single path.
    manager = pywrapcp.RoutingIndexManager(path_graph.num_nodes(), 1, path_graph.ORIGIN)
    routing = pywrapcp.RoutingModel(manager)

    # For every path node, add a disjunction so that we do not also
    # draw its reverse.
    for disjunction in path_graph.iter_disjunctions():
        routing.AddDisjunction(disjunction)

    # Wrap the distance function so that it converts to an integer,
    # as or-tools requires. Values are multiplied by COST_MULTIPLIER
    # prior to conversion to reduce the loss of precision.
    COST_MULTIPLIER = 1e4

    def distance(i, j):
        from_node = manager.IndexToNode(i)
        to_node = manager.IndexToNode(j)
        return int(path_graph.cost(from_node, to_node) * COST_MULTIPLIER)

    transit_callback_index = routing.RegisterTransitCallback(distance)
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)

    start_time = time()

    def found_solution():
        # Progress reporting only; or-tools invokes this each time the
        # search improves on the incumbent solution.
        t = time() - start_time
        cost = routing.CostVar().Max() / COST_MULTIPLIER
        print('\rBest solution at {} seconds has cost {} '.format(
            int(t), cost), end='')

    routing.AddAtSolutionCallback(found_solution)

    # If we weren't supplied with a solution initially, construct one by taking
    # all of the paths in their original direction, in their original order.
    if not initial_solution:
        initial_solution = [i for i, _ in path_graph.iter_disjunctions()]

    # Compute the cost of the initial solution. This is the number we hope to
    # improve on.
    initial_assignment = routing.ReadAssignmentFromRoutes([initial_solution],
                                                          True)
    if initial_assignment is None:
        raise RuntimeError('initial solution was rejected by the routing model')
    print('Initial distance:',
          initial_assignment.ObjectiveValue() / COST_MULTIPLIER)

    # Set the parameters of the search.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.time_limit.seconds = runtime_seconds
    search_parameters.local_search_metaheuristic = (
        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)

    # Run the optimizer and report the final distance.
    assignment = routing.SolveFromAssignmentWithParameters(initial_assignment, search_parameters)
    if assignment is None:
        raise RuntimeError('or-tools found no solution within the time limit')
    print('Final distance:', assignment.ObjectiveValue() / COST_MULTIPLIER)

    # Iterate over the result to produce a list to return as the solution.
    solution = []
    index = routing.Start(0)
    while not routing.IsEnd(index):
        index = assignment.Value(routing.NextVar(index))
        node = manager.IndexToNode(index)
        if node != 0:
            # For compatibility with the greedy solution, exclude the origin.
            # NOTE(review): assumes path_graph.ORIGIN == 0 -- confirm.
            solution.append(node)
    return solution
| en | 0.893826 | Solve a path using or-tools' Vehicle Routing Problem solver. Params: path_graph the PathGraph representing the problem initial_solution a solution to start with (list of indices, not including the origin) runtime_seconds how long to search before returning Returns: an ordered list of indices in the graph representing a solution. # Create the VRP routing model. The 1 means we are only looking # for a single path. # For every path node, add a disjunction so that we do not also # draw its reverse. # Wrap the distance function so that it converts to an integer, # as or-tools requires. Values are multiplied by COST_MULTIPLIER # prior to conversion to reduce the loss of precision. # If we weren't supplied with a solution initially, construct one by taking # all of the paths in their original direction, in their original order. # Compute the cost of the initial solution. This is the number we hope to # improve on. # Set the parameters of the search. # Run the optimizer and report the final distance. # Iterate over the result to produce a list to return as the solution. # For compatibility with the greedy solution, exclude the origin. | 3.018194 | 3 |
tests/unit/transformers/test_categorical.py | sdv-dev/RDT | 49 | 6614632 | import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
# Matches US social-security-number formatted strings (DDD-DD-DDDD).
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
    def test___setstate__(self):
        """Test the ``__setstate__`` method.

        Validate that the ``__dict__`` attribute is correctly updated when the
        state is restored: a ``None`` key in ``intervals`` must be replaced
        with ``np.nan``.

        Setup:
        - create an instance of a ``CategoricalTransformer``.

        Side effect:
        - it updates the ``__dict__`` attribute of the object.
        """
        # Setup
        transformer = CategoricalTransformer()

        # Run
        transformer.__setstate__({
            'intervals': {
                None: 'abc'
            }
        })

        # Assert: the value stored under None is now reachable via np.nan.
        assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method returs the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
    def test__get_intervals(self):
        """Test the ``_get_intervals`` method.

        Validate that the intervals computed for each categorical value are
        correct.

        Input:
        - a pandas series containing categorical values.

        Output:
        - a tuple of (intervals dict, per-category means series, frame
          mapping interval starts back to categories).
        """
        # Run
        data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
        result = CategoricalTransformer._get_intervals(data)

        # Asserts
        # Interval tuples appear to be (start, end, mean, std) -- confirm
        # against the transformer; widths are proportional to each category's
        # frequency (foo: 3/6, bar: 2/6, tar: 1/6).
        expected_intervals = {
            'foo': (
                0,
                0.5,
                0.25,
                0.5 / 6
            ),
            'bar': (
                0.5,
                0.8333333333333333,
                0.6666666666666666,
                0.05555555555555555
            ),
            'tar': (
                0.8333333333333333,
                0.9999999999999999,
                0.9166666666666666,
                0.027777777777777776
            )
        }
        expected_means = pd.Series({
            'foo': 0.25,
            'bar': 0.6666666666666666,
            'tar': 0.9166666666666666
        })
        expected_starts = pd.DataFrame({
            'category': ['foo', 'bar', 'tar'],
            'start': [0, 0.5, 0.8333333333333333]
        }).set_index('start')

        assert result[0] == expected_intervals
        pd.testing.assert_series_equal(result[1], expected_means)
        pd.testing.assert_frame_equal(result[2], expected_starts)
    def test__get_intervals_nans(self):
        """Test the ``_get_intervals`` method when data contains nan's.

        Validate that the intervals for each categorical value are correct
        when the data contains nan values: both ``np.nan`` and ``None`` must
        be grouped under a single ``np.nan`` category.

        Input:
        - a pandas series containing nan values and categorical values.

        Output:
        - a tuple of (intervals dict, per-category means series, frame
          mapping interval starts back to categories).
        """
        # Setup: np.nan and None together account for 2 of 6 rows.
        data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])

        # Run
        result = CategoricalTransformer._get_intervals(data)

        # Assert
        expected_intervals = {
            'foo': (
                0,
                0.5,
                0.25,
                0.5 / 6
            ),
            np.nan: (
                0.5,
                0.8333333333333333,
                0.6666666666666666,
                0.05555555555555555
            ),
            'tar': (
                0.8333333333333333,
                0.9999999999999999,
                0.9166666666666666,
                0.027777777777777776
            )
        }
        expected_means = pd.Series({
            'foo': 0.25,
            np.nan: 0.6666666666666666,
            'tar': 0.9166666666666666
        })
        expected_starts = pd.DataFrame({
            'category': ['foo', np.nan, 'tar'],
            'start': [0, 0.5, 0.8333333333333333]
        }).set_index('start')

        assert result[0] == expected_intervals
        pd.testing.assert_series_equal(result[1], expected_means)
        pd.testing.assert_frame_equal(result[2], expected_starts)
    def test__fit_intervals(self):
        """``_fit`` must compute and store intervals, means and starts.

        Same fixture and expected values as ``test__get_intervals``, but
        exercised through ``_fit`` and checked on the instance attributes.
        """
        # Setup
        transformer = CategoricalTransformer()

        # Run
        data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
        transformer._fit(data)

        # Asserts
        expected_intervals = {
            'foo': (
                0,
                0.5,
                0.25,
                0.5 / 6
            ),
            'bar': (
                0.5,
                0.8333333333333333,
                0.6666666666666666,
                0.05555555555555555
            ),
            'tar': (
                0.8333333333333333,
                0.9999999999999999,
                0.9166666666666666,
                0.027777777777777776
            )
        }
        expected_means = pd.Series({
            'foo': 0.25,
            'bar': 0.6666666666666666,
            'tar': 0.9166666666666666
        })
        expected_starts = pd.DataFrame({
            'category': ['foo', 'bar', 'tar'],
            'start': [0, 0.5, 0.8333333333333333]
        }).set_index('start')

        assert transformer.intervals == expected_intervals
        pd.testing.assert_series_equal(transformer.means, expected_means)
        pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
    def test__reverse_transform_array(self):
        """Test ``_reverse_transform`` on a ``numpy.array``.

        After fitting on the categorical series, reverse-transforming the
        numeric array must recover the original categories and leave the
        fitted intervals untouched.
        """
        # Setup
        data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
        rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
        transformer = CategoricalTransformer()

        # Run
        transformer._fit(data)
        result = transformer._reverse_transform(rt_data)

        # Asserts: fitting must not have altered the computed intervals.
        expected_intervals = {
            'foo': (
                0,
                0.5,
                0.25,
                0.5 / 6
            ),
            'bar': (
                0.5,
                0.8333333333333333,
                0.6666666666666666,
                0.05555555555555555
            ),
            'tar': (
                0.8333333333333333,
                0.9999999999999999,
                0.9166666666666666,
                0.027777777777777776
            )
        }
        assert transformer.intervals == expected_intervals

        expect = pd.Series(data)
        pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
    """Test ``_transform_by_category`` with numerical data.

    Each category in the input must be replaced by the mean of the
    interval assigned to it.
    """
    # Setup
    transformer = CategoricalTransformer()
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }
    values = pd.Series([1, 3, 3, 2, 1])

    # Run
    output = transformer._transform_by_category(values)

    # Assert
    expected_output = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
    assert (output == expected_output).all()
def test__transform_by_category_nans(self):
    """Test the ``_transform_by_category`` method with data containing nans.
    Validate that the data is transformed correctly when it contains nan's.
    Setup:
        - the categorical transformer is instantiated, and the appropriate ``intervals``
        attribute is set.
    Input:
        - a pandas series containing nan's.
    Output:
        - a numpy array containing the transformed data.
    """
    # Setup
    data = pd.Series([np.nan, 3, 3, 2, np.nan])
    transformer = CategoricalTransformer()
    # nan rows are expected to map to the interval keyed by `np.nan`.
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
    }
    # Run
    transformed = transformer._transform_by_category(data)
    # Asserts
    expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
    assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
    """Test the ``_transform_by_category`` method when ``fuzzy`` is True.
    Validate that the data is transformed correctly when ``fuzzy`` is True.
    Setup:
        - the categorical transformer is instantiated with ``fuzzy`` as True,
        and the appropriate ``intervals`` attribute is set.
        - the ``intervals`` attribute is set to a a dictionary of intervals corresponding
        to the elements of the passed data.
        - set the ``side_effect`` of ``norm_mock.rvs`` to the appropriate function.
    Input:
        - a pandas series.
    Output:
        - a numpy array containing the transformed data.
    Side effect:
        - ``norm_mock.rvs`` should be called four times, one for each element of the
        intervals dictionary.
    """
    # Setup
    def rvs_mock_func(loc, scale, **kwargs):
        # Make the "fuzzy" draw deterministic: always return the mean.
        return loc
    norm_mock.rvs.side_effect = rvs_mock_func
    data = pd.Series([1, 3, 3, 2, 1])
    transformer = CategoricalTransformer(fuzzy=True)
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }
    # Run
    transformed = transformer._transform_by_category(data)
    # Assert
    expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
    assert (transformed == expected).all()
    # `size` is the number of rows belonging to each category (0, 2, 1, 2).
    norm_mock.rvs.assert_has_calls([
        call(0.125, 0.041666666666666664, size=0),
        call(0.375, 0.041666666666666664, size=2),
        call(0.625, 0.041666666666666664, size=1),
        call(0.875, 0.041666666666666664, size=2),
    ])
def test__transform_by_row_called(self):
    """Test that the `_transform_by_row` method is called.
    When the number of rows is less than or equal to the number of categories,
    expect that the `_transform_by_row` method is called.
    Setup:
        The categorical transformer is instantiated with 4 categories.
    Input:
        - data with 4 rows
    Output:
        - the output of `_transform_by_row`
    Side effects:
        - `_transform_by_row` will be called once
    """
    # Setup
    # 4 rows of data vs. 4 entries in `means` => the by-row path is chosen.
    data = pd.Series([1, 2, 3, 4])
    categorical_transformer_mock = Mock()
    categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
    # Run
    transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
    # Asserts
    categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
    assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
    """Test ``_transform_by_row`` with numerical data.

    Every row must map to the interval mean of its category.
    """
    # Setup
    transformer = CategoricalTransformer()
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }
    values = pd.Series([1, 2, 3, 4])

    # Run
    output = transformer._transform_by_row(values)

    # Assert
    expected_output = np.array([0.875, 0.625, 0.375, 0.125])
    assert (output == expected_output).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
    """Test that the `_reverse_transform_by_matrix` method is called.
    When there is enough virtual memory, expect that the
    `_reverse_transform_by_matrix` method is called.
    Setup:
        The categorical transformer is instantiated with 4 categories. Also patch the
        `psutil.virtual_memory` function to return a large enough `available_memory`.
    Input:
        - numerical data with 4 rows
    Output:
        - the output of `_reverse_transform_by_matrix`
    Side effects:
        - `_reverse_transform_by_matrix` will be called once
    """
    # Setup
    data = pd.Series([1, 2, 3, 4])
    categorical_transformer_mock = Mock()
    categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
    categorical_transformer_mock._normalize.return_value = data
    virtual_memory = Mock()
    # Just over rows * categories * 8 bytes * 3 — presumably the matrix-path
    # memory estimate used by `_reverse_transform`; confirm against implementation.
    virtual_memory.available = 4 * 4 * 8 * 3 + 1
    psutil_mock.return_value = virtual_memory
    # Run
    reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
    # Asserts
    categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
    assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
    """Test the _reverse_transform_by_matrix method with numerical data
    Expect that the transformed data is correctly reverse transformed.
    Setup:
        The categorical transformer is instantiated with 4 categories and means. Also patch
        the `psutil.virtual_memory` function to return a large enough `available_memory`.
    Input:
        - transformed data with 4 rows
    Output:
        - the original data
    """
    # Setup
    data = pd.Series([1, 2, 3, 4])
    transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
    transformer = CategoricalTransformer()
    # `means` maps each category (index) to its interval mean (value).
    transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
    transformer.dtype = data.dtype
    virtual_memory = Mock()
    virtual_memory.available = 4 * 4 * 8 * 3 + 1
    psutil_mock.return_value = virtual_memory
    # Run
    reverse = transformer._reverse_transform_by_matrix(transformed)
    # Assert
    pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
    """Test that the `_reverse_transform_by_category` method is called.
    When there is not enough virtual memory and the number of rows is greater than the
    number of categories, expect that the `_reverse_transform_by_category` method is called.
    Setup:
        The categorical transformer is instantiated with 4 categories. Also patch the
        `psutil.virtual_memory` function to return an `available_memory` of 1.
    Input:
        - numerical data with 5 rows
    Output:
        - the output of `_reverse_transform_by_category`
    Side effects:
        - `_reverse_transform_by_category` will be called once
    """
    # Setup
    transform_data = pd.Series([1, 3, 3, 2, 1])
    categorical_transformer_mock = Mock()
    categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
    categorical_transformer_mock._normalize.return_value = transform_data
    virtual_memory = Mock()
    # Too little memory for the matrix path; 5 rows > 4 categories selects by-category.
    virtual_memory.available = 1
    psutil_mock.return_value = virtual_memory
    # Run
    reverse = CategoricalTransformer._reverse_transform(
        categorical_transformer_mock, transform_data)
    # Asserts
    categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
        transform_data)
    assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
    """Test the _reverse_transform_by_category method with numerical data.
    Expect that the transformed data is correctly reverse transformed.
    Setup:
        The categorical transformer is instantiated with 4 categories, and the means
        and intervals are set for those categories. Also patch the `psutil.virtual_memory`
        function to return an `available_memory` of 1.
    Input:
        - transformed data with 5 rows
    Output:
        - the original data
    """
    # Setup
    data = pd.Series([1, 3, 3, 2, 1])
    transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
    transformer = CategoricalTransformer()
    transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }
    transformer.dtype = data.dtype
    virtual_memory = Mock()
    virtual_memory.available = 1
    psutil_mock.return_value = virtual_memory
    # Run
    reverse = transformer._reverse_transform_by_category(transformed)
    # Assert
    pd.testing.assert_series_equal(data, reverse)
def test__get_category_from_start(self):
    """Test ``_get_category_from_start``.

    Given an integer row position, the method must return the matching
    category from the ``starts`` frame (which is indexed by interval start).
    """
    # Setup
    transformer = CategoricalTransformer()
    starts = pd.DataFrame({
        'start': [0.0, 0.5, 0.7],
        'category': ['a', 'b', 'c']
    })
    transformer.starts = starts.set_index('start')

    # Run
    result = transformer._get_category_from_start(2)

    # Assert
    assert result == 'c'
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
    """Test that the `_reverse_transform_by_row` method is called.
    When there is not enough virtual memory and the number of rows is less than or equal
    to the number of categories, expect that the `_reverse_transform_by_row` method
    is called.
    Setup:
        The categorical transformer is instantiated with 4 categories. Also patch the
        `psutil.virtual_memory` function to return an `available_memory` of 1.
    Input:
        - numerical data with 4 rows
    Output:
        - the output of `_reverse_transform_by_row`
    Side effects:
        - `_reverse_transform_by_row` will be called once
    """
    # Setup
    data = pd.Series([1, 2, 3, 4])
    categorical_transformer_mock = Mock()
    categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
    categorical_transformer_mock.starts = pd.DataFrame(
        [0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
    categorical_transformer_mock._normalize.return_value = data
    virtual_memory = Mock()
    # Too little memory for the matrix path; 4 rows <= 4 categories selects by-row.
    virtual_memory.available = 1
    psutil_mock.return_value = virtual_memory
    # Run
    reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
    # Asserts
    categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
    assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
    """Test the _reverse_transform_by_row method with numerical data.
    Expect that the transformed data is correctly reverse transformed.
    Setup:
        The categorical transformer is instantiated with 4 categories, and the means, starts,
        and intervals are set for those categories. Also patch the `psutil.virtual_memory`
        function to return an `available_memory` of 1.
    Input:
        - transformed data with 4 rows
    Output:
        - the original data
    """
    # Setup
    data = pd.Series([1, 2, 3, 4])
    transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
    transformer = CategoricalTransformer()
    transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
    transformer.starts = pd.DataFrame(
        [4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }
    transformer.dtype = data.dtype
    virtual_memory = Mock()
    virtual_memory.available = 1
    psutil_mock.return_value = virtual_memory
    # Run
    # Goes through the public `_reverse_transform`, which is expected to
    # dispatch to the by-row path (low memory, rows <= categories).
    reverse = transformer._reverse_transform(transformed)
    # Assert
    pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
    """Unit tests for ``OneHotEncodingTransformer``."""

    def test___init__(self):
        """Test the ``__init__`` method.
        Validate that the passed arguments are stored as attributes.
        Input:
            - a string passed to the ``error_on_unknown`` parameter.
        Side effect:
            - the ``error_on_unknown`` attribute is set to the passed string.
        """
        # Run
        transformer = OneHotEncodingTransformer(error_on_unknown='error_value')
        # Asserts
        assert transformer.error_on_unknown == 'error_value'

    def test__prepare_data_empty_lists(self):
        """``_prepare_data`` should reject a list of empty lists."""
        # Setup
        ohet = OneHotEncodingTransformer()
        data = [[], [], []]
        # Assert
        with pytest.raises(ValueError, match='Unexpected format.'):
            ohet._prepare_data(data)

    def test__prepare_data_nested_lists(self):
        """``_prepare_data`` should reject lists nested more than one level."""
        # Setup
        ohet = OneHotEncodingTransformer()
        data = [[[]]]
        # Assert
        with pytest.raises(ValueError, match='Unexpected format.'):
            ohet._prepare_data(data)

    def test__prepare_data_list_of_lists(self):
        """``_prepare_data`` should flatten a list of single-element lists."""
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = [['a'], ['b'], ['c']]
        out = ohet._prepare_data(data)
        # Assert
        expected = np.array(['a', 'b', 'c'])
        np.testing.assert_array_equal(out, expected)

    def test__prepare_data_pandas_series(self):
        """``_prepare_data`` should pass a pandas Series through unchanged."""
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series(['a', 'b', 'c'])
        out = ohet._prepare_data(data)
        # Assert
        expected = pd.Series(['a', 'b', 'c'])
        np.testing.assert_array_equal(out, expected)

    def test_get_output_types(self):
        """Test the ``get_output_types`` method.
        Validate that the ``_add_prefix`` method is properly applied to the ``output_types``
        dictionary. For this class, the ``output_types`` dictionary is described as:
        {
            'value0': 'float',
            'value1': 'float',
            ...
        }
        The number of items in the dictionary is defined by the ``dummies`` attribute.
        Setup:
            - initialize a ``OneHotEncodingTransformer`` and set:
                - the ``dummies`` attribute to a list.
                - the ``column_prefix`` attribute to a string.
        Output:
            - the ``output_types`` dictionary, but with ``self.column_prefix``
            added to the beginning of the keys of the ``output_types`` dictionary.
        """
        # Setup
        transformer = OneHotEncodingTransformer()
        transformer.column_prefix = 'abc'
        transformer.dummies = [1, 2]
        # Run
        output = transformer.get_output_types()
        # Assert
        expected = {
            'abc.value0': 'float',
            'abc.value1': 'float'
        }
        assert output == expected

    def test__fit_dummies_no_nans(self):
        """Test the ``_fit`` method without nans.
        Check that ``self.dummies`` does not
        contain nans.
        Input:
            - Series with values
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series(['a', 2, 'c'])
        ohet._fit(data)
        # Assert
        np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c'])

    def test__fit_dummies_nans(self):
        """Test the ``_fit`` method with nans.
        Check that ``self.dummies`` contain ``np.nan``.
        Input:
            - Series with values
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series(['a', 2, 'c', None])
        ohet._fit(data)
        # Assert
        np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c', np.nan])

    def test__fit_no_nans(self):
        """Test the ``_fit`` method without nans.
        Check that the settings of the transformer
        are properly set based on the input. Encoding
        should be activated
        Input:
            - Series with values
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series(['a', 'b', 'c'])
        ohet._fit(data)
        # Assert
        np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
        np.testing.assert_array_equal(ohet._uniques, ['a', 'b', 'c'])
        assert ohet._dummy_encoded
        assert not ohet._dummy_na

    def test__fit_no_nans_numeric(self):
        """Test the ``_fit`` method without nans.
        Check that the settings of the transformer
        are properly set based on the input. Encoding
        should be deactivated
        Input:
            - Series with values
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series([1, 2, 3])
        ohet._fit(data)
        # Assert
        np.testing.assert_array_equal(ohet.dummies, [1, 2, 3])
        np.testing.assert_array_equal(ohet._uniques, [1, 2, 3])
        assert not ohet._dummy_encoded
        assert not ohet._dummy_na

    def test__fit_nans(self):
        """Test the ``_fit`` method with nans.
        Check that the settings of the transformer
        are properly set based on the input. Encoding
        and NA should be activated.
        Input:
            - Series with containing nan values
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series(['a', 'b', None])
        ohet._fit(data)
        # Assert
        np.testing.assert_array_equal(ohet.dummies, ['a', 'b', np.nan])
        np.testing.assert_array_equal(ohet._uniques, ['a', 'b'])
        assert ohet._dummy_encoded
        assert ohet._dummy_na

    def test__fit_nans_numeric(self):
        """Test the ``_fit`` method with nans.
        Check that the settings of the transformer
        are properly set based on the input. Encoding
        should be deactivated and NA activated.
        Input:
            - Series with containing nan values
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series([1, 2, np.nan])
        ohet._fit(data)
        # Assert
        np.testing.assert_array_equal(ohet.dummies, [1, 2, np.nan])
        np.testing.assert_array_equal(ohet._uniques, [1, 2])
        assert not ohet._dummy_encoded
        assert ohet._dummy_na

    def test__fit_single(self):
        """``_fit`` on a single repeated category yields one dummy."""
        # Setup
        ohet = OneHotEncodingTransformer()
        # Run
        data = pd.Series(['a', 'a', 'a'])
        ohet._fit(data)
        # Assert
        np.testing.assert_array_equal(ohet.dummies, ['a'])

    def test__transform_no_nan(self):
        """Test the ``_transform`` method without nans.
        The values passed to ``_transform`` should be
        returned in a one-hot encoding representation.
        Input:
            - Series with values
        Output:
            - one-hot encoding of the input
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        data = pd.Series(['a', 'b', 'c'])
        ohet._uniques = ['a', 'b', 'c']
        ohet._num_dummies = 3
        # Run
        out = ohet._transform_helper(data)
        # Assert
        expected = np.array([
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]
        ])
        np.testing.assert_array_equal(out, expected)

    def test__transform_no_nan_categorical(self):
        """Test the ``_transform`` method without nans.
        The values passed to ``_transform`` should be
        returned in a one-hot encoding representation
        using the categorical branch.
        Input:
            - Series with categorical values
        Output:
            - one-hot encoding of the input
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        data = pd.Series(['a', 'b', 'c'])
        ohet._uniques = ['a', 'b', 'c']
        ohet._indexer = [0, 1, 2]
        ohet._num_dummies = 3
        ohet._dummy_encoded = True
        # Run
        out = ohet._transform_helper(data)
        # Assert
        expected = np.array([
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]
        ])
        np.testing.assert_array_equal(out, expected)

    def test__transform_nans_encoded(self):
        """Test the ``_transform`` method with nans.
        The values passed to ``_transform`` should be
        returned in a one-hot encoding representation. Null
        values should be represented by the same encoding.
        Input:
            - Series with values containing nans
        Output:
            - one-hot encoding of the input
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        data = pd.Series([np.nan, None, 'a', 'b'])
        ohet._uniques = ['a', 'b']
        ohet._dummy_na = True
        ohet._num_dummies = 2
        # Run
        out = ohet._transform_helper(data)
        # Assert
        # The extra (last) column encodes nulls.
        expected = np.array([
            [0, 0, 1],
            [0, 0, 1],
            [1, 0, 0],
            [0, 1, 0]
        ])
        np.testing.assert_array_equal(out, expected)

    def test__transform_nans_categorical(self):
        """Test the ``_transform`` method with nans.
        The values passed to ``_transform`` should be
        returned in a one-hot encoding representation using
        the categorical branch. Null values should be
        represented by the same encoding.
        Input:
            - Series with categorical values containing nans
        Output:
            - one-hot encoding of the input
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        data = pd.Series([np.nan, None, 'a', 'b'])
        ohet._uniques = ['a', 'b']
        ohet._indexer = [0, 1]
        ohet._dummy_na = True
        ohet._num_dummies = 2
        ohet._dummy_encoded = True
        # Run
        out = ohet._transform_helper(data)
        # Assert
        expected = np.array([
            [0, 0, 1],
            [0, 0, 1],
            [1, 0, 0],
            [0, 1, 0]
        ])
        np.testing.assert_array_equal(out, expected)

    def test__transform_single_column(self):
        """Test the ``_transform`` with one category.
        The values passed to ``_transform`` should be
        returned in a one-hot encoding representation
        where it should be a single column.
        Input:
            - Series with a single category
        Output:
            - one-hot encoding of the input
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        data = pd.Series(['a', 'a', 'a'])
        ohet._uniques = ['a']
        ohet._num_dummies = 1
        # Run
        out = ohet._transform_helper(data)
        # Assert
        expected = np.array([
            [1],
            [1],
            [1]
        ])
        np.testing.assert_array_equal(out, expected)

    def test__transform_single_categorical(self):
        """Test the ``_transform`` with one category.
        The values passed to ``_transform`` should be
        returned in a one-hot encoding representation
        using the categorical branch where it should
        be a single column.
        Input:
            - Series with a single category
        Output:
            - one-hot encoding of the input
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        data = pd.Series(['a', 'a', 'a'])
        ohet._uniques = ['a']
        ohet._indexer = [0]
        ohet._num_dummies = 1
        ohet._dummy_encoded = True
        # Run
        out = ohet._transform_helper(data)
        # Assert
        expected = np.array([
            [1],
            [1],
            [1]
        ])
        np.testing.assert_array_equal(out, expected)

    def test__transform_zeros(self):
        """Test the ``_transform`` with unknown category.
        The values passed to ``_transform`` should be
        returned in a one-hot encoding representation
        where it should be a column of zeros.
        Input:
            - Series with unknown values
        Output:
            - one-hot encoding of the input
        """
        # Setup
        ohet = OneHotEncodingTransformer()
        # NOTE(review): this Series is unused — likely leftover scaffolding.
        pd.Series(['a'])
        ohet._uniques = ['a']
        ohet._num_dummies = 1
        # Run
        out = ohet._transform_helper(pd.Series(['b', 'b', 'b']))
        # Assert
        expected = np.array([
            [0],
            [0],
            [0]
        ])
        np.testing.assert_array_equal(out, expected)
def test__transform_zeros_categorical(self):
    """Test ``_transform`` with an unknown category on the categorical branch.
    The values passed to ``_transform`` should be
    returned in a one-hot encoding representation
    using the categorical branch where it should
    be a column of zeros.
    Input:
        - Series with categorical and unknown values
    Output:
        - one-hot encoding of the input
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    ohet._uniques = ['a']
    ohet._indexer = [0]
    ohet._num_dummies = 1
    # Fixed: the original set ``ohet.dummy_encoded`` (missing the leading
    # underscore), creating an unused attribute so the categorical branch
    # was never actually exercised by this test.
    ohet._dummy_encoded = True
    # Run
    out = ohet._transform_helper(pd.Series(['b', 'b', 'b']))
    # Assert
    expected = np.array([
        [0],
        [0],
        [0]
    ])
    np.testing.assert_array_equal(out, expected)
def test__transform_unknown_nan(self):
    """Test the ``_transform`` with unknown and nans.
    This is an edge case for ``_transform`` where
    unknowns should be zeros and nans should be
    the last entry in the column.
    Input:
        - Series with unknown and nans
    Output:
        - one-hot encoding of the input
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    # NOTE(review): this Series is unused — likely leftover scaffolding.
    pd.Series(['a'])
    ohet._uniques = ['a']
    ohet._dummy_na = True
    ohet._num_dummies = 1
    # Run
    out = ohet._transform_helper(pd.Series(['b', 'b', np.nan]))
    # Assert
    expected = np.array([
        [0, 0],
        [0, 0],
        [0, 1]
    ])
    np.testing.assert_array_equal(out, expected)

def test__transform_no_nans(self):
    """Test the ``transform`` without nans.
    In this test ``transform`` should return an identity
    matrix representing each item in the input.
    Input:
        - Series with categorical values
    Output:
        - one-hot encoding of the input
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a', 'b', 'c'])
    ohet._fit(data)
    # Run
    out = ohet._transform(data)
    # Assert
    expected = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]
    ])
    np.testing.assert_array_equal(out, expected)

def test__transform_nans(self):
    """Test the ``transform`` with nans.
    In this test ``transform`` should return an identity matrix
    representing each item in the input as well as nans.
    Input:
        - Series with categorical values and nans
    Output:
        - one-hot encoding of the input
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a', 'b', None])
    ohet._fit(data)
    # Run
    out = ohet._transform(data)
    # Assert
    expected = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]
    ])
    np.testing.assert_array_equal(out, expected)

def test__transform_single_column_filled_with_ones(self):
    """Test the ``transform`` on a single category.
    In this test ``transform`` should return a column
    filled with ones.
    Input:
        - Series with a single categorical value
    Output:
        - one-hot encoding of the input
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a', 'a', 'a'])
    ohet._fit(data)
    # Run
    out = ohet._transform(data)
    # Assert
    expected = np.array([
        [1],
        [1],
        [1]
    ])
    np.testing.assert_array_equal(out, expected)

def test__transform_unknown(self):
    """Test the ``transform`` with unknown data.
    In this test ``transform`` should raise an error
    due to the attempt of transforming data with previously
    unseen categories.
    Input:
        - Series with unknown categorical values
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a'])
    ohet._fit(data)
    # Assert
    with np.testing.assert_raises(ValueError):
        ohet._transform(['b'])

def test__transform_numeric(self):
    """Test the ``transform`` on numeric input.
    In this test ``transform`` should return a matrix
    representing each item in the input as one-hot encodings.
    Input:
        - Series with numeric input
    Output:
        - one-hot encoding of the input
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series([1, 2])
    ohet._fit(data)
    expected = np.array([
        [1, 0],
        [0, 1],
    ])
    # Run
    out = ohet._transform(data)
    # Assert
    # Numeric input must not activate the categorical encoding branch.
    assert not ohet._dummy_encoded
    np.testing.assert_array_equal(out, expected)

def test__reverse_transform_no_nans(self):
    """Round-trip: a one-hot matrix maps back to the fitted categories."""
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a', 'b', 'c'])
    ohet._fit(data)
    # Run
    transformed = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]
    ])
    out = ohet._reverse_transform(transformed)
    # Assert
    expected = pd.Series(['a', 'b', 'c'])
    pd.testing.assert_series_equal(out, expected)

def test__reverse_transform_nans(self):
    """Round-trip: the null column maps back to ``None``."""
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a', 'b', None])
    ohet._fit(data)
    # Run
    transformed = np.array([
        [1, 0, 0],
        [0, 1, 0],
        [0, 0, 1]
    ])
    out = ohet._reverse_transform(transformed)
    # Assert
    expected = pd.Series(['a', 'b', None])
    pd.testing.assert_series_equal(out, expected)

def test__reverse_transform_single(self):
    """Round-trip: a single-column matrix maps back to the one category."""
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a', 'a', 'a'])
    ohet._fit(data)
    # Run
    transformed = np.array([
        [1],
        [1],
        [1]
    ])
    out = ohet._reverse_transform(transformed)
    # Assert
    expected = pd.Series(['a', 'a', 'a'])
    pd.testing.assert_series_equal(out, expected)

def test__reverse_transform_1d(self):
    """Round-trip: a 1-dimensional input is accepted and reversed."""
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a', 'a', 'a'])
    ohet._fit(data)
    # Run
    transformed = pd.Series([1, 1, 1])
    out = ohet._reverse_transform(transformed)
    # Assert
    expected = pd.Series(['a', 'a', 'a'])
    pd.testing.assert_series_equal(out, expected)
class TestLabelEncodingTransformer:
    """Unit tests for ``LabelEncodingTransformer``."""

    def test__fit(self):
        """Test the ``_fit`` method.
        Validate that a unique integer representation for each category of the data is stored
        in the ``categories_to_values`` attribute, and the reverse is stored in the
        ``values_to_categories`` attribute.
        Setup:
            - create an instance of the ``LabelEncodingTransformer``.
        Input:
            - a pandas series.
        Side effects:
            - set the ``values_to_categories`` dictionary to the appropriate value.
            - set ``categories_to_values`` dictionary to the appropriate value.
        """
        # Setup
        data = pd.Series([1, 2, 3, 2, 1])
        transformer = LabelEncodingTransformer()
        # Run
        transformer._fit(data)
        # Assert
        assert transformer.values_to_categories == {0: 1, 1: 2, 2: 3}
        assert transformer.categories_to_values == {1: 0, 2: 1, 3: 2}

    def test__transform(self):
        """Test the ``_transform`` method.
        Validate that each category of the passed data is replaced with its corresponding
        integer value.
        Setup:
            - create an instance of the ``LabelEncodingTransformer``, where
            ``categories_to_values`` is set to a dictionary.
        Input:
            - a pandas series.
        Output:
            - a numpy array containing the transformed data.
        """
        # Setup
        data = pd.Series([1, 2, 3])
        transformer = LabelEncodingTransformer()
        transformer.categories_to_values = {1: 0, 2: 1, 3: 2}
        # Run
        transformed = transformer._transform(data)
        # Assert
        pd.testing.assert_series_equal(transformed, pd.Series([0, 1, 2]))

    def test__reverse_transform_clips_values(self):
        """Test the ``_reverse_transform`` method with values not in map.
        If a value that is not in ``values_to_categories`` is passed
        to ``reverse_transform``, then the value should be clipped to
        the range of the dict's keys.
        Input:
            - array with values outside of dict
        Output:
            - categories corresponding to closest key in the dict
        """
        # Setup
        transformer = LabelEncodingTransformer()
        transformer.values_to_categories = {0: 'a', 1: 'b', 2: 'c'}
        # 10 is out of range and should clip to the max key (2 -> 'c').
        data = pd.Series([0, 1, 10])
        # Run
        out = transformer._reverse_transform(data)
        # Assert
        pd.testing.assert_series_equal(out, pd.Series(['a', 'b', 'c']))
class TestCategoricalFuzzyTransformer:
    """Unit tests for ``CategoricalFuzzyTransformer``."""

    def test___init__(self):
        """The constructor must enable ``fuzzy`` by default."""
        # Run
        instance = CategoricalFuzzyTransformer()

        # Assert
        assert instance.fuzzy
| import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
# Matches a US social security number shape (###-##-####); presumably used by
# tests elsewhere in the file — not referenced in this chunk. TODO confirm.
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
    """Test the ``__set_state__`` method.
    Validate that the ``__dict__`` attribute is correctly updated: a ``None``
    key inside ``intervals`` must be re-keyed to ``np.nan`` when unpickling.
    Setup:
        - create an instance of a ``CategoricalTransformer``.
    Side effect:
        - it updates the ``__dict__`` attribute of the object.
    """
    # Setup
    transformer = CategoricalTransformer()
    # Run
    transformer.__setstate__({
        'intervals': {
            None: 'abc'
        }
    })
    # Assert
    assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
    """Passed arguments must be stored as attributes."""
    # Run
    instance = CategoricalTransformer(
        fuzzy='fuzzy_value',
        clip='clip_value',
    )

    # Assert
    assert instance.fuzzy == 'fuzzy_value'
    assert instance.clip == 'clip_value'
def test_is_transform_deterministic(self):
    """Test the ``is_transform_deterministic`` method.
    Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
    Setup:
        - initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
    Output:
        - the boolean value which is the opposite of ``fuzzy``.
    """
    # Setup
    transformer = CategoricalTransformer(fuzzy=True)
    # Run
    output = transformer.is_transform_deterministic()
    # Assert
    assert output is False
def test_is_composition_identity(self):
    """Test the ``is_composition_identity`` method.
    Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
    returns the opposite boolean value of the ``fuzzy`` parameter.
    Setup:
        - initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
    Output:
        - the boolean value which is the opposite of ``fuzzy``.
    """
    # Setup
    transformer = CategoricalTransformer(fuzzy=True)
    # Run
    output = transformer.is_composition_identity()
    # Assert
    assert output is False
def test__get_intervals(self):
    """Test the ``_get_intervals`` method.
    Validate that the intervals for each categorical value are correct.
    Input:
        - a pandas series containing categorical values.
    Output:
        - a tuple, where the first element describes the intervals for each
        categorical value (start, end).
    """
    # Run
    data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
    result = CategoricalTransformer._get_intervals(data)
    # Asserts
    # Each interval is a (start, end, mean, std) tuple, ordered by
    # descending frequency of the category in the data.
    expected_intervals = {
        'foo': (
            0,
            0.5,
            0.25,
            0.5 / 6
        ),
        'bar': (
            0.5,
            0.8333333333333333,
            0.6666666666666666,
            0.05555555555555555
        ),
        'tar': (
            0.8333333333333333,
            0.9999999999999999,
            0.9166666666666666,
            0.027777777777777776
        )
    }
    expected_means = pd.Series({
        'foo': 0.25,
        'bar': 0.6666666666666666,
        'tar': 0.9166666666666666
    })
    expected_starts = pd.DataFrame({
        'category': ['foo', 'bar', 'tar'],
        'start': [0, 0.5, 0.8333333333333333]
    }).set_index('start')
    assert result[0] == expected_intervals
    pd.testing.assert_series_equal(result[1], expected_means)
    pd.testing.assert_frame_equal(result[2], expected_starts)
def test__get_intervals_nans(self):
    """Test the ``_get_intervals`` method when data contains nan's.

    Validate that the intervals for each categorical value are correct, when passed
    data containing nan values.

    Input:
        - a pandas series containing nan values and categorical values.

    Output:
        - a tuple, where the first element describes the intervals for each
          categorical value (start, end).
    """
    # Setup
    data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])

    # Run
    result = CategoricalTransformer._get_intervals(data)

    # Assert
    # ``np.nan`` and ``None`` are pooled into a single null category: its
    # interval spans 2 of the 6 values (width 0.333..., from 0.5 to 0.8333...).
    expected_intervals = {
        'foo': (
            0,
            0.5,
            0.25,
            0.5 / 6
        ),
        np.nan: (
            0.5,
            0.8333333333333333,
            0.6666666666666666,
            0.05555555555555555
        ),
        'tar': (
            0.8333333333333333,
            0.9999999999999999,
            0.9166666666666666,
            0.027777777777777776
        )
    }
    expected_means = pd.Series({
        'foo': 0.25,
        np.nan: 0.6666666666666666,
        'tar': 0.9166666666666666
    })
    expected_starts = pd.DataFrame({
        'category': ['foo', np.nan, 'tar'],
        'start': [0, 0.5, 0.8333333333333333]
    }).set_index('start')
    assert result[0] == expected_intervals
    pd.testing.assert_series_equal(result[1], expected_means)
    pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
    """When ``fuzzy`` is disabled, ``_get_value`` returns the interval mean as-is."""
    # Setup
    transformer = CategoricalTransformer(fuzzy=False)
    transformer.intervals = {
        'foo': (0, 0.5, 0.25, 0.5 / 6),
        np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
    }

    # Run and Assert - each category maps straight to its interval mean
    assert transformer._get_value('foo') == 0.25
    assert transformer._get_value(np.nan) == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
    """Without clipping, out-of-range values are wrapped back into [0, 1]."""
    # Setup
    transformer = CategoricalTransformer(clip=False)
    data = pd.Series([-0.43, 0.1234, 1.5, -1.31])

    # Run
    normalized = transformer._normalize(data)

    # Assert
    expected = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
    pd.testing.assert_series_equal(normalized, expected)
def test__normalize_clip(self):
    """With ``clip=True``, out-of-range values are clamped to the [0, 1] bounds."""
    # Setup
    transformer = CategoricalTransformer(clip=True)
    data = pd.Series([-0.43, 0.1234, 1.5, -1.31])

    # Run
    normalized = transformer._normalize(data)

    # Assert
    expected = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
    pd.testing.assert_series_equal(normalized, expected)
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
    """Test the `_transform_by_category` method with numerical data.

    Expect that the correct transformed data is returned.

    Setup:
        The categorical transformer is instantiated with 4 categories and intervals.

    Input:
        - data with 5 rows.

    Output:
        - the transformed data.
    """
    # Setup
    data = pd.Series([1, 3, 3, 2, 1])
    transformer = CategoricalTransformer()
    # intervals map category -> (start, end, mean, std)
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }

    # Run
    transformed = transformer._transform_by_category(data)

    # Asserts
    # Each value is replaced by the mean of its category's interval.
    expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
    assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of the ``rvs_mock`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``rvs_mock`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Assert
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
norm_mock.rvs.assert_has_calls([
call(0.125, 0.041666666666666664, size=0),
call(0.375, 0.041666666666666664, size=2),
call(0.625, 0.041666666666666664, size=1),
call(0.875, 0.041666666666666664, size=2),
])
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
    """Test the `_transform_by_row` method with numerical data.

    Expect that the correct transformed data is returned.

    Setup:
        The categorical transformer is instantiated with 4 categories and intervals.

    Input:
        - data with 4 rows.

    Output:
        - the transformed data.
    """
    # Setup
    data = pd.Series([1, 2, 3, 4])
    transformer = CategoricalTransformer()
    # intervals map category -> (start, end, mean, std)
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }

    # Run
    transformed = transformer._transform_by_row(data)

    # Asserts
    # Each value is replaced by the mean of its category's interval.
    expected = np.array([0.875, 0.625, 0.375, 0.125])
    assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Ouptut:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
    """Test the _reverse_transform_by_category method with numerical data.

    Expect that the transformed data is correctly reverse transformed.

    Setup:
        The categorical transformer is instantiated with 4 categories, and the means
        and intervals are set for those categories. Also patch the `psutil.virtual_memory`
        function to return an `available_memory` of 1.

    Input:
        - transformed data with 5 rows.

    Output:
        - the original data.
    """
    # Setup
    data = pd.Series([1, 3, 3, 2, 1])
    transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
    transformer = CategoricalTransformer()
    transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
    transformer.intervals = {
        4: (0, 0.25, 0.125, 0.041666666666666664),
        3: (0.25, 0.5, 0.375, 0.041666666666666664),
        2: (0.5, 0.75, 0.625, 0.041666666666666664),
        1: (0.75, 1.0, 0.875, 0.041666666666666664),
    }
    transformer.dtype = data.dtype
    # Force the low-memory code path so the by-category branch is exercised.
    virtual_memory = Mock()
    virtual_memory.available = 1
    psutil_mock.return_value = virtual_memory

    # Run
    reverse = transformer._reverse_transform_by_category(transformed)

    # Assert
    pd.testing.assert_series_equal(data, reverse)
def test__get_category_from_start(self):
    """Test the ``_get_category_from_start`` method.

    Setup:
        - a ``CategoricalTransformer`` whose ``starts`` attribute is a dataframe
          indexed by ``'start'``.

    Input:
        - an integer row position within the data.

    Output:
        - the category stored at that position.
    """
    # Setup
    transformer = CategoricalTransformer()
    starts = pd.DataFrame({
        'start': [0.0, 0.5, 0.7],
        'category': ['a', 'b', 'c']
    })
    transformer.starts = starts.set_index('start')

    # Run and Assert - position 2 holds category 'c'
    assert transformer._get_category_from_start(2) == 'c'
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
Ouptut:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.starts = pd.DataFrame(
[4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
def test___init__(self):
"""Test the ``__init__`` method.
Validate that the passed arguments are stored as attributes.
Input:
- a string passed to the ``error_on_unknown`` parameter.
Side effect:
- the ``error_on_unknown`` attribute is set to the passed string.
"""
# Run
transformer = OneHotEncodingTransformer(error_on_unknown='error_value')
# Asserts
assert transformer.error_on_unknown == 'error_value'
def test__prepare_data_empty_lists(self):
    """Test that ``_prepare_data`` rejects a list of empty lists.

    Input:
        - a list containing only empty lists.

    Side effect:
        - a ``ValueError`` is raised with the message 'Unexpected format.'.
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    data = [[], [], []]

    # Assert
    with pytest.raises(ValueError, match='Unexpected format.'):
        ohet._prepare_data(data)
def test__prepare_data_nested_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[[]]]
# Assert
with pytest.raises(ValueError, match='Unexpected format.'):
ohet._prepare_data(data)
def test__prepare_data_list_of_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = [['a'], ['b'], ['c']]
out = ohet._prepare_data(data)
# Assert
expected = np.array(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test__prepare_data_pandas_series(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
out = ohet._prepare_data(data)
# Assert
expected = pd.Series(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test_get_output_types(self):
"""Test the ``get_output_types`` method.
Validate that the ``_add_prefix`` method is properly applied to the ``output_types``
dictionary. For this class, the ``output_types`` dictionary is described as:
{
'value1': 'float',
'value2': 'float',
...
}
The number of items in the dictionary is defined by the ``dummies`` attribute.
Setup:
- initialize a ``OneHotEncodingTransformer`` and set:
- the ``dummies`` attribute to a list.
- the ``column_prefix`` attribute to a string.
Output:
- the ``output_types`` dictionary, but with ``self.column_prefix``
added to the beginning of the keys of the ``output_types`` dictionary.
"""
# Setup
transformer = OneHotEncodingTransformer()
transformer.column_prefix = 'abc'
transformer.dummies = [1, 2]
# Run
output = transformer.get_output_types()
# Assert
expected = {
'abc.value0': 'float',
'abc.value1': 'float'
}
assert output == expected
def test__fit_dummies_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` does not
contain nans.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c'])
def test__fit_dummies_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` contain ``np.nan``.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c', None])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c', np.nan])
def test__fit_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be activated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
np.testing.assert_array_equal(ohet._uniques, ['a', 'b', 'c'])
assert ohet._dummy_encoded
assert not ohet._dummy_na
def test__fit_no_nans_numeric(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, 3])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, 3])
np.testing.assert_array_equal(ohet._uniques, [1, 2, 3])
assert not ohet._dummy_encoded
assert not ohet._dummy_na
def test__fit_nans(self):
"""Test the ``_fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
and NA should be activated.
Input:
- Series with containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', None])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', np.nan])
np.testing.assert_array_equal(ohet._uniques, ['a', 'b'])
assert ohet._dummy_encoded
assert ohet._dummy_na
def test__fit_nans_numeric(self):
"""Test the ``_fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated and NA activated.
Input:
- Series with containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, np.nan])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, np.nan])
np.testing.assert_array_equal(ohet._uniques, [1, 2])
assert not ohet._dummy_encoded
assert ohet._dummy_na
def test__fit_single(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a'])
def test__transform_no_nan(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation.
Input:
- Series with values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._uniques = ['a', 'b', 'c']
ohet._num_dummies = 3
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nan_categorical(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._uniques = ['a', 'b', 'c']
ohet._indexer = [0, 1, 2]
ohet._num_dummies = 3
ohet._dummy_encoded = True
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_encoded(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation. Null
values should be represented by the same encoding.
Input:
- Series with values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet._uniques = ['a', 'b']
ohet._dummy_na = True
ohet._num_dummies = 2
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_categorical(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation using
the categorical branch. Null values should be
represented by the same encoding.
Input:
- Series with categorical values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet._uniques = ['a', 'b']
ohet._indexer = [0, 1]
ohet._dummy_na = True
ohet._num_dummies = 2
ohet._dummy_encoded = True
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_column(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._uniques = ['a']
ohet._num_dummies = 1
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_categorical(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._uniques = ['a']
ohet._indexer = [0]
ohet._num_dummies = 1
ohet._dummy_encoded = True
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros(self):
"""Test the ``_transform`` with unknown category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a column of zeros.
Input:
- Series with unknown values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet._uniques = ['a']
ohet._num_dummies = 1
# Run
out = ohet._transform_helper(pd.Series(['b', 'b', 'b']))
# Assert
expected = np.array([
[0],
[0],
[0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros_categorical(self):
    """Test the ``_transform`` with unknown category.

    The values passed to ``_transform`` should be
    returned in a one-hot encoding representation
    using the categorical branch where it should
    be a column of zeros.

    Input:
        - Series with categorical and unknown values

    Output:
        - one-hot encoding of the input
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    ohet._uniques = ['a']
    ohet._indexer = [0]
    ohet._num_dummies = 1
    # Bug fix: the original set ``ohet.dummy_encoded`` (no leading
    # underscore), leaving ``_dummy_encoded`` False and silently testing the
    # non-categorical branch. Sibling tests use ``_dummy_encoded``.
    ohet._dummy_encoded = True

    # Run - 'b' was never seen, so its row encodes to all zeros
    out = ohet._transform_helper(pd.Series(['b', 'b', 'b']))

    # Assert
    expected = np.array([
        [0],
        [0],
        [0]
    ])
    np.testing.assert_array_equal(out, expected)
def test__transform_unknown_nan(self):
"""Test the ``_transform`` with unknown and nans.
This is an edge case for ``_transform`` where
unknowns should be zeros and nans should be
the last entry in the column.
Input:
- Series with unknown and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet._uniques = ['a']
ohet._dummy_na = True
ohet._num_dummies = 1
# Run
out = ohet._transform_helper(pd.Series(['b', 'b', np.nan]))
# Assert
expected = np.array([
[0, 0],
[0, 0],
[0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nans(self):
"""Test the ``transform`` without nans.
In this test ``transform`` should return an identity
matrix representing each item in the input.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans(self):
"""Test the ``transform`` with nans.
In this test ``transform`` should return an identity matrix
representing each item in the input as well as nans.
Input:
- Series with categorical values and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', None])
ohet._fit(data)
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_column_filled_with_ones(self):
"""Test the ``transform`` on a single category.
In this test ``transform`` should return a column
filled with ones.
Input:
- Series with a single categorical value
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_unknown(self):
    """Test the ``transform`` with unknown data.

    In this test ``transform`` should raise an error
    due to the attempt of transforming data with previously
    unseen categories.

    Input:
        - Series with unknown categorical values

    Side effect:
        - a ``ValueError`` is raised.
    """
    # Setup
    ohet = OneHotEncodingTransformer()
    data = pd.Series(['a'])
    ohet._fit(data)

    # Run / Assert - use pytest.raises for consistency with the other tests
    # in this file (np.testing.assert_raises is a nose-era alias).
    with pytest.raises(ValueError):
        ohet._transform(['b'])
def test__transform_numeric(self):
"""Test the ``transform`` on numeric input.
In this test ``transform`` should return a matrix
representing each item in the input as one-hot encodings.
Input:
- Series with numeric input
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([1, 2])
ohet._fit(data)
expected = np.array([
[1, 0],
[0, 1],
])
# Run
out = ohet._transform(data)
# Assert
assert not ohet._dummy_encoded
np.testing.assert_array_equal(out, expected)
def test__reverse_transform_no_nans(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Run
transformed = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
out = ohet._reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'b', 'c'])
pd.testing.assert_series_equal(out, expected)
def test__reverse_transform_nans(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', None])
ohet._fit(data)
# Run
transformed = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
out = ohet._reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'b', None])
pd.testing.assert_series_equal(out, expected)
def test__reverse_transform_single(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Run
transformed = np.array([
[1],
[1],
[1]
])
out = ohet._reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'a', 'a'])
pd.testing.assert_series_equal(out, expected)
def test__reverse_transform_1d(self):
    """A one-dimensional input is reverse transformed like a single dummy column."""
    # Setup
    ohet = OneHotEncodingTransformer()
    ohet._fit(pd.Series(['a', 'a', 'a']))

    # Run
    reversed_data = ohet._reverse_transform(pd.Series([1, 1, 1]))

    # Assert
    pd.testing.assert_series_equal(reversed_data, pd.Series(['a', 'a', 'a']))
class TestLabelEncodingTransformer:
    """Unit tests for the ``LabelEncodingTransformer``."""

    def test__fit(self):
        """Test the ``_fit`` method.

        Validate that a unique integer representation for each category of the data is stored
        in the ``categories_to_values`` attribute, and the reverse is stored in the
        ``values_to_categories`` attribute.

        Setup:
            - create an instance of the ``LabelEncodingTransformer``.
        Input:
            - a pandas series.
        Side effects:
            - set the ``values_to_categories`` dictionary to the appropriate value.
            - set ``categories_to_values`` dictionary to the appropriate value.
        """
        # Setup
        data = pd.Series([1, 2, 3, 2, 1])
        transformer = LabelEncodingTransformer()

        # Run
        transformer._fit(data)

        # Assert
        assert transformer.values_to_categories == {0: 1, 1: 2, 2: 3}
        assert transformer.categories_to_values == {1: 0, 2: 1, 3: 2}

    def test__transform(self):
        """Test the ``_transform`` method.

        Validate that each category of the passed data is replaced with its corresponding
        integer value.

        Setup:
            - create an instance of the ``LabelEncodingTransformer``, where
              ``categories_to_values`` is set to a dictionary.
        Input:
            - a pandas series.
        Output:
            - a pandas series containing the transformed data.
        """
        # Setup
        data = pd.Series([1, 2, 3])
        transformer = LabelEncodingTransformer()
        transformer.categories_to_values = {1: 0, 2: 1, 3: 2}

        # Run
        transformed = transformer._transform(data)

        # Assert
        pd.testing.assert_series_equal(transformed, pd.Series([0, 1, 2]))

    def test__reverse_transform_clips_values(self):
        """Test the ``_reverse_transform`` method with values not in map.

        If a value that is not in ``values_to_categories`` is passed
        to ``reverse_transform``, then the value should be clipped to
        the range of the dict's keys.

        Input:
            - a pandas series with values outside of the dict.
        Output:
            - categories corresponding to the closest key in the dict.
        """
        # Setup
        transformer = LabelEncodingTransformer()
        transformer.values_to_categories = {0: 'a', 1: 'b', 2: 'c'}
        data = pd.Series([0, 1, 10])

        # Run
        out = transformer._reverse_transform(data)

        # Assert
        pd.testing.assert_series_equal(out, pd.Series(['a', 'b', 'c']))
class TestCategoricalFuzzyTransformer:
    """Unit tests for the ``CategoricalFuzzyTransformer``."""

    def test___init__(self):
        """Test that the ``__init__`` method uses ``fuzzy==True`` by default."""
        # Setup / Assert
        assert CategoricalFuzzyTransformer().fuzzy
| en | 0.705971 | Test the ``__set_state__`` method. Validate that the ``__dict__`` attribute is correctly udpdated when Setup: - create an instance of a ``CategoricalTransformer``. Side effect: - it updates the ``__dict__`` attribute of the object. # Setup # Run # Assert Passed arguments must be stored as attributes. # Run # Asserts Test the ``is_transform_deterministic`` method. Validate that this method returs the opposite boolean value of the ``fuzzy`` parameter. Setup: - initialize a ``CategoricalTransformer`` with ``fuzzy = True``. Output: - the boolean value which is the opposite of ``fuzzy``. # Setup # Run # Assert Test the ``is_composition_identity`` method. Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method returns the opposite boolean value of the ``fuzzy`` parameter. Setup: - initialize a ``CategoricalTransformer`` with ``fuzzy = True``. Output: - the boolean value which is the opposite of ``fuzzy``. # Setup # Run # Assert Test the ``_get_intervals`` method. Validate that the intervals for each categorical value are correct. Input: - a pandas series containing categorical values. Output: - a tuple, where the first element describes the intervals for each categorical value (start, end). # Run # Asserts Test the ``_get_intervals`` method when data contains nan's. Validate that the intervals for each categorical value are correct, when passed data containing nan values. Input: - a pandas series cotaining nan values and categorical values. Output: - a tuple, where the first element describes the intervals for each categorical value (start, end). # Setup # Run # Assert # Setup # Run # Asserts # Setup # Run # Asserts # setup # Run # Asserts Test normalize data # Setup # Run # Asserts Test normalize data with clip=True # Setup # Run # Asserts Test reverse_transform a numpy.array # Setup # Run # Asserts Test that the `_transform_by_category` method is called. 
When the number of rows is greater than the number of categories, expect that the `_transform_by_category` method is called. Setup: The categorical transformer is instantiated with 4 categories. Input: - data with 5 rows. Output: - the output of `_transform_by_category`. Side effects: - `_transform_by_category` will be called once. # Setup # Run # Asserts Test the `_transform_by_category` method with numerical data. Expect that the correct transformed data is returned. Setup: The categorical transformer is instantiated with 4 categories and intervals. Input: - data with 5 rows. Ouptut: - the transformed data. # Setup # Run # Asserts Test the ``_transform_by_category`` method with data containing nans. Validate that the data is transformed correctly when it contains nan's. Setup: - the categorical transformer is instantiated, and the appropriate ``intervals`` attribute is set. Input: - a pandas series containing nan's. Output: - a numpy array containing the transformed data. # Setup # Run # Asserts Test the ``_transform_by_category`` method when ``fuzzy`` is True. Validate that the data is transformed correctly when ``fuzzy`` is True. Setup: - the categorical transformer is instantiated with ``fuzzy`` as True, and the appropriate ``intervals`` attribute is set. - the ``intervals`` attribute is set to a a dictionary of intervals corresponding to the elements of the passed data. - set the ``side_effect`` of the ``rvs_mock`` to the appropriate function. Input: - a pandas series. Output: - a numpy array containing the transformed data. Side effect: - ``rvs_mock`` should be called four times, one for each element of the intervals dictionary. # Setup # Run # Assert Test that the `_transform_by_row` method is called. When the number of rows is less than or equal to the number of categories, expect that the `_transform_by_row` method is called. Setup: The categorical transformer is instantiated with 4 categories. 
Input: - data with 4 rows Output: - the output of `_transform_by_row` Side effects: - `_transform_by_row` will be called once # Setup # Run # Asserts Test the `_transform_by_row` method with numerical data. Expect that the correct transformed data is returned. Setup: The categorical transformer is instantiated with 4 categories and intervals. Input: - data with 4 rows Ouptut: - the transformed data # Setup # Run # Asserts Test that the `_reverse_transform_by_matrix` method is called. When there is enough virtual memory, expect that the `_reverse_transform_by_matrix` method is called. Setup: The categorical transformer is instantiated with 4 categories. Also patch the `psutil.virtual_memory` function to return a large enough `available_memory`. Input: - numerical data with 4 rows Output: - the output of `_reverse_transform_by_matrix` Side effects: - `_reverse_transform_by_matrix` will be called once # Setup # Run # Asserts Test the _reverse_transform_by_matrix method with numerical data Expect that the transformed data is correctly reverse transformed. Setup: The categorical transformer is instantiated with 4 categories and means. Also patch the `psutil.virtual_memory` function to return a large enough `available_memory`. Input: - transformed data with 4 rows Ouptut: - the original data # Setup # Run # Assert Test that the `_reverse_transform_by_category` method is called. When there is not enough virtual memory and the number of rows is greater than the number of categories, expect that the `_reverse_transform_by_category` method is called. Setup: The categorical transformer is instantiated with 4 categories. Also patch the `psutil.virtual_memory` function to return an `available_memory` of 1. Input: - numerical data with 5 rows Output: - the output of `_reverse_transform_by_category` Side effects: - `_reverse_transform_by_category` will be called once # Setup # Run # Asserts Test the _reverse_transform_by_category method with numerical data. 
Expect that the transformed data is correctly reverse transformed. Setup: The categorical transformer is instantiated with 4 categories, and the means and intervals are set for those categories. Also patch the `psutil.virtual_memory` function to return an `available_memory` of 1. Input: - transformed data with 5 rows Ouptut: - the original data Test the ``_get_category_from_start`` method. Setup: - instantiate a ``CategoricalTransformer``, and set the attribute ``starts`` to a pandas dataframe with ``set_index`` as ``'start'``. Input: - an integer, an index from data. Output: - a category from the data. # Setup # Run # Assert Test that the `_reverse_transform_by_row` method is called. When there is not enough virtual memory and the number of rows is less than or equal to the number of categories, expect that the `_reverse_transform_by_row` method is called. Setup: The categorical transformer is instantiated with 4 categories. Also patch the `psutil.virtual_memory` function to return an `available_memory` of 1. Input: - numerical data with 4 rows Output: - the output of `_reverse_transform_by_row` Side effects: - `_reverse_transform_by_row` will be called once # Setup # Run # Asserts Test the _reverse_transform_by_row method with numerical data. Expect that the transformed data is correctly reverse transformed. Setup: The categorical transformer is instantiated with 4 categories, and the means, starts, and intervals are set for those categories. Also patch the `psutil.virtual_memory` function to return an `available_memory` of 1. Input: - transformed data with 4 rows Ouptut: - the original data # Setup # Run # Assert Test the ``__init__`` method. Validate that the passed arguments are stored as attributes. Input: - a string passed to the ``error_on_unknown`` parameter. Side effect: - the ``error_on_unknown`` attribute is set to the passed string. 
# Run # Asserts # Setup # Assert # Setup # Assert # Setup # Run # Assert # Setup # Run # Assert Test the ``get_output_types`` method. Validate that the ``_add_prefix`` method is properly applied to the ``output_types`` dictionary. For this class, the ``output_types`` dictionary is described as: { 'value1': 'float', 'value2': 'float', ... } The number of items in the dictionary is defined by the ``dummies`` attribute. Setup: - initialize a ``OneHotEncodingTransformer`` and set: - the ``dummies`` attribute to a list. - the ``column_prefix`` attribute to a string. Output: - the ``output_types`` dictionary, but with ``self.column_prefix`` added to the beginning of the keys of the ``output_types`` dictionary. # Setup # Run # Assert Test the ``_fit`` method without nans. Check that ``self.dummies`` does not contain nans. Input: - Series with values # Setup # Run # Assert Test the ``_fit`` method without nans. Check that ``self.dummies`` contain ``np.nan``. Input: - Series with values # Setup # Run # Assert Test the ``_fit`` method without nans. Check that the settings of the transformer are properly set based on the input. Encoding should be activated Input: - Series with values # Setup # Run # Assert Test the ``_fit`` method without nans. Check that the settings of the transformer are properly set based on the input. Encoding should be deactivated Input: - Series with values # Setup # Run # Assert Test the ``_fit`` method with nans. Check that the settings of the transformer are properly set based on the input. Encoding and NA should be activated. Input: - Series with containing nan values # Setup # Run # Assert Test the ``_fit`` method with nans. Check that the settings of the transformer are properly set based on the input. Encoding should be deactivated and NA activated. Input: - Series with containing nan values # Setup # Run # Assert # Setup # Run # Assert Test the ``_transform`` method without nans. 
The values passed to ``_transform`` should be returned in a one-hot encoding representation. Input: - Series with values Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` method without nans. The values passed to ``_transform`` should be returned in a one-hot encoding representation using the categorical branch. Input: - Series with categorical values Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` method with nans. The values passed to ``_transform`` should be returned in a one-hot encoding representation. Null values should be represented by the same encoding. Input: - Series with values containing nans Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` method with nans. The values passed to ``_transform`` should be returned in a one-hot encoding representation using the categorical branch. Null values should be represented by the same encoding. Input: - Series with categorical values containing nans Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` with one category. The values passed to ``_transform`` should be returned in a one-hot encoding representation where it should be a single column. Input: - Series with a single category Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` with one category. The values passed to ``_transform`` should be returned in a one-hot encoding representation using the categorical branch where it should be a single column. Input: - Series with a single category Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` with unknown category. The values passed to ``_transform`` should be returned in a one-hot encoding representation where it should be a column of zeros. Input: - Series with unknown values Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` with unknown category. 
The values passed to ``_transform`` should be returned in a one-hot encoding representation using the categorical branch where it should be a column of zeros. Input: - Series with categorical and unknown values Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``_transform`` with unknown and nans. This is an edge case for ``_transform`` where unknowns should be zeros and nans should be the last entry in the column. Input: - Series with unknown and nans Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``transform`` without nans. In this test ``transform`` should return an identity matrix representing each item in the input. Input: - Series with categorical values Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``transform`` with nans. In this test ``transform`` should return an identity matrix representing each item in the input as well as nans. Input: - Series with categorical values and nans Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``transform`` on a single category. In this test ``transform`` should return a column filled with ones. Input: - Series with a single categorical value Output: - one-hot encoding of the input # Setup # Run # Assert Test the ``transform`` with unknown data. In this test ``transform`` should raise an error due to the attempt of transforming data with previously unseen categories. Input: - Series with unknown categorical values # Setup # Assert Test the ``transform`` on numeric input. In this test ``transform`` should return a matrix representing each item in the input as one-hot encodings. Input: - Series with numeric input Output: - one-hot encoding of the input # Setup # Run # Assert # Setup # Run # Assert # Setup # Run # Assert # Setup # Run # Assert # Setup # Run # Assert Test the ``_fit`` method. 
Validate that a unique integer representation for each category of the data is stored in the ``categories_to_values`` attribute, and the reverse is stored in the ``values_to_categories`` attribute . Setup: - create an instance of the ``LabelEncodingTransformer``. Input: - a pandas series. Side effects: - set the ``values_to_categories`` dictionary to the appropriate value. - set ``categories_to_values`` dictionary to the appropriate value. # Setup # Run # Assert Test the ``_transform`` method. Validate that each category of the passed data is replaced with its corresponding integer value. Setup: - create an instance of the ``LabelEncodingTransformer``, where ``categories_to_values`` is set to a dictionary. Input: - a pandas series. Output: - a numpy array containing the transformed data. # Setup # Run # Assert Test the ``_reverse_transform`` method with values not in map. If a value that is not in ``values_to_categories`` is passed to ``reverse_transform``, then the value should be clipped to the range of the dict's keys. Input: - array with values outside of dict Output: - categories corresponding to closest key in the dict # Setup # Run # Assert Test that the ``__init__`` method uses ``fuzzy==True`` by default. # Setup # Assert | 2.57508 | 3 |
segmentation/customs/pooling_layers.py | VolodymyrChapman/thyroidclassification | 1 | 6614633 | <gh_stars>1-10
from torch import nn
from customs.activation_functions import Mish
class ConvPool(nn.Module):
def __init__(self, ch_in, act_fun, normalization):
"""
:param ch_in:
:param act_fun:
:param normalization:
"""
super().__init__()
self.conv_pool = list()
self.conv_pool.append(nn.Conv2d(ch_in, ch_in, kernel_size=3, stride=2, padding=1, bias=True))
if act_fun == 'relu':
self.conv_pool.append(nn.ReLU(inplace=True))
elif act_fun == 'leakyrelu':
self.conv_pool.append(nn.LeakyReLU(inplace=True))
elif act_fun == 'elu':
self.conv_pool.append(nn.ELU(inplace=True))
elif act_fun == 'mish':
self.conv_pool.append(Mish())
else:
raise Exception('Unsupported activation function: {}'.format(act_fun))
if normalization == 'bn':
self.conv_pool.append(nn.BatchNorm2d(ch_in))
elif normalization == 'gn':
self.conv_pool.append(nn.GroupNorm(num_groups=8, num_channels=ch_in))
elif normalization == 'in':
self.conv_pool.append(nn.InstanceNorm2d(num_features=ch_in))
else:
raise Exception('Unsupported normalization: {}'.format(normalization))
self.conv_pool = nn.Sequential(*self.conv_pool)
def forward(self, x):
"""
:param x: Block input (image or feature maps).
:type x:
:return: Block output (feature maps).
"""
for i in range(len(self.conv_pool)):
x = self.conv_pool[i](x)
return x | from torch import nn
from customs.activation_functions import Mish
class ConvPool(nn.Module):
def __init__(self, ch_in, act_fun, normalization):
"""
:param ch_in:
:param act_fun:
:param normalization:
"""
super().__init__()
self.conv_pool = list()
self.conv_pool.append(nn.Conv2d(ch_in, ch_in, kernel_size=3, stride=2, padding=1, bias=True))
if act_fun == 'relu':
self.conv_pool.append(nn.ReLU(inplace=True))
elif act_fun == 'leakyrelu':
self.conv_pool.append(nn.LeakyReLU(inplace=True))
elif act_fun == 'elu':
self.conv_pool.append(nn.ELU(inplace=True))
elif act_fun == 'mish':
self.conv_pool.append(Mish())
else:
raise Exception('Unsupported activation function: {}'.format(act_fun))
if normalization == 'bn':
self.conv_pool.append(nn.BatchNorm2d(ch_in))
elif normalization == 'gn':
self.conv_pool.append(nn.GroupNorm(num_groups=8, num_channels=ch_in))
elif normalization == 'in':
self.conv_pool.append(nn.InstanceNorm2d(num_features=ch_in))
else:
raise Exception('Unsupported normalization: {}'.format(normalization))
self.conv_pool = nn.Sequential(*self.conv_pool)
def forward(self, x):
"""
:param x: Block input (image or feature maps).
:type x:
:return: Block output (feature maps).
"""
for i in range(len(self.conv_pool)):
x = self.conv_pool[i](x)
return x | en | 0.295494 | :param ch_in: :param act_fun: :param normalization: :param x: Block input (image or feature maps). :type x: :return: Block output (feature maps). | 2.815237 | 3 |
Deep360Pilot-CVPR17-tf1.2/demo.py | remega/OF | 0 | 6614634 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
from util import *
from glob import glob
from model import Deep360Pilot
from MeanVelocityDiff import MeanVelocityDiff
def video_base(Agent, vid_domain, vid_name):
""" Run test as a whole video, instead of cropped batches """
# Initialization
FEATURE_PATH = os.path.join(Agent.data_path, 'feature_{}_{}boxes'.format(vid_domain, Agent.n_detection), vid_name)
print FEATURE_PATH
iou = 0.0
acc = 0.0
vel_diff = 0.0
total_loss = 0.0
total_deltaloss = 0.0
# Init prediction
view_trajectory = None
init_viewangle_value = np.ones([Agent.batch_size, Agent.n_output])/2
# Init MVD
MVD = MeanVelocityDiff(W=Agent.W)
# calc n_clips
n_clips = len(glob(os.path.join(FEATURE_PATH, 'roisavg*.npy')))
assert n_clips > 0, "There is no feature file at {}".format(FEATURE_PATH)
print "Found {} clips in {}".format(n_clips, FEATURE_PATH)
# n_clips - 1 since we drop last batch which may contain null data.
n_clips = n_clips - 1
# Initial Session
with tf.Session(config = Agent.sess_config) as sess:
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
sess.run(init)
saver = tf.train.Saver()
# Load model and continue
if Agent.restore_path and tf.train.checkpoint_exists(Agent.restore_path):
saver.restore(sess, Agent.restore_path)
print "Your model restored!!!"
else:
print "Model Not Found!!!"
return False
# generate roislist and roisavg of specified video
# from 1 to n_clips only, abandon last one clip
for count in xrange(1, n_clips + 1):
# load test_data
box_center = np.load(os.path.join(FEATURE_PATH, 'divide_area_pruned_boxes{:04d}.npy'.format(count)))
roisavg_batch = np.load(os.path.join(FEATURE_PATH, 'pruned_roisavg{:04d}.npy'.format(count)))
hof_batch = np.load(os.path.join(FEATURE_PATH, 'hof{:04d}.npy'.format(count)))
box_center = np.tile(np.expand_dims(box_center, 0), [Agent.batch_size, 1, 1, 1])
roisavg_batch = np.tile(np.expand_dims(roisavg_batch, 0), [Agent.batch_size, 1, 1, 1])
hof_batch = np.tile(np.expand_dims(hof_batch, 0), [Agent.batch_size, 1, 1, 1])
oracle_viewangle_batch = np.zeros([Agent.batch_size, Agent.n_frames, Agent.n_output])
one_hot_label_batch = np.zeros([Agent.batch_size, Agent.n_frames, Agent.n_detection])
box = box_center.copy()
gt = oracle_viewangle_batch.copy()
box_center[:,:,:,0] = (box_center[:,:,:,0]/Agent.W + box_center[:,:,:,2]/Agent.W)/2
box_center[:,:,:,1] = (box_center[:,:,:,1]/Agent.H + box_center[:,:,:,3]/Agent.H)/2
box_center = box_center[:, :, :, :2]
oracle_viewangle_batch[:,:,0] = oracle_viewangle_batch[:,:,0]/Agent.W
oracle_viewangle_batch[:,:,1] = oracle_viewangle_batch[:,:,1]/Agent.H
[loss, deltaloss, viewangle_out, sal_box_out] = sess.run(
[Agent.cost, Agent.delta, Agent.viewangle, Agent.sal_box_prob], \
feed_dict={
Agent.obj_app: roisavg_batch,
Agent.oracle_actions: one_hot_label_batch,
Agent.oracle_viewangle: oracle_viewangle_batch, \
Agent.box_center: box_center,
Agent.hof: hof_batch,
Agent.keep_prob:1.0,
Agent.init_viewangle: init_viewangle_value,
Agent._phase: Agent.bool_two_phase
}
)
total_loss += loss/Agent.n_frames
total_deltaloss += deltaloss/Agent.n_frames
# Feed in init value to next batch
init_viewangle_value = viewangle_out[:,-1,:].copy()
viewangle_out[:,:,0] = (viewangle_out[:,:,0]*Agent.W).astype(int)
viewangle_out[:,:,1] = (viewangle_out[:,:,1]*Agent.H).astype(int)
corr = np.sum(np.logical_and(one_hot_label_batch, sal_box_out))
ac = float(corr) / (Agent.batch_size * Agent.n_frames)
iu = score(Agent, viewangle_out, gt[:,:,:2], False)
# only one row in batch are used, average to get result.
# convert into degree form (* 360 / 1920 / Agent.n_frames)
vd = MVD.batch_vel_diff(viewangle_out) * 0.1875 / (Agent.n_frames)
acc += ac
iou += iu
vel_diff += vd
print "Video: {:3d} | Corr: {:3d}, IoU: {:.3f}, Acc: {:.3f}, Vel_diff: {:.3f}".format(
count, corr, iu, ac, vd)
print "Oracle: ", np.where(one_hot_label_batch[0])
print "----------------------------------------------------------------"
print "Prediction: ", np.where(sal_box_out[0])
if view_trajectory is None:
view_trajectory = viewangle_out[0].copy()
else:
view_trajectory = np.vstack((view_trajectory, viewangle_out[0].copy()))
ret = 0
if Agent._show:
nimages = (count-1)*Agent.n_frames
for nimage in xrange(Agent.n_frames):
vidname = vid_name + '/' + str(nimages+nimage+1).zfill(6)
if Agent._save_img and not os.path.isdir(Agent.save_path + vid_name):
print 'Make dir at ' + Agent.save_path + vid_name
os.makedirs(Agent.save_path + vid_name) # mkdir recursively
if Agent._show:
print
print ("num_batch: {}, video: {}, count: {}, nimage: {}").format(n_clips, vidname, count, nimage)
ret = visual_gaze(Agent, vidname, gt[0,nimage,:2], viewangle_out[0,nimage, :], sal_box_out[0,nimage, :], box[0,nimage, :, :])
if ret == -1 or ret == -2 or ret == -3:
break
if ret == -1 or ret == -2:
break
if ret == -1:
break
print "Loss = {:.3f}".format(total_loss/n_clips) # 40/20, number of training/testing set
print "DeltaLoss = {:.3f}".format(total_deltaloss/n_clips)
print "IOU = {:.3f}".format(iou/n_clips)
print "Acc = {:.3f}".format(acc/n_clips)
print "Velocity Diff = {:.3f}".format(vel_diff/n_clips)
if Agent._save_pred:
print view_trajectory.shape
out_path = '{}{}_{}_{}_lam{}_{}_best_model'.format(
Agent.save_path,
vid_name,
Agent.domain,
Agent.n_detection,
Agent.regress_lmbda,
Agent.two_phase)
print "Save prediction of vid {} to {}".format(vid_name, out_path)
np.save(out_path, view_trajectory)
with open(out_path + '.txt', 'w') as f:
f.write("Loss = {:.5f}\n".format(total_loss/n_clips))
f.write("DeltaLoss = {:.5f}\n".format(total_deltaloss/n_clips))
f.write("IOU = {:.5f}\n".format(iou/n_clips))
f.write("Acc = {:.5f}\n".format(acc/n_clips))
f.write("Velocity Diff = {:.5f}\n".format(vel_diff/n_clips))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import tensorflow as tf
from util import *
from glob import glob
from model import Deep360Pilot
from MeanVelocityDiff import MeanVelocityDiff
def video_base(Agent, vid_domain, vid_name):
""" Run test as a whole video, instead of cropped batches """
# Initialization
FEATURE_PATH = os.path.join(Agent.data_path, 'feature_{}_{}boxes'.format(vid_domain, Agent.n_detection), vid_name)
print FEATURE_PATH
iou = 0.0
acc = 0.0
vel_diff = 0.0
total_loss = 0.0
total_deltaloss = 0.0
# Init prediction
view_trajectory = None
init_viewangle_value = np.ones([Agent.batch_size, Agent.n_output])/2
# Init MVD
MVD = MeanVelocityDiff(W=Agent.W)
# calc n_clips
n_clips = len(glob(os.path.join(FEATURE_PATH, 'roisavg*.npy')))
assert n_clips > 0, "There is no feature file at {}".format(FEATURE_PATH)
print "Found {} clips in {}".format(n_clips, FEATURE_PATH)
# n_clips - 1 since we drop last batch which may contain null data.
n_clips = n_clips - 1
# Initial Session
with tf.Session(config = Agent.sess_config) as sess:
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
sess.run(init)
saver = tf.train.Saver()
# Load model and continue
if Agent.restore_path and tf.train.checkpoint_exists(Agent.restore_path):
saver.restore(sess, Agent.restore_path)
print "Your model restored!!!"
else:
print "Model Not Found!!!"
return False
# generate roislist and roisavg of specified video
# from 1 to n_clips only, abandon last one clip
for count in xrange(1, n_clips + 1):
# load test_data
box_center = np.load(os.path.join(FEATURE_PATH, 'divide_area_pruned_boxes{:04d}.npy'.format(count)))
roisavg_batch = np.load(os.path.join(FEATURE_PATH, 'pruned_roisavg{:04d}.npy'.format(count)))
hof_batch = np.load(os.path.join(FEATURE_PATH, 'hof{:04d}.npy'.format(count)))
box_center = np.tile(np.expand_dims(box_center, 0), [Agent.batch_size, 1, 1, 1])
roisavg_batch = np.tile(np.expand_dims(roisavg_batch, 0), [Agent.batch_size, 1, 1, 1])
hof_batch = np.tile(np.expand_dims(hof_batch, 0), [Agent.batch_size, 1, 1, 1])
oracle_viewangle_batch = np.zeros([Agent.batch_size, Agent.n_frames, Agent.n_output])
one_hot_label_batch = np.zeros([Agent.batch_size, Agent.n_frames, Agent.n_detection])
box = box_center.copy()
gt = oracle_viewangle_batch.copy()
box_center[:,:,:,0] = (box_center[:,:,:,0]/Agent.W + box_center[:,:,:,2]/Agent.W)/2
box_center[:,:,:,1] = (box_center[:,:,:,1]/Agent.H + box_center[:,:,:,3]/Agent.H)/2
box_center = box_center[:, :, :, :2]
oracle_viewangle_batch[:,:,0] = oracle_viewangle_batch[:,:,0]/Agent.W
oracle_viewangle_batch[:,:,1] = oracle_viewangle_batch[:,:,1]/Agent.H
[loss, deltaloss, viewangle_out, sal_box_out] = sess.run(
[Agent.cost, Agent.delta, Agent.viewangle, Agent.sal_box_prob], \
feed_dict={
Agent.obj_app: roisavg_batch,
Agent.oracle_actions: one_hot_label_batch,
Agent.oracle_viewangle: oracle_viewangle_batch, \
Agent.box_center: box_center,
Agent.hof: hof_batch,
Agent.keep_prob:1.0,
Agent.init_viewangle: init_viewangle_value,
Agent._phase: Agent.bool_two_phase
}
)
total_loss += loss/Agent.n_frames
total_deltaloss += deltaloss/Agent.n_frames
# Feed in init value to next batch
init_viewangle_value = viewangle_out[:,-1,:].copy()
viewangle_out[:,:,0] = (viewangle_out[:,:,0]*Agent.W).astype(int)
viewangle_out[:,:,1] = (viewangle_out[:,:,1]*Agent.H).astype(int)
corr = np.sum(np.logical_and(one_hot_label_batch, sal_box_out))
ac = float(corr) / (Agent.batch_size * Agent.n_frames)
iu = score(Agent, viewangle_out, gt[:,:,:2], False)
# only one row in batch are used, average to get result.
# convert into degree form (* 360 / 1920 / Agent.n_frames)
vd = MVD.batch_vel_diff(viewangle_out) * 0.1875 / (Agent.n_frames)
acc += ac
iou += iu
vel_diff += vd
print "Video: {:3d} | Corr: {:3d}, IoU: {:.3f}, Acc: {:.3f}, Vel_diff: {:.3f}".format(
count, corr, iu, ac, vd)
print "Oracle: ", np.where(one_hot_label_batch[0])
print "----------------------------------------------------------------"
print "Prediction: ", np.where(sal_box_out[0])
if view_trajectory is None:
view_trajectory = viewangle_out[0].copy()
else:
view_trajectory = np.vstack((view_trajectory, viewangle_out[0].copy()))
ret = 0
if Agent._show:
nimages = (count-1)*Agent.n_frames
for nimage in xrange(Agent.n_frames):
vidname = vid_name + '/' + str(nimages+nimage+1).zfill(6)
if Agent._save_img and not os.path.isdir(Agent.save_path + vid_name):
print 'Make dir at ' + Agent.save_path + vid_name
os.makedirs(Agent.save_path + vid_name) # mkdir recursively
if Agent._show:
print
print ("num_batch: {}, video: {}, count: {}, nimage: {}").format(n_clips, vidname, count, nimage)
ret = visual_gaze(Agent, vidname, gt[0,nimage,:2], viewangle_out[0,nimage, :], sal_box_out[0,nimage, :], box[0,nimage, :, :])
if ret == -1 or ret == -2 or ret == -3:
break
if ret == -1 or ret == -2:
break
if ret == -1:
break
print "Loss = {:.3f}".format(total_loss/n_clips) # 40/20, number of training/testing set
print "DeltaLoss = {:.3f}".format(total_deltaloss/n_clips)
print "IOU = {:.3f}".format(iou/n_clips)
print "Acc = {:.3f}".format(acc/n_clips)
print "Velocity Diff = {:.3f}".format(vel_diff/n_clips)
if Agent._save_pred:
print view_trajectory.shape
out_path = '{}{}_{}_{}_lam{}_{}_best_model'.format(
Agent.save_path,
vid_name,
Agent.domain,
Agent.n_detection,
Agent.regress_lmbda,
Agent.two_phase)
print "Save prediction of vid {} to {}".format(vid_name, out_path)
np.save(out_path, view_trajectory)
with open(out_path + '.txt', 'w') as f:
f.write("Loss = {:.5f}\n".format(total_loss/n_clips))
f.write("DeltaLoss = {:.5f}\n".format(total_deltaloss/n_clips))
f.write("IOU = {:.5f}\n".format(iou/n_clips))
f.write("Acc = {:.5f}\n".format(acc/n_clips))
f.write("Velocity Diff = {:.5f}\n".format(vel_diff/n_clips))
| en | 0.701793 | #!/usr/bin/env python # -*- coding: utf-8 -*- Run test as a whole video, instead of cropped batches # Initialization # Init prediction # Init MVD # calc n_clips # n_clips - 1 since we drop last batch which may contain null data. # Initial Session # Initializing the variables # Launch the graph # Load model and continue # generate roislist and roisavg of specified video # from 1 to n_clips only, abandon last one clip # load test_data # Feed in init value to next batch # only one row in batch are used, average to get result. # convert into degree form (* 360 / 1920 / Agent.n_frames) # mkdir recursively # 40/20, number of training/testing set | 2.348528 | 2 |
dazu/server.py | Dazu-io/dazu | 2 | 6614635 | from sanic import Sanic
from dazu import version
from dazu.components.engine import Engine
from dazu.config import DazuConfig
from dazu.routes import router
class Server:
config: DazuConfig
engine: Engine
@classmethod
def start(cls, config: DazuConfig, engine: Engine):
cls.config = config
cls.engine = engine
app = Sanic()
# app.config['DAVID_CONFIG', config]
# app.config['DAVID_ENGINE', engine]
app.config["API_BASEPATH"] = "/api"
app.config["API_TITLE"] = "Dazu"
app.config["API_VERSION"] = version.__version__
app.blueprint(router)
app.run(host="0.0.0.0", port=5000)
| from sanic import Sanic
from dazu import version
from dazu.components.engine import Engine
from dazu.config import DazuConfig
from dazu.routes import router
class Server:
config: DazuConfig
engine: Engine
@classmethod
def start(cls, config: DazuConfig, engine: Engine):
cls.config = config
cls.engine = engine
app = Sanic()
# app.config['DAVID_CONFIG', config]
# app.config['DAVID_ENGINE', engine]
app.config["API_BASEPATH"] = "/api"
app.config["API_TITLE"] = "Dazu"
app.config["API_VERSION"] = version.__version__
app.blueprint(router)
app.run(host="0.0.0.0", port=5000)
| en | 0.267234 | # app.config['DAVID_CONFIG', config] # app.config['DAVID_ENGINE', engine] | 2.177534 | 2 |
API/app/Models/Course.py | prattcmp/Attendance-Manager | 4 | 6614636 | <reponame>prattcmp/Attendance-Manager
from ..Models import db
from enum import Enum
users_courses = db.Table(
'users_courses',
db.Column('user_id', db.Integer, db.ForeignKey('users.id', ondelete='CASCADE')),
db.Column('course_id', db.Integer, db.ForeignKey('courses.id', ondelete='CASCADE'))
)
# Creating the courses table
class Course(db.Model):
__tablename__ = 'courses'
# Creating the columns of the course table
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False, server_default='')
enrollment_code = db.Column(db.String(8))
schedules = db.relationship('Schedule', passive_deletes=True, backref='course', lazy="joined")
users = db.relationship('User',
secondary=users_courses,
backref=db.backref('courses', passive_deletes=True, lazy='dynamic'))
roles = db.relationship('Role', passive_deletes=True, backref='course', lazy='dynamic')
created_at = db.Column(db.DateTime, default=db.func.now())
modified_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
def __init__(self, name=None):
if name is not None:
self.name = name
def to_dict(self, role_name=None, schedule=None):
course_dict = {
"id": self.id,
"name": self.name,
"enrollment_code": self.enrollment_code,
"created_at": str(self.created_at),
"modified_at": str(self.modified_at)
}
if role_name is not None:
if isinstance(role_name, Enum):
role_name = role_name.value
course_dict['role'] = role_name
# Adds schedule and role information to dict
if schedule is not None:
course_dict.update(schedule.to_dict())
return course_dict
| from ..Models import db
from enum import Enum
users_courses = db.Table(
'users_courses',
db.Column('user_id', db.Integer, db.ForeignKey('users.id', ondelete='CASCADE')),
db.Column('course_id', db.Integer, db.ForeignKey('courses.id', ondelete='CASCADE'))
)
# Creating the courses table
class Course(db.Model):
__tablename__ = 'courses'
# Creating the columns of the course table
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), nullable=False, server_default='')
enrollment_code = db.Column(db.String(8))
schedules = db.relationship('Schedule', passive_deletes=True, backref='course', lazy="joined")
users = db.relationship('User',
secondary=users_courses,
backref=db.backref('courses', passive_deletes=True, lazy='dynamic'))
roles = db.relationship('Role', passive_deletes=True, backref='course', lazy='dynamic')
created_at = db.Column(db.DateTime, default=db.func.now())
modified_at = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
def __init__(self, name=None):
if name is not None:
self.name = name
def to_dict(self, role_name=None, schedule=None):
course_dict = {
"id": self.id,
"name": self.name,
"enrollment_code": self.enrollment_code,
"created_at": str(self.created_at),
"modified_at": str(self.modified_at)
}
if role_name is not None:
if isinstance(role_name, Enum):
role_name = role_name.value
course_dict['role'] = role_name
# Adds schedule and role information to dict
if schedule is not None:
course_dict.update(schedule.to_dict())
return course_dict | en | 0.682001 | # Creating the courses table # Creating the columns of the course table # Adds schedule and role information to dict | 2.551554 | 3 |
main/subsets/subsets-2.py | EliahKagan/old-practice-snapshot | 0 | 6614637 | <gh_stars>0
class Solution:
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
powerset = [[]]
for singleton in ([element] for element in nums):
for subset in itertools.islice(powerset, len(powerset)):
powerset.append(subset + singleton)
return powerset
| class Solution:
def subsets(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
powerset = [[]]
for singleton in ([element] for element in nums):
for subset in itertools.islice(powerset, len(powerset)):
powerset.append(subset + singleton)
return powerset | en | 0.118508 | :type nums: List[int] :rtype: List[List[int]] | 3.204445 | 3 |
randimage/utils.py | nareto/randimage | 21 | 6614638 | import random
import matplotlib.pyplot as plt
import numpy as np
from .masks import MASKS
from .paths import PATHS
from .coloredpath import ColoredPath
def show_array(array, cmap='gray'):
plt.imshow(array, cmap=cmap)
plt.tight_layout()
plt.axis('off')
plt.show()
def show_img_list(img_list, shape, cmap='gray', figsize=None):
# figs, axis = plt.subplots(shape[0],shape[1])
nrow,ncol = shape
if figsize is None: figsize = (ncol + 1, nrow + 1)
fig, axis = plt.subplots(
nrow, ncol,
gridspec_kw=dict(wspace=0.0, hspace=0.0,
top=1. - 0.5 / (figsize[1]), bottom=0.5 / (figsize[1]),
left=0.5 / (figsize[0]), right=1 - 0.5 / (figsize[0])),
figsize=figsize,
sharey='row', sharex='col', # optionally
)
for idx, img in enumerate(img_list):
row, col = np.unravel_index(idx, shape=shape)
ax = axis[row, col]
ax.imshow(img, cmap=cmap)
ax.axis('off')
fig.show()
return fig
def get_random_image(img_size):
mask = random.choice(MASKS)(img_size).get_mask()
path = random.choice(PATHS)(mask).get_path()
img = ColoredPath(path, img_size).get_colored_path()
return img | import random
import matplotlib.pyplot as plt
import numpy as np
from .masks import MASKS
from .paths import PATHS
from .coloredpath import ColoredPath
def show_array(array, cmap='gray'):
plt.imshow(array, cmap=cmap)
plt.tight_layout()
plt.axis('off')
plt.show()
def show_img_list(img_list, shape, cmap='gray', figsize=None):
# figs, axis = plt.subplots(shape[0],shape[1])
nrow,ncol = shape
if figsize is None: figsize = (ncol + 1, nrow + 1)
fig, axis = plt.subplots(
nrow, ncol,
gridspec_kw=dict(wspace=0.0, hspace=0.0,
top=1. - 0.5 / (figsize[1]), bottom=0.5 / (figsize[1]),
left=0.5 / (figsize[0]), right=1 - 0.5 / (figsize[0])),
figsize=figsize,
sharey='row', sharex='col', # optionally
)
for idx, img in enumerate(img_list):
row, col = np.unravel_index(idx, shape=shape)
ax = axis[row, col]
ax.imshow(img, cmap=cmap)
ax.axis('off')
fig.show()
return fig
def get_random_image(img_size):
mask = random.choice(MASKS)(img_size).get_mask()
path = random.choice(PATHS)(mask).get_path()
img = ColoredPath(path, img_size).get_colored_path()
return img | en | 0.329531 | # figs, axis = plt.subplots(shape[0],shape[1]) # optionally | 2.764377 | 3 |
crc-sus.py | ds-04/slurm-bank | 3 | 6614639 | <filename>crc-sus.py
#!/usr/bin/env /absolute/path/to/py_wrap.sh
''' crc-sus.py -- Get SUs from crc-bank.db
Usage:
crc-sus.py <account>
crc-sus.py -h | --help
crc-sus.py -v | --version
Positional Arguments:
<account> The Slurm account
Options:
-h --help Print this screen and exit
-v --version Print the version of crc-sus.py
'''
# Test:
# 1. Make sure item exists
def check_item_in_table(table, account):
if table.find_one(account=account) is None:
exit("ERROR: The account: {0} doesn't appear to exist".format(account))
# Constants/Parameters, modify these
DATABASE = '/abolute/path/to/crc-bank.db'
import dataset
from docopt import docopt
# The magical mystical docopt line
arguments = docopt(__doc__, version='crc-sus.py version 0.0.1')
# Connect to the database and get the limits table
# Absolute path ////
db = dataset.connect('sqlite:///{0}'.format(DATABASE))
table = db['crc']
# Check that account exists
check_item_in_table(table, arguments['<account>'])
# Print out SUs
string = "Account {0} has {1} SUs"
sus = table.find_one(account=arguments['<account>'])['su_limit_hrs']
print(string.format(arguments['<account>'], sus))
| <filename>crc-sus.py
#!/usr/bin/env /absolute/path/to/py_wrap.sh
''' crc-sus.py -- Get SUs from crc-bank.db
Usage:
crc-sus.py <account>
crc-sus.py -h | --help
crc-sus.py -v | --version
Positional Arguments:
<account> The Slurm account
Options:
-h --help Print this screen and exit
-v --version Print the version of crc-sus.py
'''
# Test:
# 1. Make sure item exists
def check_item_in_table(table, account):
if table.find_one(account=account) is None:
exit("ERROR: The account: {0} doesn't appear to exist".format(account))
# Constants/Parameters, modify these
DATABASE = '/abolute/path/to/crc-bank.db'
import dataset
from docopt import docopt
# The magical mystical docopt line
arguments = docopt(__doc__, version='crc-sus.py version 0.0.1')
# Connect to the database and get the limits table
# Absolute path ////
db = dataset.connect('sqlite:///{0}'.format(DATABASE))
table = db['crc']
# Check that account exists
check_item_in_table(table, arguments['<account>'])
# Print out SUs
string = "Account {0} has {1} SUs"
sus = table.find_one(account=arguments['<account>'])['su_limit_hrs']
print(string.format(arguments['<account>'], sus))
| en | 0.370579 | #!/usr/bin/env /absolute/path/to/py_wrap.sh crc-sus.py -- Get SUs from crc-bank.db Usage: crc-sus.py <account> crc-sus.py -h | --help crc-sus.py -v | --version Positional Arguments: <account> The Slurm account Options: -h --help Print this screen and exit -v --version Print the version of crc-sus.py # Test: # 1. Make sure item exists # Constants/Parameters, modify these # The magical mystical docopt line # Connect to the database and get the limits table # Absolute path //// # Check that account exists # Print out SUs | 2.486018 | 2 |
pyrender/__init__.py | HotShot0901/PyRender | 0 | 6614640 | <gh_stars>0
from .core import *
from .vectors import *
| from .core import *
from .vectors import * | none | 1 | 1.088784 | 1 | |
tornado_proxy/settings.py | linuxhenhao/tornado_proxy | 1 | 6614641 | #!/usr/bin/env python
# https option
https_enabled = False
# if https enabled,
'''
url_rules are constructed by
(front end host description, target host description)
tuples, the description string is constituted by colon delimited
scheme and host string, eg "https:www.baidu.com".
'''
url_rules = [
# ('https:test.com', 'https:scholar.google.com'),
]
'''
the key in self_resolve is host string, and the value is a
list contains ip string whether ipv4 or ipv6 address is ok.
when redirect request, if the target host in request after
redirect is in this dict, one of the ip address in the list
will be randomly choosed to fetch data from.
'''
self_resolve = {
# 'abc.com': ['1.1.1.1', '2.2.2.2'],
}
# allow tornado.httpclient using ipv6 to fetch if ipv6 is
# available both in proxy host and target host
allow_ipv6 = True
'''
filter_patterns: a django url_patterns like list, regular expression will be used
to match which filter will be applied to that response
'''
filter_patterns = [
# (r'^scholar\.google\.\w+', 'tornado_proxy.filter.google'),
]
| #!/usr/bin/env python
# https option
https_enabled = False
# if https enabled,
'''
url_rules are constructed by
(front end host description, target host description)
tuples, the description string is constituted by colon delimited
scheme and host string, eg "https:www.baidu.com".
'''
url_rules = [
# ('https:test.com', 'https:scholar.google.com'),
]
'''
the key in self_resolve is host string, and the value is a
list contains ip string whether ipv4 or ipv6 address is ok.
when redirect request, if the target host in request after
redirect is in this dict, one of the ip address in the list
will be randomly choosed to fetch data from.
'''
self_resolve = {
# 'abc.com': ['1.1.1.1', '2.2.2.2'],
}
# allow tornado.httpclient using ipv6 to fetch if ipv6 is
# available both in proxy host and target host
allow_ipv6 = True
'''
filter_patterns: a django url_patterns like list, regular expression will be used
to match which filter will be applied to that response
'''
filter_patterns = [
# (r'^scholar\.google\.\w+', 'tornado_proxy.filter.google'),
]
| en | 0.80632 | #!/usr/bin/env python # https option # if https enabled, url_rules are constructed by (front end host description, target host description) tuples, the description string is constituted by colon delimited scheme and host string, eg "https:www.baidu.com". # ('https:test.com', 'https:scholar.google.com'), the key in self_resolve is host string, and the value is a list contains ip string whether ipv4 or ipv6 address is ok. when redirect request, if the target host in request after redirect is in this dict, one of the ip address in the list will be randomly choosed to fetch data from. # 'abc.com': ['1.1.1.1', '2.2.2.2'], # allow tornado.httpclient using ipv6 to fetch if ipv6 is # available both in proxy host and target host filter_patterns: a django url_patterns like list, regular expression will be used to match which filter will be applied to that response # (r'^scholar\.google\.\w+', 'tornado_proxy.filter.google'), | 2.874303 | 3 |
eshop/src/e_shop/alembic/versions/19602cc4f40_add_column_auth_user_id_to_table_user.py | zhenglong/eshop | 0 | 6614642 | <filename>eshop/src/e_shop/alembic/versions/19602cc4f40_add_column_auth_user_id_to_table_user.py<gh_stars>0
"""add column auth_user_id to table user
Revision ID: 19602cc4f40
Revises: <KEY>
Create Date: 2015-06-28 01:15:16.507924
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Column, Integer, ForeignKey, Boolean
def upgrade():
op.add_column('user', Column('auth_user_id', Integer, ForeignKey('auth_user.id')))
pass
def downgrade():
op.drop_constraint('user_ibfk_1', 'user', 'foreignkey')
op.drop_column('user', 'auth_user_id')
pass
| <filename>eshop/src/e_shop/alembic/versions/19602cc4f40_add_column_auth_user_id_to_table_user.py<gh_stars>0
"""add column auth_user_id to table user
Revision ID: 19602cc4f40
Revises: <KEY>
Create Date: 2015-06-28 01:15:16.507924
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy import Column, Integer, ForeignKey, Boolean
def upgrade():
op.add_column('user', Column('auth_user_id', Integer, ForeignKey('auth_user.id')))
pass
def downgrade():
op.drop_constraint('user_ibfk_1', 'user', 'foreignkey')
op.drop_column('user', 'auth_user_id')
pass
| en | 0.423562 | add column auth_user_id to table user Revision ID: 19602cc4f40 Revises: <KEY> Create Date: 2015-06-28 01:15:16.507924 # revision identifiers, used by Alembic. | 1.156647 | 1 |
Code/utilsKTP1/calculateStringDistance.py | peiyong-addwater/COMP90049_Knowledge_Technology_Project_1 | 0 | 6614643 | import nltk
# String edit distance (Levenshtein), see https://en.wikipedia.org/wiki/Levenshtein_distance
def editDistance(word1, word2):
return nltk.edit_distance(word1, word2)
# Jaccard distance between two words. See https://en.wikipedia.org/wiki/Jaccard_index
def jaccardDistance(word1, word2):
return nltk.jaccard_distance(set(word1), set(word2))
# Jaccard distance with n-gram
def jaccardDistanceNGram(word1, word2, n=3):
w1_chars = nltk.ngrams(word1, n, pad_left=True, pad_right=True, left_pad_symbol=' ', right_pad_symbol=' ')
w2_chars = nltk.ngrams(word2, n, pad_left=True, pad_right=True, left_pad_symbol=' ', right_pad_symbol=' ')
return nltk.jaccard_distance(set(w1_chars), set(w2_chars))
| import nltk
# String edit distance (Levenshtein), see https://en.wikipedia.org/wiki/Levenshtein_distance
def editDistance(word1, word2):
return nltk.edit_distance(word1, word2)
# Jaccard distance between two words. See https://en.wikipedia.org/wiki/Jaccard_index
def jaccardDistance(word1, word2):
return nltk.jaccard_distance(set(word1), set(word2))
# Jaccard distance with n-gram
def jaccardDistanceNGram(word1, word2, n=3):
w1_chars = nltk.ngrams(word1, n, pad_left=True, pad_right=True, left_pad_symbol=' ', right_pad_symbol=' ')
w2_chars = nltk.ngrams(word2, n, pad_left=True, pad_right=True, left_pad_symbol=' ', right_pad_symbol=' ')
return nltk.jaccard_distance(set(w1_chars), set(w2_chars))
| en | 0.726005 | # String edit distance (Levenshtein), see https://en.wikipedia.org/wiki/Levenshtein_distance # Jaccard distance between two words. See https://en.wikipedia.org/wiki/Jaccard_index # Jaccard distance with n-gram | 3.710665 | 4 |
utils/plot_utils.py | emavroudi/jsalt18-actrec-lab | 6 | 6614644 | import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from numpy import array, mean, unique, vstack
from os.path import join
mpl.rcParams.update({'font.size': 18})
def my_plot(vector, xlabel_str=None, ylabel_str=None, title_str=None,
output_file=None):
plt.plot(vector)
if xlabel_str is not None:
plt.xlabel(xlabel_str)
if ylabel_str is not None:
plt.ylabel(ylabel_str)
if title_str is not None:
plt.title(title_str)
if output_file is not None:
plt.savefig(output_file)
def imshow_(x, **kwargs):
if x.ndim == 2:
im = plt.imshow(x, interpolation="nearest", **kwargs)
elif x.ndim == 1:
im = plt.imshow(x[:,None].T, interpolation="nearest", **kwargs)
plt.yticks([])
plt.axis("tight")
return im
def viz_sequence_predictions(nb_classes, split, y_pred, y_true, output_file):
# # Output all truth/prediction pairs
plt.figure(split, figsize=(20, 10))
n_test = len(y_true)
P_test_ = array(y_pred) / float(nb_classes - 1)
y_test_ = array(y_true) / float(nb_classes - 1)
values = []
for i in range(len(y_true)):
P_tmp = vstack([y_test_[i][:], P_test_[i][:]])
plt.subplot(n_test, 1, i + 1)
im = imshow_(P_tmp, vmin=0, vmax=1, cmap=plt.cm.jet)
plt.xticks([])
plt.yticks([])
acc = mean(y_true[i] == y_pred[i]) * 100
plt.ylabel("{:.01f}".format(acc))
values.append(unique(P_tmp.ravel()))
print("Visualized predictions")
plt.savefig(output_file)
plt.clf()
def plot_label_seq(label_seq, nb_classes, y_label=None, actions=None,
cmap='rainbow', output_file=None, title=None,
legend=None, figsize=None):
if figsize is None:
figsize = (20, 2)
# Output all truth/prediction pairs
actions_in_seq = unique(label_seq)
fig = plt.figure(figsize=figsize)
norm_label_seq = array(label_seq) / float(nb_classes-1)
im = imshow_(norm_label_seq, vmin=0, vmax=1, cmap=plt.get_cmap(cmap))
if y_label is not None:
plt.ylabel("{}".format(y_label))
if title is not None:
plt.title(title)
if legend is not None:
values = unique(norm_label_seq.ravel())
# get the colors of the values, according to the
# colormap used by imshow
colors = [im.cmap(im.norm(value)) for value in values]
# create a patch (proxy artist) for every color
if actions is None:
patches = [
mpatches.Patch(color=colors[i],
label="Action {}".format(values[i]))
for i in range(len(values))]
else:
patches = [
mpatches.Patch(color=colors[i],
label="{}".format(actions[actions_in_seq[i]]))
for i in range(len(values))]
# put those patched as legend-handles into the legend
lgd = plt.legend(handles=patches, bbox_to_anchor=(1.2, 0.5),
loc='center right', borderaxespad=0.)
if output_file is not None:
if legend is not None:
plt.savefig(output_file, dpi=300,
bbox_extra_artists=(lgd,), bbox_inches='tight')
else:
plt.savefig(output_file, dpi=300, bbox_inches='tight')
plt.clf()
plt.close(fig)
def plot_optimization_log_frame(optimization_log, output_dir,
nb_epochs=None):
# Plot frame loss
output_file = join(output_dir, 'frame_loss.png')
variables = ['train_frame_loss', 'val_frame_loss']
linestyles = ['-', ':']
colors = ['b', 'r']
title = 'Frame loss'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame train loss
output_file = join(output_dir, 'train_frame_loss.png')
variables = ['train_frame_loss']
linestyles = ['-']
colors = ['b']
title = 'Frame loss'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame validation loss
output_file = join(output_dir, 'val_frame_loss.png')
variables = ['val_frame_loss']
linestyles = [':']
colors = ['r']
title = 'Frame loss'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame train metrics
output_file = join(output_dir, 'train_frame_metric.png')
variables = ['train_frame_metric']
linestyles = ['-']
colors = ['b']
title = 'Frame metric'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame val metrics
output_file = join(output_dir, 'val_frame_metric.png')
variables = ['val_frame_metric']
linestyles = [':']
colors = ['r']
title = 'Frame metric'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
def plot_lines(variables, lines_dict, linestyles=None, colors=None,
title=None, output_file=None, nb_epochs=None,
xlabel=None):
# Plot
var_cnt = 0
legends = []
for variable in variables:
x = lines_dict[variable]
if nb_epochs is None:
nb_epochs = len(x)
else:
nb_epochs = min(len(x), nb_epochs)
if linestyles is None:
linestyle = '-'
else:
linestyle = linestyles[var_cnt]
if colors is None:
color = 'b'
else:
color = colors[var_cnt]
plt.plot(range(0, nb_epochs), x[:nb_epochs],
linestyle=linestyle, color=color)
legends.append(variable)
var_cnt += 1
plt.title(title)
if xlabel is None:
xlabel = 'Epochs'
plt.xlabel(xlabel)
plt.legend(legends, loc='best')
if output_file is not None:
plt.savefig(output_file)
# plt.show()
plt.clf()
| import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from numpy import array, mean, unique, vstack
from os.path import join
mpl.rcParams.update({'font.size': 18})
def my_plot(vector, xlabel_str=None, ylabel_str=None, title_str=None,
output_file=None):
plt.plot(vector)
if xlabel_str is not None:
plt.xlabel(xlabel_str)
if ylabel_str is not None:
plt.ylabel(ylabel_str)
if title_str is not None:
plt.title(title_str)
if output_file is not None:
plt.savefig(output_file)
def imshow_(x, **kwargs):
if x.ndim == 2:
im = plt.imshow(x, interpolation="nearest", **kwargs)
elif x.ndim == 1:
im = plt.imshow(x[:,None].T, interpolation="nearest", **kwargs)
plt.yticks([])
plt.axis("tight")
return im
def viz_sequence_predictions(nb_classes, split, y_pred, y_true, output_file):
# # Output all truth/prediction pairs
plt.figure(split, figsize=(20, 10))
n_test = len(y_true)
P_test_ = array(y_pred) / float(nb_classes - 1)
y_test_ = array(y_true) / float(nb_classes - 1)
values = []
for i in range(len(y_true)):
P_tmp = vstack([y_test_[i][:], P_test_[i][:]])
plt.subplot(n_test, 1, i + 1)
im = imshow_(P_tmp, vmin=0, vmax=1, cmap=plt.cm.jet)
plt.xticks([])
plt.yticks([])
acc = mean(y_true[i] == y_pred[i]) * 100
plt.ylabel("{:.01f}".format(acc))
values.append(unique(P_tmp.ravel()))
print("Visualized predictions")
plt.savefig(output_file)
plt.clf()
def plot_label_seq(label_seq, nb_classes, y_label=None, actions=None,
cmap='rainbow', output_file=None, title=None,
legend=None, figsize=None):
if figsize is None:
figsize = (20, 2)
# Output all truth/prediction pairs
actions_in_seq = unique(label_seq)
fig = plt.figure(figsize=figsize)
norm_label_seq = array(label_seq) / float(nb_classes-1)
im = imshow_(norm_label_seq, vmin=0, vmax=1, cmap=plt.get_cmap(cmap))
if y_label is not None:
plt.ylabel("{}".format(y_label))
if title is not None:
plt.title(title)
if legend is not None:
values = unique(norm_label_seq.ravel())
# get the colors of the values, according to the
# colormap used by imshow
colors = [im.cmap(im.norm(value)) for value in values]
# create a patch (proxy artist) for every color
if actions is None:
patches = [
mpatches.Patch(color=colors[i],
label="Action {}".format(values[i]))
for i in range(len(values))]
else:
patches = [
mpatches.Patch(color=colors[i],
label="{}".format(actions[actions_in_seq[i]]))
for i in range(len(values))]
# put those patched as legend-handles into the legend
lgd = plt.legend(handles=patches, bbox_to_anchor=(1.2, 0.5),
loc='center right', borderaxespad=0.)
if output_file is not None:
if legend is not None:
plt.savefig(output_file, dpi=300,
bbox_extra_artists=(lgd,), bbox_inches='tight')
else:
plt.savefig(output_file, dpi=300, bbox_inches='tight')
plt.clf()
plt.close(fig)
def plot_optimization_log_frame(optimization_log, output_dir,
nb_epochs=None):
# Plot frame loss
output_file = join(output_dir, 'frame_loss.png')
variables = ['train_frame_loss', 'val_frame_loss']
linestyles = ['-', ':']
colors = ['b', 'r']
title = 'Frame loss'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame train loss
output_file = join(output_dir, 'train_frame_loss.png')
variables = ['train_frame_loss']
linestyles = ['-']
colors = ['b']
title = 'Frame loss'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame validation loss
output_file = join(output_dir, 'val_frame_loss.png')
variables = ['val_frame_loss']
linestyles = [':']
colors = ['r']
title = 'Frame loss'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame train metrics
output_file = join(output_dir, 'train_frame_metric.png')
variables = ['train_frame_metric']
linestyles = ['-']
colors = ['b']
title = 'Frame metric'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
# Plot frame val metrics
output_file = join(output_dir, 'val_frame_metric.png')
variables = ['val_frame_metric']
linestyles = [':']
colors = ['r']
title = 'Frame metric'
plot_lines(variables=variables,
lines_dict=optimization_log, linestyles=linestyles,
colors=colors, title=title,
output_file=output_file, nb_epochs=nb_epochs)
def plot_lines(variables, lines_dict, linestyles=None, colors=None,
title=None, output_file=None, nb_epochs=None,
xlabel=None):
# Plot
var_cnt = 0
legends = []
for variable in variables:
x = lines_dict[variable]
if nb_epochs is None:
nb_epochs = len(x)
else:
nb_epochs = min(len(x), nb_epochs)
if linestyles is None:
linestyle = '-'
else:
linestyle = linestyles[var_cnt]
if colors is None:
color = 'b'
else:
color = colors[var_cnt]
plt.plot(range(0, nb_epochs), x[:nb_epochs],
linestyle=linestyle, color=color)
legends.append(variable)
var_cnt += 1
plt.title(title)
if xlabel is None:
xlabel = 'Epochs'
plt.xlabel(xlabel)
plt.legend(legends, loc='best')
if output_file is not None:
plt.savefig(output_file)
# plt.show()
plt.clf()
| en | 0.75239 | # # Output all truth/prediction pairs # Output all truth/prediction pairs # get the colors of the values, according to the # colormap used by imshow # create a patch (proxy artist) for every color # put those patched as legend-handles into the legend # Plot frame loss # Plot frame train loss # Plot frame validation loss # Plot frame train metrics # Plot frame val metrics # Plot # plt.show() | 2.212494 | 2 |
src/monte_carlo/mc.py | johannesharmse/move_37_course | 1 | 6614645 | <filename>src/monte_carlo/mc.py
"""
General purpose Monte Carlo model for training on-policy methods.
"""
from copy import deepcopy
import numpy as np
class FiniteMCModel:
def __init__(self, state_space, action_space, gamma=1.0, epsilon=0.1):
"""MCModel takes in state_space and action_space (finite)
Arguments
---------
state_space: int OR list[observation], where observation is any hashable type from env's obs.
action_space: int OR list[action], where action is any hashable type from env's actions.
gamma: float, discounting factor.
epsilon: float, epsilon-greedy parameter.
If the parameter is an int, then we generate a list, and otherwise we generate a dictionary.
>>> m = FiniteMCModel(2,3,epsilon=0)
>>> m.Q
[[0, 0, 0], [0, 0, 0]]
>>> m.Q[0][1] = 1
>>> m.Q
[[0, 1, 0], [0, 0, 0]]
>>> m.pi(1, 0)
1
>>> m.pi(1, 1)
0
>>> d = m.generate_returns([(0,0,0), (0,1,1), (1,0,1)])
>>> assert(d == {(1, 0): 1, (0, 1): 2, (0, 0): 2})
>>> m.choose_action(m.pi, 1)
0
"""
self.gamma = gamma
self.epsilon = epsilon
self.Q = None
if isinstance(action_space, int):
self.action_space = np.arange(action_space)
actions = [0]*action_space
# Action representation
self._act_rep = "list"
else:
self.action_space = action_space
actions = {k:0 for k in action_space}
self._act_rep = "dict"
if isinstance(state_space, int):
self.state_space = np.arange(state_space)
self.Q = [deepcopy(actions) for _ in range(state_space)]
else:
self.state_space = state_space
self.Q = {k:deepcopy(actions) for k in state_space}
# Frequency of state/action.
self.Ql = deepcopy(self.Q)
def pi(self, action, state):
"""pi(a,s,A,V) := pi(a|s)
We take the argmax_a of Q(s,a).
q[s] = [q(s,0), q(s,1), ...]
"""
if self._act_rep == "list":
if action == np.argmax(self.Q[state]):
return 1
return 0
elif self._act_rep == "dict":
if action == max(self.Q[state], key=self.Q[state].get):
return 1
return 0
def b(self, action, state):
"""b(a,s,A) := b(a|s)
Sometimes you can only use a subset of the action space
given the state.
Randomly selects an action from a uniform distribution.
"""
return self.epsilon/len(self.action_space) + (1-self.epsilon) * self.pi(action, state)
def generate_returns(self, ep):
"""Backup on returns per time period in an epoch
Arguments
---------
ep: [(observation, action, reward)], an episode trajectory in chronological order.
"""
G = {} # return on state
C = 0 # cumulative reward
for tpl in reversed(ep):
observation, action, reward = tpl
G[(observation, action)] = C = reward + self.gamma*C
return G
def choose_action(self, policy, state):
"""Uses specified policy to select an action randomly given the state.
Arguments
---------
policy: function, can be self.pi, or self.b, or another custom policy.
state: observation of the environment.
"""
probs = [policy(a, state) for a in self.action_space]
return np.random.choice(self.action_space, p=probs)
def update_Q(self, ep):
"""Performs a action-value update.
Arguments
---------
ep: [(observation, action, reward)], an episode trajectory in chronological order.
"""
# Generate returns, return ratio
G = self.generate_returns(ep)
for s in G:
state, action = s
q = self.Q[state][action]
self.Ql[state][action] += 1
N = self.Ql[state][action]
self.Q[state][action] = q * N/(N+1) + G[s]/(N+1)
def score(self, env, policy, n_samples=1000):
"""Evaluates a specific policy with regards to the env.
Arguments
---------
env: an openai gym env, or anything that follows the api.
policy: a function, could be self.pi, self.b, etc.
"""
rewards = []
for _ in range(n_samples):
observation = env.reset()
cum_rewards = 0
while True:
action = self.choose_action(policy, observation)
observation, reward, done, _ = env.step(action)
cum_rewards += reward
if done:
rewards.append(cum_rewards)
break
return np.mean(rewards)
if __name__ == "__main__":
import doctest
doctest.testmod() | <filename>src/monte_carlo/mc.py
"""
General purpose Monte Carlo model for training on-policy methods.
"""
from copy import deepcopy
import numpy as np
class FiniteMCModel:
def __init__(self, state_space, action_space, gamma=1.0, epsilon=0.1):
"""MCModel takes in state_space and action_space (finite)
Arguments
---------
state_space: int OR list[observation], where observation is any hashable type from env's obs.
action_space: int OR list[action], where action is any hashable type from env's actions.
gamma: float, discounting factor.
epsilon: float, epsilon-greedy parameter.
If the parameter is an int, then we generate a list, and otherwise we generate a dictionary.
>>> m = FiniteMCModel(2,3,epsilon=0)
>>> m.Q
[[0, 0, 0], [0, 0, 0]]
>>> m.Q[0][1] = 1
>>> m.Q
[[0, 1, 0], [0, 0, 0]]
>>> m.pi(1, 0)
1
>>> m.pi(1, 1)
0
>>> d = m.generate_returns([(0,0,0), (0,1,1), (1,0,1)])
>>> assert(d == {(1, 0): 1, (0, 1): 2, (0, 0): 2})
>>> m.choose_action(m.pi, 1)
0
"""
self.gamma = gamma
self.epsilon = epsilon
self.Q = None
if isinstance(action_space, int):
self.action_space = np.arange(action_space)
actions = [0]*action_space
# Action representation
self._act_rep = "list"
else:
self.action_space = action_space
actions = {k:0 for k in action_space}
self._act_rep = "dict"
if isinstance(state_space, int):
self.state_space = np.arange(state_space)
self.Q = [deepcopy(actions) for _ in range(state_space)]
else:
self.state_space = state_space
self.Q = {k:deepcopy(actions) for k in state_space}
# Frequency of state/action.
self.Ql = deepcopy(self.Q)
def pi(self, action, state):
"""pi(a,s,A,V) := pi(a|s)
We take the argmax_a of Q(s,a).
q[s] = [q(s,0), q(s,1), ...]
"""
if self._act_rep == "list":
if action == np.argmax(self.Q[state]):
return 1
return 0
elif self._act_rep == "dict":
if action == max(self.Q[state], key=self.Q[state].get):
return 1
return 0
def b(self, action, state):
"""b(a,s,A) := b(a|s)
Sometimes you can only use a subset of the action space
given the state.
Randomly selects an action from a uniform distribution.
"""
return self.epsilon/len(self.action_space) + (1-self.epsilon) * self.pi(action, state)
def generate_returns(self, ep):
"""Backup on returns per time period in an epoch
Arguments
---------
ep: [(observation, action, reward)], an episode trajectory in chronological order.
"""
G = {} # return on state
C = 0 # cumulative reward
for tpl in reversed(ep):
observation, action, reward = tpl
G[(observation, action)] = C = reward + self.gamma*C
return G
def choose_action(self, policy, state):
"""Uses specified policy to select an action randomly given the state.
Arguments
---------
policy: function, can be self.pi, or self.b, or another custom policy.
state: observation of the environment.
"""
probs = [policy(a, state) for a in self.action_space]
return np.random.choice(self.action_space, p=probs)
def update_Q(self, ep):
"""Performs a action-value update.
Arguments
---------
ep: [(observation, action, reward)], an episode trajectory in chronological order.
"""
# Generate returns, return ratio
G = self.generate_returns(ep)
for s in G:
state, action = s
q = self.Q[state][action]
self.Ql[state][action] += 1
N = self.Ql[state][action]
self.Q[state][action] = q * N/(N+1) + G[s]/(N+1)
def score(self, env, policy, n_samples=1000):
"""Evaluates a specific policy with regards to the env.
Arguments
---------
env: an openai gym env, or anything that follows the api.
policy: a function, could be self.pi, self.b, etc.
"""
rewards = []
for _ in range(n_samples):
observation = env.reset()
cum_rewards = 0
while True:
action = self.choose_action(policy, observation)
observation, reward, done, _ = env.step(action)
cum_rewards += reward
if done:
rewards.append(cum_rewards)
break
return np.mean(rewards)
if __name__ == "__main__":
import doctest
doctest.testmod() | en | 0.642224 | General purpose Monte Carlo model for training on-policy methods. MCModel takes in state_space and action_space (finite) Arguments --------- state_space: int OR list[observation], where observation is any hashable type from env's obs. action_space: int OR list[action], where action is any hashable type from env's actions. gamma: float, discounting factor. epsilon: float, epsilon-greedy parameter. If the parameter is an int, then we generate a list, and otherwise we generate a dictionary. >>> m = FiniteMCModel(2,3,epsilon=0) >>> m.Q [[0, 0, 0], [0, 0, 0]] >>> m.Q[0][1] = 1 >>> m.Q [[0, 1, 0], [0, 0, 0]] >>> m.pi(1, 0) 1 >>> m.pi(1, 1) 0 >>> d = m.generate_returns([(0,0,0), (0,1,1), (1,0,1)]) >>> assert(d == {(1, 0): 1, (0, 1): 2, (0, 0): 2}) >>> m.choose_action(m.pi, 1) 0 # Action representation # Frequency of state/action. pi(a,s,A,V) := pi(a|s) We take the argmax_a of Q(s,a). q[s] = [q(s,0), q(s,1), ...] b(a,s,A) := b(a|s) Sometimes you can only use a subset of the action space given the state. Randomly selects an action from a uniform distribution. Backup on returns per time period in an epoch Arguments --------- ep: [(observation, action, reward)], an episode trajectory in chronological order. # return on state # cumulative reward Uses specified policy to select an action randomly given the state. Arguments --------- policy: function, can be self.pi, or self.b, or another custom policy. state: observation of the environment. Performs a action-value update. Arguments --------- ep: [(observation, action, reward)], an episode trajectory in chronological order. # Generate returns, return ratio Evaluates a specific policy with regards to the env. Arguments --------- env: an openai gym env, or anything that follows the api. policy: a function, could be self.pi, self.b, etc. | 2.987885 | 3 |
pyvlova/models/resnet18.py | ModelTC/pyvlova | 1 | 6614646 | # Copyright 2020 <NAME>
# SPDX-License-Identifier: Apache-2.0
from .utils import *
from ..op import CombinedOp, SequenceOp, ElementwiseAdd, Linear, ReLU
class BasicBlock(CombinedOp):
expansion = 1
def __init__(self, name, in_shape, out_channel, stride=1, downsample=None):
in_shape = shape2d(in_shape)
self.conv1 = conv(name + '.conv1', in_shape, out_channel, 3, stride, 1)
self.relu1 = mock(ReLU, name + '.relu1', self.conv1)
self.conv2 = conv(name + '.conv2', self.conv1, out_channel, 3, 1, 1)
self.relu2 = mock(ReLU, name + '.relu2', self.conv2)
self.eltwise_add = mock(ElementwiseAdd, name + '.eltwise_add', self.conv2)
self.batch = self.relu2.batch
self.out_channel = self.relu2.channel
self.out_height = self.relu2.height
self.out_width = self.relu2.width
self.downsample = downsample
self.stride = stride
ops = [v for v in self.__dict__.values() if isinstance(v, BaseOp)]
super().__init__(name=name, ops=ops)
def calc(self, x):
residual = x
out = self.conv1.calc(x)
out = self.relu1.calc(out)
out = self.conv2.calc(out)
if self.downsample is not None:
residual = self.downsample.calc(x)
out = self.eltwise_add.calc(out, residual)
out = self.relu2.calc(out)
return out
class Bottleneck(CombinedOp):
expansion = 4
def __init__(self, name, in_shape, out_channel, stride=1, downsample=None):
self.conv1 = conv(name + '.conv1', in_shape, out_channel, 1)
self.relu1 = mock(ReLU, name + '.relu1', self.conv1)
self.conv2 = conv(name + '.conv2', self.conv1, out_channel, 3, stride, 1)
self.relu2 = mock(ReLU, name + '.relu2', self.conv2)
self.conv3 = conv(name + '.conv3', self.conv2, out_channel * 4, 1)
self.relu3 = mock(ReLU, name + '.relu3', self.conv3)
self.eltwise_add = mock(ElementwiseAdd, name + '.eltwise_add', self.conv3)
self.batch = self.relu3.batch
self.out_channel = self.relu3.channel
self.out_height = self.relu3.height
self.out_width = self.relu3.width
self.downsample = downsample
self.stride = stride
ops = [v for v in self.__dict__.values() if isinstance(v, BaseOp)]
super().__init__(name=name, ops=ops)
def calc(self, x):
residual = x
out = self.conv1.calc(x)
out = self.relu1.calc(out)
out = self.conv2.calc(out)
out = self.relu2.calc(out)
out = self.conv3.calc(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.eltwise_add.calc(out, residual)
out = self.relu3.calc(out)
return out
class ResNet(CombinedOp):
def __init__(self, name, in_shape, block, layers, num_classes=1000, deep_stem=False, avg_down=False):
self.inplanes = 64
self.deep_stem = deep_stem
self.avg_down = avg_down
if self.deep_stem:
conv1 = conv(name + '.stem.conv1', in_shape, 32, 3, 2, 1)
relu1 = mock(ReLU, name + '.stem.relu1', conv1)
conv2 = conv(name + '.stem.conv2', conv1, 32, 3, 1, 1)
relu2 = mock(ReLU, name + '.stem.relu2', conv2)
conv3 = conv(name + '.stem.conv3', conv2, 64, 3, 1, 1)
self.conv1 = SequenceOp(name='.stem', ops=[conv1, relu1, conv2, relu2, conv3])
else:
self.conv1 = conv(name + '.conv1', in_shape, 64, 7, 2, 3)
self.relu1 = mock(ReLU, name + '.relu1', self.conv1)
self.maxpool = pool(name + '.maxpool', self.relu1, 3, 2, 1, 'max')
self.layer1 = self._make_layer(name + '.layer1', self.maxpool, block, 64, layers[0])
self.layer2 = self._make_layer(name + '.layer2', self.layer1, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(name + '.layer3', self.layer2, block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(name + '.layer4', self.layer3, block, 512, layers[3], stride=2)
self.avgpool = pool(name + '.avgpool', self.layer4, 7, 1, 0, 'avg')
self.flatten = flatten2d(name + '.flatten', self.avgpool)
self.fc = Linear(
batch=self.flatten.batch, in_channel=512 * block.expansion,
out_channel=num_classes, biased=True,
name=name + '.linear'
)
ops = [v for v in self.__dict__.values() if isinstance(v, BaseOp)]
super().__init__(name=name, ops=ops)
def _make_layer(self, name, prev, block, planes, blocks, stride=1, avg_down=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if self.avg_down:
raise NotImplemented
# downsample = nn.Sequential(
# nn.AvgPool2d(stride, stride=stride, ceil_mode=True, count_include_pad=False),
# nn.Conv2d(self.inplanes, planes * block.expansion,
# kernel_size=1, stride=1, bias=False),
# BN(planes * block.expansion),
# )
else:
downsample = conv(name + '.downsample', prev, planes * block.expansion, 1, stride)
layers = []
layers.append(block(name + '.' + str(len(layers)), prev, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(name + '.' + str(len(layers)), layers[-1], planes))
return SequenceOp(name=name, ops=layers)
def calc(self, x):
x = self.conv1.calc(x)
x = self.relu1.calc(x)
x = self.maxpool.calc(x)
x = self.layer1.calc(x)
x = self.layer2.calc(x)
x = self.layer3.calc(x)
x = self.layer4.calc(x)
x = self.avgpool.calc(x)
x = self.flatten.calc(x)
x = self.fc.calc(x)
return x
def resnet18(**kwargs):
model = ResNet('resnet18', [1, 3, 224, 224], BasicBlock, [2, 2, 2, 2], **kwargs)
return model
| # Copyright 2020 <NAME>
# SPDX-License-Identifier: Apache-2.0
from .utils import *
from ..op import CombinedOp, SequenceOp, ElementwiseAdd, Linear, ReLU
class BasicBlock(CombinedOp):
expansion = 1
def __init__(self, name, in_shape, out_channel, stride=1, downsample=None):
in_shape = shape2d(in_shape)
self.conv1 = conv(name + '.conv1', in_shape, out_channel, 3, stride, 1)
self.relu1 = mock(ReLU, name + '.relu1', self.conv1)
self.conv2 = conv(name + '.conv2', self.conv1, out_channel, 3, 1, 1)
self.relu2 = mock(ReLU, name + '.relu2', self.conv2)
self.eltwise_add = mock(ElementwiseAdd, name + '.eltwise_add', self.conv2)
self.batch = self.relu2.batch
self.out_channel = self.relu2.channel
self.out_height = self.relu2.height
self.out_width = self.relu2.width
self.downsample = downsample
self.stride = stride
ops = [v for v in self.__dict__.values() if isinstance(v, BaseOp)]
super().__init__(name=name, ops=ops)
def calc(self, x):
residual = x
out = self.conv1.calc(x)
out = self.relu1.calc(out)
out = self.conv2.calc(out)
if self.downsample is not None:
residual = self.downsample.calc(x)
out = self.eltwise_add.calc(out, residual)
out = self.relu2.calc(out)
return out
class Bottleneck(CombinedOp):
expansion = 4
def __init__(self, name, in_shape, out_channel, stride=1, downsample=None):
self.conv1 = conv(name + '.conv1', in_shape, out_channel, 1)
self.relu1 = mock(ReLU, name + '.relu1', self.conv1)
self.conv2 = conv(name + '.conv2', self.conv1, out_channel, 3, stride, 1)
self.relu2 = mock(ReLU, name + '.relu2', self.conv2)
self.conv3 = conv(name + '.conv3', self.conv2, out_channel * 4, 1)
self.relu3 = mock(ReLU, name + '.relu3', self.conv3)
self.eltwise_add = mock(ElementwiseAdd, name + '.eltwise_add', self.conv3)
self.batch = self.relu3.batch
self.out_channel = self.relu3.channel
self.out_height = self.relu3.height
self.out_width = self.relu3.width
self.downsample = downsample
self.stride = stride
ops = [v for v in self.__dict__.values() if isinstance(v, BaseOp)]
super().__init__(name=name, ops=ops)
def calc(self, x):
residual = x
out = self.conv1.calc(x)
out = self.relu1.calc(out)
out = self.conv2.calc(out)
out = self.relu2.calc(out)
out = self.conv3.calc(out)
if self.downsample is not None:
residual = self.downsample(x)
out = self.eltwise_add.calc(out, residual)
out = self.relu3.calc(out)
return out
class ResNet(CombinedOp):
def __init__(self, name, in_shape, block, layers, num_classes=1000, deep_stem=False, avg_down=False):
self.inplanes = 64
self.deep_stem = deep_stem
self.avg_down = avg_down
if self.deep_stem:
conv1 = conv(name + '.stem.conv1', in_shape, 32, 3, 2, 1)
relu1 = mock(ReLU, name + '.stem.relu1', conv1)
conv2 = conv(name + '.stem.conv2', conv1, 32, 3, 1, 1)
relu2 = mock(ReLU, name + '.stem.relu2', conv2)
conv3 = conv(name + '.stem.conv3', conv2, 64, 3, 1, 1)
self.conv1 = SequenceOp(name='.stem', ops=[conv1, relu1, conv2, relu2, conv3])
else:
self.conv1 = conv(name + '.conv1', in_shape, 64, 7, 2, 3)
self.relu1 = mock(ReLU, name + '.relu1', self.conv1)
self.maxpool = pool(name + '.maxpool', self.relu1, 3, 2, 1, 'max')
self.layer1 = self._make_layer(name + '.layer1', self.maxpool, block, 64, layers[0])
self.layer2 = self._make_layer(name + '.layer2', self.layer1, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(name + '.layer3', self.layer2, block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(name + '.layer4', self.layer3, block, 512, layers[3], stride=2)
self.avgpool = pool(name + '.avgpool', self.layer4, 7, 1, 0, 'avg')
self.flatten = flatten2d(name + '.flatten', self.avgpool)
self.fc = Linear(
batch=self.flatten.batch, in_channel=512 * block.expansion,
out_channel=num_classes, biased=True,
name=name + '.linear'
)
ops = [v for v in self.__dict__.values() if isinstance(v, BaseOp)]
super().__init__(name=name, ops=ops)
def _make_layer(self, name, prev, block, planes, blocks, stride=1, avg_down=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if self.avg_down:
raise NotImplemented
# downsample = nn.Sequential(
# nn.AvgPool2d(stride, stride=stride, ceil_mode=True, count_include_pad=False),
# nn.Conv2d(self.inplanes, planes * block.expansion,
# kernel_size=1, stride=1, bias=False),
# BN(planes * block.expansion),
# )
else:
downsample = conv(name + '.downsample', prev, planes * block.expansion, 1, stride)
layers = []
layers.append(block(name + '.' + str(len(layers)), prev, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(name + '.' + str(len(layers)), layers[-1], planes))
return SequenceOp(name=name, ops=layers)
def calc(self, x):
x = self.conv1.calc(x)
x = self.relu1.calc(x)
x = self.maxpool.calc(x)
x = self.layer1.calc(x)
x = self.layer2.calc(x)
x = self.layer3.calc(x)
x = self.layer4.calc(x)
x = self.avgpool.calc(x)
x = self.flatten.calc(x)
x = self.fc.calc(x)
return x
def resnet18(**kwargs):
model = ResNet('resnet18', [1, 3, 224, 224], BasicBlock, [2, 2, 2, 2], **kwargs)
return model
| en | 0.48331 | # Copyright 2020 <NAME> # SPDX-License-Identifier: Apache-2.0 # downsample = nn.Sequential( # nn.AvgPool2d(stride, stride=stride, ceil_mode=True, count_include_pad=False), # nn.Conv2d(self.inplanes, planes * block.expansion, # kernel_size=1, stride=1, bias=False), # BN(planes * block.expansion), # ) | 2.260348 | 2 |
ml/predict.py | emiilbjorklund/tripLogger | 1 | 6614647 | <gh_stars>1-10
import numpy as np
import os
import scipy.ndimage
import imageio
from skimage.feature import hog
from skimage import data, color, exposure
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.externals import joblib
knn = joblib.load('model/knn_model.pkl')
image = imageio.imread('dataSet/9/IMG_49421.png')
image = color.rgb2gray(image)
df= hog(image, orientations=8, pixels_per_cell=(10,10), cells_per_block=(5, 5))
predict = knn.predict(df.reshape(1,-1))[0]
print(predict)
| import numpy as np
import os
import scipy.ndimage
import imageio
from skimage.feature import hog
from skimage import data, color, exposure
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.externals import joblib
knn = joblib.load('model/knn_model.pkl')
image = imageio.imread('dataSet/9/IMG_49421.png')
image = color.rgb2gray(image)
df= hog(image, orientations=8, pixels_per_cell=(10,10), cells_per_block=(5, 5))
predict = knn.predict(df.reshape(1,-1))[0]
print(predict) | none | 1 | 2.811507 | 3 | |
advent/year2018/day3.py | davweb/advent-of-code | 0 | 6614648 | #!/usr/local/bin/python3
from collections import defaultdict
import re
PATTERN = re.compile(r"#(\d+) @ (\d+),(\d+): (\d+)x(\d+)")
class Claim:
def __init__(self, definition):
"""
>>> Claim("#1355 @ 102,538: 21x28")
Claim(id=1355, x=102, y=538, width=21, height=28)
>>> Claim("a")
Traceback (most recent call last):
...
ValueError: Invalid defintion 'a'
"""
match = PATTERN.match(definition)
if not match:
raise ValueError("Invalid defintion '{}'".format(definition))
self.id = int(match.group(1))
self.x = int(match.group(2))
self.y = int(match.group(3))
self.width = int(match.group(4))
self.height = int(match.group(5))
def squares(self):
"""
>>> list(Claim("#1 @ 0,0: 2x2").squares())
[(0, 0), (0, 1), (1, 0), (1, 1)]
>>> list(Claim("#2 @ 3,2: 1x1").squares())
[(3, 2)]
>>> list(Claim("#3 @ 4,4: 1x3").squares())
[(4, 4), (4, 5), (4, 6)]
>>> list(Claim("#4 @ 4,4: 3x1").squares())
[(4, 4), (5, 4), (6, 4)]
"""
for x in range(self.x, self.x + self.width):
for y in range(self.y, self.y + self.height):
yield (x, y)
def __repr__(self):
return "Claim(id={id}, x={x}, y={y}, width={width}, height={height})".format(**self.__dict__)
def read_input():
file = open('input/2018/day3-input.txt', 'r')
return [Claim(line) for line in file.readlines()]
def part1(claims):
"""
>>> part1(read_input())
116491
"""
grid = defaultdict(int)
for claim in claims:
for square in claim.squares():
grid[square] += 1
return sum(1 for value in grid.values() if value >= 2)
def part2(claims):
"""
>>> part2(read_input())
707
"""
grid = defaultdict(int)
for claim in claims:
for square in claim.squares():
grid[square] += 1
for claim in claims:
winner = True
for square in claim.squares():
if grid[square] > 1:
winner = False
break
if winner:
return claim.id
def main():
claims = read_input()
print(part1(claims))
print(part2(claims))
if __name__ == "__main__":
main()
| #!/usr/local/bin/python3
from collections import defaultdict
import re
PATTERN = re.compile(r"#(\d+) @ (\d+),(\d+): (\d+)x(\d+)")
class Claim:
def __init__(self, definition):
"""
>>> Claim("#1355 @ 102,538: 21x28")
Claim(id=1355, x=102, y=538, width=21, height=28)
>>> Claim("a")
Traceback (most recent call last):
...
ValueError: Invalid defintion 'a'
"""
match = PATTERN.match(definition)
if not match:
raise ValueError("Invalid defintion '{}'".format(definition))
self.id = int(match.group(1))
self.x = int(match.group(2))
self.y = int(match.group(3))
self.width = int(match.group(4))
self.height = int(match.group(5))
def squares(self):
"""
>>> list(Claim("#1 @ 0,0: 2x2").squares())
[(0, 0), (0, 1), (1, 0), (1, 1)]
>>> list(Claim("#2 @ 3,2: 1x1").squares())
[(3, 2)]
>>> list(Claim("#3 @ 4,4: 1x3").squares())
[(4, 4), (4, 5), (4, 6)]
>>> list(Claim("#4 @ 4,4: 3x1").squares())
[(4, 4), (5, 4), (6, 4)]
"""
for x in range(self.x, self.x + self.width):
for y in range(self.y, self.y + self.height):
yield (x, y)
def __repr__(self):
return "Claim(id={id}, x={x}, y={y}, width={width}, height={height})".format(**self.__dict__)
def read_input():
file = open('input/2018/day3-input.txt', 'r')
return [Claim(line) for line in file.readlines()]
def part1(claims):
"""
>>> part1(read_input())
116491
"""
grid = defaultdict(int)
for claim in claims:
for square in claim.squares():
grid[square] += 1
return sum(1 for value in grid.values() if value >= 2)
def part2(claims):
"""
>>> part2(read_input())
707
"""
grid = defaultdict(int)
for claim in claims:
for square in claim.squares():
grid[square] += 1
for claim in claims:
winner = True
for square in claim.squares():
if grid[square] > 1:
winner = False
break
if winner:
return claim.id
def main():
claims = read_input()
print(part1(claims))
print(part2(claims))
if __name__ == "__main__":
main()
| en | 0.423884 | #!/usr/local/bin/python3 >>> Claim("#1355 @ 102,538: 21x28") Claim(id=1355, x=102, y=538, width=21, height=28) >>> Claim("a") Traceback (most recent call last): ... ValueError: Invalid defintion 'a' >>> list(Claim("#1 @ 0,0: 2x2").squares()) [(0, 0), (0, 1), (1, 0), (1, 1)] >>> list(Claim("#2 @ 3,2: 1x1").squares()) [(3, 2)] >>> list(Claim("#3 @ 4,4: 1x3").squares()) [(4, 4), (4, 5), (4, 6)] >>> list(Claim("#4 @ 4,4: 3x1").squares()) [(4, 4), (5, 4), (6, 4)] >>> part1(read_input()) 116491 >>> part2(read_input()) 707 | 3.390888 | 3 |
exarl/envs/env_vault/ExaCOVID.py | schr476/EXARL | 2 | 6614649 | # This material was prepared as an account of work sponsored by an agency of the
# United States Government. Neither the United States Government nor the United
# States Department of Energy, nor Battelle, nor any of their employees, nor any
# jurisdiction or organization that has cooperated in the development of these
# materials, makes any warranty, express or implied, or assumes any legal
# liability or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights. Reference
# herein to any specific commercial product, process, or service by trade name,
# trademark, manufacturer, or otherwise does not necessarily constitute or imply
# its endorsement, recommendation, or favoring by the United States Government
# or any agency thereof, or Battelle Memorial Institute. The views and opinions
# of authors expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by
# BATTELLE
# for the
# UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
from gym import spaces
import numpy as np
import pandas as pd
import os
import sys
sys.path.append(os.path.dirname(__file__) + '/pydemic/')
from pydemic.data.united_states import nyt, get_population, get_age_distribution
from pydemic import MitigationModel
from pydemic.models.seirpp import SimulationResult
from pydemic.models import SEIRPlusPlusSimulation
import gym
class ExaCOVID(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, **kwargs):
super().__init__()
"""
"""
# self.cfg_data = super.get_config()
self.results_dir = ''
''' Initial key variable setup '''
self.episodes = 0
self.steps = 0
self.initial_cases = 200
self.icu_max = 1000
self.model_dt = 0.05
''' Define the model time scale for each step '''
self.time_init = 0 # [days] a month delay
self.mitigation_dt = 1 # [days]
self.mitigation_length = 7 # [day]
''' Mitigation factors is used as the action '''
self.mitigation = None
self.mitigation_times = [0, self.mitigation_length]
self.mitigation_factors = [0.5, 0.5]
''' Define the initial model parameters and distributions '''
self.state = "Illinois"
self.data = nyt(self.state)
self.total_population = get_population(self.state)
# print('self.total_population:{}'.format(self.total_population))
self.age_distribution = get_age_distribution()
# TODO: Use some initial time (Jan 1st, 2020)
self.tspan = ('2020-01-01', '2020-02-01')
self.date_max = pd.to_datetime('2020-06-01')
self.t0 = 0
self.tf = 8 * 30
''' Model default '''
self.y0 = {}
self.y0['infected'] = self.initial_cases * np.array(self.age_distribution)
self.y0['susceptible'] = (
self.total_population * np.array(self.age_distribution) - self.y0['infected']
)
# print('Total infected:{}'.format(self.y0['infected'][:].sum()))
from pydemic.distributions import GammaDistribution
self.parameters = dict(
ifr=.003,
r0=2.3,
serial_dist=GammaDistribution(mean=4, std=3.25),
seasonal_forcing_amp=.1,
peak_day=15,
incubation_dist=GammaDistribution(5.5, 2),
p_symptomatic=np.array([0.057, 0.054, 0.294, 0.668, 0.614, 0.83,
0.99, 0.995, 0.999]),
# p_positive=1.5,
hospitalized_dist=GammaDistribution(6.5, 1.6),
p_hospitalized=np.array([0.001, 0.003, 0.012, 0.032, 0.049, 0.102,
0.166, 0.243, 0.273]),
discharged_dist=GammaDistribution(9, 6),
critical_dist=GammaDistribution(3, 1),
p_critical=.9 * np.array([0.05, 0.05, 0.05, 0.05, 0.063, 0.122,
0.274, 0.432, 0.709]),
dead_dist=GammaDistribution(7.5, 5.),
p_dead=1.2 * np.array([0.3, 0.3, 0.3, 0.3, 0.3, 0.4, 0.4, 0.5, 0.5]),
recovered_dist=GammaDistribution(9, 2.2),
all_dead_dist=GammaDistribution(3, 3),
all_dead_multiplier=1.,
)
self.state_variables = SEIRPlusPlusSimulation.increment_keys
self.nstates = len(self.state_variables)
# print('Variables:{}'.format(self.state_variables))
self.observation_space = spaces.Box(low=np.zeros(self.nstates),
high=np.ones(self.nstates),
dtype=np.float32)
# Increase, Decrease, Don't change
self.action_factors = [0, 0.01, 0.05, 0.1, -0.01, -0.05, -0.1]
self.action_space = spaces.Discrete(7)
def step(self, action):
# print('step()')
''' Initial step variables '''
done = False
reward = 0
info = ''
self.steps += 1
''' Add new mitigation times '''
self.mitigation_times.append(self.steps * self.mitigation_length)
self.mitigation_times.append(self.mitigation_times[-1] + self.mitigation_dt)
''' Added previous mitgation values '''
self.mitigation_factors.append(self.mitigation_factors[-1])
''' Add new mitigation value '''
new_factor = self.mitigation_factors[-1] + self.action_factors[action]
self.mitigation_factors.append(new_factor)
''' Out of bounds'''
if self.mitigation_factors[-1] > 1:
done = True
reward = -99
info = 'Out of bounds (upper)'
if self.mitigation_factors[-1] < 0:
done = True
reward = -99
info = 'Out of bounds (lower)'
''' Create mitigation model time span '''
tspan_tmp0 = self.tspan[0]
tspan_tmp1 = (pd.to_datetime(self.tspan[0]) + (self.steps + 1) * self.mitigation_length * pd.Timedelta('1D')).strftime('%Y-%m-%d')
self.tspan = (tspan_tmp0, tspan_tmp1)
# print('tspan:{}'.format(self.tspan))
''' Create mitigation model time span '''
self.t0, self.tf = 0, self.steps * self.mitigation_length
''' New mitigation policy '''
print('mitigation times:{}'.format(self.mitigation_times))
print('mitigation factors:{}'.format(self.mitigation_factors))
self.mitigation = MitigationModel(self.t0, self.tf, self.mitigation_times, self.mitigation_factors)
''' Run the model with update mitigation trace '''
sim = SEIRPlusPlusSimulation(self.total_population, self.age_distribution,
mitigation=self.mitigation, **self.parameters)
self.result = sim(self.tspan, self.y0, self.model_dt)
# for key in self.result.y.keys():
# print('{}: {}'.format(key, self.result.y[key].sum(axis=1)[-1]))
total_icu = self.result.y['icu'].sum(axis=1)[-1]
if total_icu > self.icu_max:
reward = -499
done = True
info = 'Exceeded the infection capacity'
# Calculate the reward
if done != True:
reward = total_icu / (self.icu_max + 1)
if pd.to_datetime(self.tspan[1]) >= self.date_max:
done = True
info = 'Reached the max date'
# Convert dict to state array
next_state = np.array([self.result.y[key][:][-1].sum() for key in self.state_variables])
next_state /= self.total_population
##
if self.steps > 1:
self.render()
return next_state, reward, done, info
def reset(self):
self.episodes += 1
self.steps = 0
self.mitigation_times = [0, self.mitigation_dt]
self.mitigation_factors = [0.5, 0.5]
self.total_population = get_population(self.state)
# print('self.total_population:{}'.format(self.total_population))
##
t0, tf = 0, self.mitigation_length # TODO: What range should consider ?? ##
mitigation = MitigationModel(t0, tf, self.mitigation_times, self.mitigation_factors)
sim = SEIRPlusPlusSimulation(self.total_population, self.age_distribution,
mitigation=mitigation, **self.parameters)
tspan_tmp0 = '2020-01-01'
tspan_tmp1 = (pd.to_datetime(tspan_tmp0) + self.mitigation_length * pd.Timedelta('1D')).strftime('%Y-%m-%d')
self.tspan = (tspan_tmp0, tspan_tmp1)
self.result = sim(self.tspan, self.y0, self.model_dt)
# print('variables:{}'.format(self.result.y.keys()))
next_state = np.array([self.result.y[key][-1][-1] for key in self.state_variables])
next_state /= self.total_population
return next_state
def render(self):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
plt.rcParams['font.family'] = [u'serif']
plt.rcParams['font.size'] = 16
fig, ax = plt.subplots(2, figsize=(18, 12))
''' Migigation strategy '''
filename = self.results_dir + 'covid_render_episode{}'.format(self.episodes)
_t = np.linspace(self.t0, self.tf, 1000)
ax[0].plot(_t, self.mitigation(_t))
''' Results '''
# filename = self.results_dir + 'sim_episode{}'.format(self.episodes)
plot_compartments = [
'icu',
'infected',
'positive',
'all_dead',
'hospitalized',
]
# fig, ax = plt.subplots(figsize=(10, 6))
for name in plot_compartments:
# print(result.y[name].shape)
ax[1].plot(self.result.t,
(self.result.y[name].sum(axis=1)),
label=name)
ax[1].plot()
# plot on y log scale
ax[1].set_yscale('log')
ax[1].set_ylim(ymin=0.8)
# plot x axis as dates
# ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d'))
# fig.autofmt_xdate()
# create legend
ax[1].legend(loc='center left', bbox_to_anchor=(1, .5))
ax[1].set_xlabel('time')
ax[1].set_ylabel('count (persons)')
plt.savefig(filename)
return 0
| # This material was prepared as an account of work sponsored by an agency of the
# United States Government. Neither the United States Government nor the United
# States Department of Energy, nor Battelle, nor any of their employees, nor any
# jurisdiction or organization that has cooperated in the development of these
# materials, makes any warranty, express or implied, or assumes any legal
# liability or responsibility for the accuracy, completeness, or usefulness or
# any information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights. Reference
# herein to any specific commercial product, process, or service by trade name,
# trademark, manufacturer, or otherwise does not necessarily constitute or imply
# its endorsement, recommendation, or favoring by the United States Government
# or any agency thereof, or Battelle Memorial Institute. The views and opinions
# of authors expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by
# BATTELLE
# for the
# UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
from gym import spaces
import numpy as np
import pandas as pd
import os
import sys
sys.path.append(os.path.dirname(__file__) + '/pydemic/')
from pydemic.data.united_states import nyt, get_population, get_age_distribution
from pydemic import MitigationModel
from pydemic.models.seirpp import SimulationResult
from pydemic.models import SEIRPlusPlusSimulation
import gym
class ExaCOVID(gym.Env):
    """OpenAI Gym environment driving a pydemic SEIR++ COVID-19 simulation.

    The agent adjusts a social-mitigation factor (in [0, 1]) once per RL step;
    each step advances the epidemic simulation by ``mitigation_length`` days.

    Action space
        ``Discrete(7)`` — an index into ``action_factors`` giving the increment
        applied to the current mitigation factor (index 0 = no change).
    Observation space
        ``Box`` over the simulation's increment-key compartments at the last
        simulated day, summed over age groups and normalised by the total
        population.
    Episode termination
        * mitigation factor leaves [0, 1]          -> reward -99
        * ICU occupancy exceeds ``icu_max``        -> reward -499
        * the simulated date reaches ``date_max``  -> normal end
    """

    metadata = {'render.modes': ['human']}

    def __init__(self, **kwargs):
        super().__init__()
        # Directory prefix for the figures written by render().
        self.results_dir = ''

        # --- Episode bookkeeping -------------------------------------------
        self.episodes = 0
        self.steps = 0
        self.initial_cases = 200   # seed infections at t0
        self.icu_max = 1000        # ICU capacity; exceeding it ends the episode
        self.model_dt = 0.05       # [days] integration step of the SEIR++ model

        # --- Time scale of one RL step -------------------------------------
        self.time_init = 0           # [days] a month delay
        self.mitigation_dt = 1       # [days] ramp time between mitigation levels
        self.mitigation_length = 7   # [days] simulated days per RL step

        # --- Mitigation trace (piecewise factor used as the action) --------
        self.mitigation = None
        self.mitigation_times = [0, self.mitigation_length]
        self.mitigation_factors = [0.5, 0.5]

        # --- Initial epidemiological data ----------------------------------
        self.state = "Illinois"
        self.data = nyt(self.state)
        self.total_population = get_population(self.state)
        self.age_distribution = get_age_distribution()

        # TODO: Use some initial time (Jan 1st, 2020)
        self.tspan = ('2020-01-01', '2020-02-01')
        self.date_max = pd.to_datetime('2020-06-01')
        self.t0 = 0
        self.tf = 8 * 30

        # Initial compartments: spread the seed cases over the age groups,
        # everyone else starts susceptible.
        self.y0 = {}
        self.y0['infected'] = self.initial_cases * np.array(self.age_distribution)
        self.y0['susceptible'] = (
            self.total_population * np.array(self.age_distribution) - self.y0['infected']
        )

        from pydemic.distributions import GammaDistribution
        # SEIR++ model parameters; per-age-group arrays follow the order of
        # ``age_distribution``.
        self.parameters = dict(
            ifr=.003,
            r0=2.3,
            serial_dist=GammaDistribution(mean=4, std=3.25),
            seasonal_forcing_amp=.1,
            peak_day=15,
            incubation_dist=GammaDistribution(5.5, 2),
            p_symptomatic=np.array([0.057, 0.054, 0.294, 0.668, 0.614, 0.83,
                                    0.99, 0.995, 0.999]),
            # p_positive=1.5,
            hospitalized_dist=GammaDistribution(6.5, 1.6),
            p_hospitalized=np.array([0.001, 0.003, 0.012, 0.032, 0.049, 0.102,
                                     0.166, 0.243, 0.273]),
            discharged_dist=GammaDistribution(9, 6),
            critical_dist=GammaDistribution(3, 1),
            p_critical=.9 * np.array([0.05, 0.05, 0.05, 0.05, 0.063, 0.122,
                                      0.274, 0.432, 0.709]),
            dead_dist=GammaDistribution(7.5, 5.),
            p_dead=1.2 * np.array([0.3, 0.3, 0.3, 0.3, 0.3, 0.4, 0.4, 0.5, 0.5]),
            recovered_dist=GammaDistribution(9, 2.2),
            all_dead_dist=GammaDistribution(3, 3),
            all_dead_multiplier=1.,
        )

        # One observation component per SEIR++ increment key, normalised so
        # values land in [0, 1].
        self.state_variables = SEIRPlusPlusSimulation.increment_keys
        self.nstates = len(self.state_variables)
        self.observation_space = spaces.Box(low=np.zeros(self.nstates),
                                            high=np.ones(self.nstates),
                                            dtype=np.float32)
        # Increase, Decrease, Don't change
        self.action_factors = [0, 0.01, 0.05, 0.1, -0.01, -0.05, -0.1]
        self.action_space = spaces.Discrete(7)

    def step(self, action):
        """Apply one mitigation adjustment and simulate the next window.

        Parameters
        ----------
        action : int
            Index into ``self.action_factors``.

        Returns
        -------
        tuple
            ``(next_state, reward, done, info)`` in the classic gym style;
            ``info`` is a short status string rather than a dict.
        """
        done = False
        reward = 0
        info = ''
        self.steps += 1

        # Extend the mitigation trace: hold the previous factor until the new
        # window starts, then ramp to the new factor over mitigation_dt days.
        self.mitigation_times.append(self.steps * self.mitigation_length)
        self.mitigation_times.append(self.mitigation_times[-1] + self.mitigation_dt)
        self.mitigation_factors.append(self.mitigation_factors[-1])
        new_factor = self.mitigation_factors[-1] + self.action_factors[action]
        self.mitigation_factors.append(new_factor)

        # Out-of-bounds mitigation factor ends the episode with a penalty.
        # NOTE(review): the simulation below still runs with the invalid
        # factor; consider returning early here.
        if self.mitigation_factors[-1] > 1:
            done = True
            reward = -99
            info = 'Out of bounds (upper)'
        if self.mitigation_factors[-1] < 0:
            done = True
            reward = -99
            info = 'Out of bounds (lower)'

        # Extend the simulated calendar span by one mitigation window.
        tspan_tmp0 = self.tspan[0]
        tspan_tmp1 = (pd.to_datetime(self.tspan[0]) + (self.steps + 1) * self.mitigation_length * pd.Timedelta('1D')).strftime('%Y-%m-%d')
        self.tspan = (tspan_tmp0, tspan_tmp1)

        # Matching model-time span for the mitigation trace.
        self.t0, self.tf = 0, self.steps * self.mitigation_length
        print('mitigation times:{}'.format(self.mitigation_times))
        print('mitigation factors:{}'.format(self.mitigation_factors))
        self.mitigation = MitigationModel(self.t0, self.tf, self.mitigation_times, self.mitigation_factors)

        # Re-run the epidemic model with the updated mitigation trace.
        sim = SEIRPlusPlusSimulation(self.total_population, self.age_distribution,
                                     mitigation=self.mitigation, **self.parameters)
        self.result = sim(self.tspan, self.y0, self.model_dt)

        # ICU occupancy on the last simulated day, summed over age groups.
        total_icu = self.result.y['icu'].sum(axis=1)[-1]
        if total_icu > self.icu_max:
            reward = -499
            done = True
            info = 'Exceeded the infection capacity'

        # Reward for a non-terminal step.
        if not done:
            reward = total_icu / (self.icu_max + 1)
            if pd.to_datetime(self.tspan[1]) >= self.date_max:
                done = True
                info = 'Reached the max date'

        # Observation: last simulated day of each tracked compartment, summed
        # over age groups and normalised by the total population.
        next_state = np.array([self.result.y[key][-1].sum() for key in self.state_variables])
        next_state /= self.total_population

        if self.steps > 1:
            self.render()

        return next_state, reward, done, info

    def reset(self):
        """Reset the simulation and return the initial observation."""
        self.episodes += 1
        self.steps = 0
        # NOTE(review): __init__ seeds mitigation_times with
        # [0, mitigation_length] but reset() uses [0, mitigation_dt];
        # confirm which initial window is intended.
        self.mitigation_times = [0, self.mitigation_dt]
        self.mitigation_factors = [0.5, 0.5]
        self.total_population = get_population(self.state)

        t0, tf = 0, self.mitigation_length  # TODO: What range should consider ??
        mitigation = MitigationModel(t0, tf, self.mitigation_times, self.mitigation_factors)
        sim = SEIRPlusPlusSimulation(self.total_population, self.age_distribution,
                                     mitigation=mitigation, **self.parameters)
        tspan_tmp0 = '2020-01-01'
        tspan_tmp1 = (pd.to_datetime(tspan_tmp0) + self.mitigation_length * pd.Timedelta('1D')).strftime('%Y-%m-%d')
        self.tspan = (tspan_tmp0, tspan_tmp1)
        self.result = sim(self.tspan, self.y0, self.model_dt)

        # BUG FIX: the original read y[key][-1][-1] (last age group only),
        # inconsistent with step(); sum over age groups like step() does.
        next_state = np.array([self.result.y[key][-1].sum() for key in self.state_variables])
        next_state /= self.total_population
        return next_state

    def render(self):
        """Plot the mitigation schedule and key compartments to a file.

        Saves one figure per episode under ``results_dir`` (matplotlib infers
        the image format; no extension means its default). Returns 0.
        """
        import matplotlib.pyplot as plt

        plt.rcParams['font.family'] = [u'serif']
        plt.rcParams['font.size'] = 16
        fig, ax = plt.subplots(2, figsize=(18, 12))

        # Top panel: the mitigation factor over model time.
        filename = self.results_dir + 'covid_render_episode{}'.format(self.episodes)
        _t = np.linspace(self.t0, self.tf, 1000)
        ax[0].plot(_t, self.mitigation(_t))

        # Bottom panel: selected compartments, summed over age groups.
        plot_compartments = [
            'icu',
            'infected',
            'positive',
            'all_dead',
            'hospitalized',
        ]
        for name in plot_compartments:
            ax[1].plot(self.result.t,
                       (self.result.y[name].sum(axis=1)),
                       label=name)
        ax[1].plot()
        # Log scale keeps small and large compartments visible together.
        ax[1].set_yscale('log')
        ax[1].set_ylim(ymin=0.8)
        ax[1].legend(loc='center left', bbox_to_anchor=(1, .5))
        ax[1].set_xlabel('time')
        ax[1].set_ylabel('count (persons)')
        plt.savefig(filename)
        return 0
| en | 0.703016 | # This material was prepared as an account of work sponsored by an agency of the # United States Government. Neither the United States Government nor the United # States Department of Energy, nor Battelle, nor any of their employees, nor any # jurisdiction or organization that has cooperated in the development of these # materials, makes any warranty, express or implied, or assumes any legal # liability or responsibility for the accuracy, completeness, or usefulness or # any information, apparatus, product, software, or process disclosed, or # represents that its use would not infringe privately owned rights. Reference # herein to any specific commercial product, process, or service by trade name, # trademark, manufacturer, or otherwise does not necessarily constitute or imply # its endorsement, recommendation, or favoring by the United States Government # or any agency thereof, or Battelle Memorial Institute. The views and opinions # of authors expressed herein do not necessarily state or reflect those of the # United States Government or any agency thereof. 
# PACIFIC NORTHWEST NATIONAL LABORATORY # operated by # BATTELLE # for the # UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # self.cfg_data = super.get_config() Initial key variable setup Define the model time scale for each step # [days] a month delay # [days] # [day] Mitigation factors is used as the action Define the initial model parameters and distributions # print('self.total_population:{}'.format(self.total_population)) # TODO: Use some initial time (Jan 1st, 2020) Model default # print('Total infected:{}'.format(self.y0['infected'][:].sum())) # p_positive=1.5, # print('Variables:{}'.format(self.state_variables)) # Increase, Decrease, Don't change # print('step()') Initial step variables Add new mitigation times Added previous mitgation values Add new mitigation value Out of bounds Create mitigation model time span # print('tspan:{}'.format(self.tspan)) Create mitigation model time span New mitigation policy Run the model with update mitigation trace # for key in self.result.y.keys(): # print('{}: {}'.format(key, self.result.y[key].sum(axis=1)[-1])) # Calculate the reward # Convert dict to state array ## # print('self.total_population:{}'.format(self.total_population)) ## # TODO: What range should consider ?? ## # print('variables:{}'.format(self.result.y.keys())) Migigation strategy Results # filename = self.results_dir + 'sim_episode{}'.format(self.episodes) # fig, ax = plt.subplots(figsize=(10, 6)) # print(result.y[name].shape) # plot on y log scale # plot x axis as dates # ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d')) # fig.autofmt_xdate() # create legend | 1.883082 | 2 |
stress_detector/dev_settings.py | sa-y-an/retro | 0 | 6614650 | <reponame>sa-y-an/retro<filename>stress_detector/dev_settings.py
from pathlib import Path
import os
import json
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Load deployment secrets from an untracked JSON file so they stay out of
# version control.  Use a context manager so the file handle is closed
# promptly — the original json.load(open(...)) leaked the handle until
# garbage collection.
with open(os.path.join(BASE_DIR, 'stress_detector/config.json'), 'r') as _config_file:
    params = json.load(_config_file)

SECRET_KEY = params['SECRET_KEY']

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = params['DEBUG']

# Dev convenience: accept requests for any host.  Restrict in production.
ALLOWED_HOSTS = ['*']
# Application definition

INSTALLED_APPS = [
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'home',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'stress_detector.urls'

# Template engine: app template dirs plus a project-level templates/ folder.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'stress_detector.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Dev default: file-based SQLite alongside the project.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files served via WhiteNoise with hashed, compressed filenames.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
]

# User-uploaded media files.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# SMTP Configuration (credentials come from config.json via ``params``).
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = params["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = params["EMAIL_HOST_PASSWORD"]

# Default primary-key field type for models without an explicit pk.
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| from pathlib import Path
import os
import json
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
params = json.load(open(os.path.join(BASE_DIR, 'stress_detector/config.json'), 'r'))
SECRET_KEY = params['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = params['DEBUG']
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'stress_detector.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'stress_detector.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#SMTP Configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = params["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = params["EMAIL_HOST_PASSWORD"]
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' | en | 0.637194 | # Build paths inside the project like this: BASE_DIR / 'subdir'. # SECURITY WARNING: don't run with debug turned on in production! # Application definition # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ #SMTP Configuration | 1.917796 | 2 |