prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
*
from ie_stats import *
from ie_action import ACT_LEFT, ACT_RIGHT
from ie_spells import *
from ie_restype import RES_2DA
#################################################################
# this is in the operator module of the standard python lib
def itemgetter(*items):
	"""Local clone of operator.itemgetter: build a callable fetching item(s).

	With a single key the callable returns obj[key]; with several keys it
	returns a tuple of the looked-up values, in the order given.
	"""
	if len(items) == 1:
		key = items[0]
		def fetch(obj):
			return obj[key]
	else:
		def fetch(obj):
			return tuple(obj[k] for k in items)
	return fetch
#################################################################
# routines for the actionbar spell access code
def GetUsableMemorizedSpells(actor, BookType):
	"""Collect the actor's memorized, non-depleted spells of one book type.

	Returns a list of spell dicts (from GemRB.GetSpell), each augmented with
	'BookType', 'SpellIndex' (offset by 1000 * (1 << BookType) for later
	decoding in the action bar code) and 'MemoCount' (memorized copies).
	"""
	memorizedSpells = []
	spellResRefs = []
	for level in range (20): # Saradas NPC teaches you a level 14 special ...
		spellCount = GemRB.GetMemorizedSpellsCount (actor, BookType, level, False)
		for i in range (spellCount):
			Spell0 = GemRB.GetMemorizedSpell (actor, BookType, level, i)
			if not Spell0["Flags"]:
				# depleted, so skip
				continue
			if Spell0["SpellResRef"] in spellResRefs:
				# add another one, so we can get the count more cheaply later
				spellResRefs.append (Spell0["SpellResRef"])
				continue
			spellResRefs.append (Spell0["SpellResRef"])
			Spell = GemRB.GetSpell(Spell0["SpellResRef"])
			Spell['BookType'] = BookType # just another sorting key
			Spell['SpellIndex'] = GemRB.GetSpelldataIndex (actor, Spell["SpellResRef"], 1<<BookType) # crucial!
			if Spell['SpellIndex'] == -1:
				print "Error, memorized spell not found!", Spell["SpellResRef"], 1<<BookType
			# note: '1000 * 1<<BookType' parses as (1000*1) << BookType,
			# which equals 1000 * (1 << BookType) -- same value either way
			Spell['SpellIndex'] += 1000 * 1<<BookType
			memorizedSpells.append (Spell)

	if not len(memorizedSpells):
		return []

	# count and remove the duplicates: spellResRefs holds one entry per
	# memorized copy, so count() yields the per-spell copy count
	memorizedSpells2 = []
	for spell in memorizedSpells:
		if spell["SpellResRef"] in spellResRefs:
			spell['MemoCount'] = spellResRefs.count(spell["SpellResRef"])
			while spell["SpellResRef"] in spellResRefs:
				spellResRefs.remove(spell["SpellResRef"])
			memorizedSpells2.append(spell)

	return memorizedSpells2
def GetKnownSpells(actor, BookType):
	"""Collect every distinct known spell of the given book type.

	Each returned spell dict is tagged with 'BookType', a zeroed
	'MemoCount' and a placeholder 'SpellIndex' (assigned properly later).
	"""
	known = []
	seen = []
	for level in range (9):
		count = GemRB.GetKnownSpellsCount (actor, BookType, level)
		for slot in range (count):
			entry = GemRB.GetKnownSpell (actor, BookType, level, slot)
			resRef = entry["SpellResRef"]
			if resRef in seen:
				continue
			seen.append (resRef)
			spell = GemRB.GetSpell(resRef)
			spell['BookType'] = BookType # just another sorting key
			spell['MemoCount'] = 0
			spell['SpellIndex'] = 1000 * 1<<BookType # this gets assigned properly later
			known.append (spell)
	return known
def GetKnownSpellsLevel(actor, BookType, level):
	"""Collect the distinct known spells of one level for the given book type.

	Returns a list of spell dicts (from GemRB.GetSpell), each tagged with
	'BookType' as an extra sorting key; duplicates are skipped.
	"""
	knownSpells = []
	spellResRefs = []
	spellCount = GemRB.GetKnownSpellsCount (actor, BookType, level)
	for i in range (spellCount):
		Spell0 = GemRB.GetKnownSpell (actor, BookType, level, i)
		if Spell0["SpellResRef"] in spellResRefs:
			continue
		spellResRefs.append (Spell0["SpellResRef"])
		Spell = GemRB.GetSpell(Spell0["SpellResRef"])
		Spell['BookType'] = BookType # just another sorting key
		knownSpells.append (Spell)
	return knownSpells
def index (list, value):
	"""Return the first position of value in list, or -1 when absent.

	Mirrors list.index but reports a miss with -1 instead of raising.
	(The parameter name shadows the builtin 'list'; kept unchanged for
	backward compatibility with existing callers.)
	"""
	try:
		return list.index(value)
	except ValueError:
		return -1
def GetMemorizedSpells(actor, BookType, level):
	"""Collect the actor's memorized spells of one level, merging duplicates.

	Each returned spell dict carries 'KnownCount' (number of memorized
	copies) and 'MemoCount' (sum of the entries' Flags values --
	presumably the count of still-castable copies; confirm Flags is 0/1).
	"""
	memoSpells = []
	spellResRefs = []
	spellCount = GemRB.GetMemorizedSpellsCount (actor, BookType, level, False)
	for i in range (spellCount):
		Spell0 = GemRB.GetMemorizedSpell (actor, BookType, level, i)
		# already seen: just bump the counters on the existing entry
		pos = index(spellResRefs,Spell0["SpellResRef"])
		if pos!=-1:
			memoSpells[pos]['KnownCount']+=1
			memoSpells[pos]['MemoCount']+=Spell0["Flags"]
			continue
		spellResRefs.append (Spell0["SpellResRef"])
		Spell = GemRB.GetSpell(Spell0["SpellResRef"])
		Spell['KnownCount'] = 1
		Spell['MemoCount'] = Spell0["Flags"]
		memoSpells.append (Spell)
	return memoSpells
# direct access to the spellinfo struct
# SpellIndex is the index of the spell in the struct, but we add a thousandfold of the spell type for later use in SpellPressed
def GetSpellinfoSpells(actor, BookType):
	"""Build a spell list straight from the actor's spellinfo struct.

	Every entry gets a spoofed 'SpellIndex' (position offset by 1000 * 255)
	so any lookup table works later, and a 'MemoCount' of 1.
	"""
	result = []
	for pos, resref in enumerate (GemRB.GetSpelldata (actor)):
		entry = GemRB.GetSpell(resref)
		entry['BookType'] = BookType # just another sorting key
		entry['SpellIndex'] = pos + 1000 * 255 # spoofing the type, so any table would work
		entry['MemoCount'] = 1
		result.append (entry)
	return result
def SortUsableSpells(memorizedSpells):
	"""Order the spell list according to the spldisp.2da layout table."""
	layout = CommonTables.SpellDisplay.GetValue ("USE_ROW", "ROWS")
	layout = CommonTables.SpellDisplay.GetRowName (layout)
	order = CommonTables.SpellDisplay.GetValue ("DESCENDING", "ROWS")

	key1 = CommonTables.SpellDisplay.GetValue (layout, "KEY1")
	key2 = CommonTables.SpellDisplay.GetValue (layout, "KEY2")
	key3 = CommonTables.SpellDisplay.GetValue (layout, "KEY3")
	if not key1:
		# no primary key configured: leave the list as-is
		return memorizedSpells

	# key3 only participates when key2 is present as well (mirrors the
	# original cascading checks)
	sortKeys = [key1]
	if key2:
		sortKeys.append(key2)
		if key3:
			sortKeys.append(key3)
	return sorted(memorizedSpells, key=itemgetter(*sortKeys), reverse=order)
# Sets up all the (12) action buttons for a player character with different spell or innate icons.
# It also sets up the scroll buttons left and right if needed.
# If Start is supplied, it will skip the first few items (used when scrolling through the list)
# BookType is a spellbook type bitfield (1-mage, 2-priest, 4-innate and others in iwd2)
# Offset is a control ID offset here for iwd2 purposes
def SetupSpellIcons(Window, BookType, Start=0, Offset=0):
actor = GemRB.GameGetFirstSelectedActor ()
# check if we're dealing with a temporary spellbook
if GemRB.GetVar("ActionLevel") == 11:
allSpells = GetSpellinfoSpells (actor, BookType)
else:
# construct the spellbook of usable (not depleted) memorized spells
# the getters expect the BookType as: 0 priest, 1 mage, 2 innate
if BookType == -1:
# Nahal's reckless dweomer can use any known spell
allSpells = GetKnownSpells (actor, IE_SPELL_TYPE_WIZARD)
else:
allSpells = []
for i in range(16):
if BookType & (1<<i):
allSpells += GetUsableMemorizedSpells (actor, i)
if not len(allSpells):
raise AttributeError ("Error, unknown BookType passed to SetupSpellIcons: %d! Bailing out!" %(BookType))
return
if BookType == -1:
memorizedSpells = allSpells
# reset Type, so we can choose the surge spell instead of just getting a redraw of the action bar
GemRB.SetVar("Type", 3)
else:
memorizedSpells = SortUsableSpells(allSpells)
# start creating the controls
import GUICommonWindows
# TODO: ASCOL, ROWS
#AsCol = CommonTables.SpellDisplay.GetValue (layout, "AS_COL")
#Rows = CommonTables.SpellDisplay.GetValue (layout, "ROWS")
More = len(memorizedSpells) > 12 or Start > 0
# scroll left button
if More:
Button = Window.GetControl (Offset)
Button.SetText ("")
if Start:
GUICommonWindows.SetActionIconWorkaround (Button, ACT_LEFT, 0)
Button.SetState (IE_GUI_BUTTON_UNPRESSED)
else:
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetTooltip ("")
Button.SetState (IE_GUI_BUTTON_DISABLED)
# disable all spells if fx_disable_spellcasting was run with the same type
# but only if there are any spells of that type to disable
disabled_spellcasting = GemRB.GetPlayerStat(actor, IE_CASTING, 0)
actionLevel = GemRB.GetVar ("ActionLevel")
#order is: mage, cleric, innate, class, song, (defaults to 1, item)
spellSections = [2, 4, 8, 16, 16]
# create the spell icon buttons
buttonCount = 12 - More * 2 # GUIBT_COUNT in PCStatsStruct
for i in range (buttonCount):
Button = Window.GetControl (i+Offset+More)
Button.SetEvent (IE_GUI_BUTTON_ON_RIGHT_PRESS, None)
if i+Start >= len(memorizedSpells):
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetText ("")
Button.SetTooltip ("")
continue
Spell = memorizedSpells[i+Start]
spellType = Spell['SpellType']
if spellType > 4:
spellType = 1
else:
spellType = spellSections[spellType]
if BookType == -1:
Button.SetVarAssoc ("Spell", |
# Refer to the following link for help:
# http://docs.gunicorn.org/en/latest/settings.html
# Gunicorn deployment settings for the flask_reddit app.
command = '/home/lucas/www/reddit.lucasou.com/reddit-env/bin/gunicorn'
pythonpath = '/home/lucas/www/reddit.lucasou.com/reddit-env/flask_reddit'
# serve on localhost only -- presumably proxied by a front-end web server
bind = '127.0.0.1:8040'
workers = 1
user = 'lucas'
accesslog = '/home/lucas/logs/reddit.lucasou.com/gunicorn-access.log'
errorlog = '/home/lucas/logs/reddit.lucasou.com/gunicorn-error.log'
|
import unittest
from katas.kyu_7.guess_my_number import guess_my_number
class GuessMyNumberTestCase(unittest.TestCase):
    """Checks that guess_my_number progressively unmasks the ###-###-####
    phone pattern with the digits guessed so far."""

    def test_equals(self):
        self.assertEqual(guess_my_number('0'), '###-###-####')

    def test_equals_2(self):
        self.assertEqual(guess_my_number('01'), '1##-##1-####')

    def test_equals_3(self):
        self.assertEqual(guess_my_number('012'), '12#-##1-2###')

    def test_equals_4(self):
        self.assertEqual(guess_my_number('0123'), '123-##1-23##')

    def test_equals_5(self):
        self.assertEqual(guess_my_number('01234'), '123-4#1-234#')

    def test_equals_6(self):
        self.assertEqual(guess_my_number('012345'), '123-451-2345')
|
RPOSE. See the GNU
# General Public License for more details.
#
# A copy of the GNU General Public License is available on the World
# Wide Web at <http://www.gnu.org/copyleft/gpl.html>. You can also
# obtain it by writing to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA 02111-1307, USA.
#
#***********************************************************************
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QMessageBox, QWidget
from qgis.core import QgsMessageLog, QgsMapLayerRegistry
from qgis.gui import QgsMessageBar
from analysis_ui import Ui_AnalysisWidget as Ui_Widget
import util
import classifier
import segmenter
import statistics
# from QgsMapLayer
VectorLayer = 0
RasterLayer = 1
# MESSAGE_LEVEL = util.AttrDict({
# 'INFO': 0,
# 'WARNING': 1,
# 'CRITICAL': 2
# })
class AnalysisWidget(QWidget, Ui_Widget):
    """Dock widget wiring the Designer UI to the segmentation ('segm'),
    statistics ('stats') and classification ('clf') task modules.

    Keeps the per-tab input combo boxes and keyboard tab order in sync with
    the layers currently loaded in QGIS, and launches the task selected by
    the active tab when the OK button is pressed.
    """

    def __init__(self, iface):
        QWidget.__init__(self)
        self.setupUi(self)
        self.iface = iface
        self.layer_registry = QgsMapLayerRegistry.instance()
        self.layers = self.iface.legendInterface().layers()
        self.task = None
        self.iface.mapCanvas().layersChanged.connect(self.layers_changed)

        # per-tab input widgets, in the order their values are passed to the
        # matching Task constructor (see run())
        self.tabs = ['segm', 'stats', 'clf']
        self.tab_ipts = {
            'segm': [self.segm_raster_ipt, self.segm_clusters_ipt],
            'stats': [self.stats_raster_ipt, self.stats_segm_ipt],
            'clf': [self.class_segm_ipt, self.class_roi_ipt,
                    self.class_roi_field, self.svm_kernel_ipt, self.svm_c_ipt,
                    self.svm_kgamma_ipt, self.svm_kdegree_ipt,
                    self.svm_kcoeff_ipt],
        }
        self.modules = {
            'segm': segmenter,
            'stats': statistics,
            'clf': classifier
        }

        self.ok_btn.pressed.connect(self.run)
        self.tabWidget.currentChanged['int'].connect(self.update_tab_focus)
        self.tabWidgetClf.currentChanged['int'].connect(self.update_subfocus_clf)
        self.update_tab_focus(self.tabWidget.currentIndex())
        self.class_roi_ipt.currentIndexChanged['QString'].connect(self.update_roi_field)
        self.svm_kernel_ipt.currentIndexChanged.connect(self.update_svm_attr)

    def log(self, msg, level='info'):
        """Write msg to the QGIS message log at the given severity."""
        level_dict = {
            'info': QgsMessageLog.INFO,
            'warn': QgsMessageLog.WARNING,
            'crit': QgsMessageLog.CRITICAL,
        }
        QgsMessageLog.logMessage(str(msg), level=level_dict[level])

    def layers_changed(self):
        """Refresh cached layers (and dependent combos) when the canvas changes."""
        layers = self.iface.legendInterface().layers()
        if self.layers != layers:
            self.layers = layers
            self.update_tab_focus(self.tabWidget.currentIndex())

    def get_layers(self, ltype):
        """Return all loaded layers of the given type (VectorLayer/RasterLayer)."""
        return [l for l in self.layers if l.type() == ltype]

    def get_layer(self, ltype, name):
        """Return the first loaded layer of the given type with the given name."""
        return [l for l in self.get_layers(ltype) if l.name() == name][0]

    def update_combo_box(self, ltype, ipt):
        """Fill a combo box with an empty entry plus the matching layer names."""
        ipt.clear()
        ipt.addItems([u'',] + [l.name() for l in self.get_layers(ltype)])

    def update_tab_order(self, inputs):
        """Chain keyboard focus: tabWidget -> inputs... -> OK button."""
        ipts = [self.tabWidget, self.ok_btn]
        ipts[1:1] = inputs
        for i in range(len(ipts)-1):
            self.setTabOrder(ipts[i], ipts[i+1])

    def update_tab_focus(self, index):
        """Dispatch to the update_focus_* handler of the newly active tab."""
        getattr(self, 'update_focus_%s' % self.tabs[index])()
        self.tabWidget.setFocus()

    def update_focus_segm(self):
        # update combo boxes
        self.update_combo_box(RasterLayer, self.segm_raster_ipt)
        # tab order
        self.update_tab_order(self.tab_ipts['segm'])

    def update_focus_stats(self):
        self.update_combo_box(RasterLayer, self.stats_raster_ipt)
        self.update_combo_box(VectorLayer, self.stats_segm_ipt)
        # tab order
        self.update_tab_order(self.tab_ipts['stats'])

    def update_focus_clf(self):
        self.update_combo_box(0, self.class_segm_ipt)
        self.update_combo_box(0, self.class_roi_ipt)
        self.update_subfocus_clf()

    def update_subfocus_clf(self):
        # first sub-tab exposes the first three clf inputs, second sub-tab
        # the rest (slice indices depend on tab_ipts['clf'] ordering)
        idx = self.tabWidgetClf.currentIndex() and [3, None] or [None, 3]
        ipts = self.tab_ipts['clf'][slice(*idx)] + [self.tabWidgetClf]
        self.update_tab_order(ipts)

    def update_roi_field(self, layer_name):
        """Repopulate the ROI attribute-field combo for the chosen layer."""
        self.class_roi_field.clear()
        if layer_name:
            layer = self.get_layer(VectorLayer, layer_name)
            fields = layer.dataProvider().fieldNameMap().keys()
            self.class_roi_field.addItems(fields)

    def update_svm_attr(self, item_index):
        """Enable only the SVM parameter inputs relevant to the chosen kernel."""
        kernel = self.svm_kernel_ipt.currentText().lower()
        ipts = self.tab_ipts['clf'][5:]
        attr_list = {
            'linear': [],
            'poly': ipts[1:],
            'rbf': ipts[0:1],
            'sigmoid': ipts[2:3],
        }
        for ipt in ipts:
            ipt.setEnabled(ipt in attr_list[kernel])

    def get_text(self, ipt):
        """Read the current value of a combo box or spin box uniformly."""
        try:
            return ipt.currentText()
        except AttributeError:
            return ipt.cleanText()

    def run(self):
        """Build, validate and launch the task for the currently active tab."""
        # create a new task instance
        tab_name = self.tabs[self.tabWidget.currentIndex()]
        self.log('starting %s' % tab_name)
        # set task up
        args = [self.get_text(ipt)
                for ipt in self.tab_ipts[tab_name]]
        task = self.modules[tab_name].Task(self, *args)
        # validate
        if not task.is_valid():
            QMessageBox.critical(self.iface.mainWindow(), 'Error',
                                 task.invalid)
            return
        # update gui
        self.ok_btn.setEnabled(False)
        self.cancel_btn.pressed.connect(task.kill)
        self.progressBar.setValue(0)
        # configure QgsMessageBar
        action = self.tabWidget.tabText(self.tabWidget.currentIndex())
        messageBar = self.iface.messageBar().createMessage(action, '')
        msgProgressBar = QtGui.QProgressBar()
        msgProgressBar.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        cancelButton = QtGui.QPushButton()
        cancelButton.setText('Cancel')
        cancelButton.clicked.connect(self.cancel_btn.click)
        messageBar.layout().addWidget(msgProgressBar)
        messageBar.layout().addWidget(cancelButton)
        self.iface.messageBar().pushWidget(messageBar, QgsMessageBar.INFO)
        # hold references so the widgets outlive this call
        self.messageBar = messageBar
        self.action = action
        # fire task
        task.run()
|
#!/usr/bin/env python
#
# GrovePi Example for using the Grove Light Sensor and the LED together to turn the LED On and OFF if the background light is greater than a threshold.
# Modules:
# http://www.seeedstudio.com/wiki/Grove_-_Light_Sensor
# http://www.seeedstudio.com/wiki/Grove_-_LED_Socket_Kit
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove Light Sensor to analog port A0
# SIG,NC,VCC,GND
light_sensor = 0

# Connect the LED to digital port D4
# SIG,NC,VCC,GND
led = 4

# Turn on LED once sensor exceeds threshold resistance (kilo-ohms)
threshold = 10

grovepi.pinMode(light_sensor,"INPUT")
grovepi.pinMode(led,"OUTPUT")

while True:
    try:
        # Get sensor value
        sensor_value = grovepi.analogRead(light_sensor)

        # Guard against a zero reading, which would otherwise raise an
        # uncaught ZeroDivisionError in the resistance formula below
        if sensor_value == 0:
            time.sleep(.5)
            continue

        # Calculate resistance of sensor in K
        resistance = float(1023 - sensor_value) * 10 / sensor_value

        if resistance > threshold:
            # Send HIGH to switch on LED
            grovepi.digitalWrite(led,1)
        else:
            # Send LOW to switch off LED
            grovepi.digitalWrite(led,0)

        print("sensor_value = %d resistance =%.2f" %(sensor_value, resistance))
        time.sleep(.5)

    except IOError:
        print ("Error")
|
lay_music('search6', -1)
if self.game.ball_count.position == 7:
self.game.sound.play_music('search7', -1)
if self.game.ball_count.position == 8:
self.game.sound.play_music('search8', -1)
	def sw_tilt_active(self, sw):
		# Tilt switch handler: run the tilt penalty only once per tilt
		# (tilt.status stays truthy until reset, blocking repeats).
		if self.game.tilt.status == False:
			self.tilt_actions()
	def replay_step_down(self, number=0):
		# Step the replay (credit) counter down.  With number > 1 this
		# re-arms itself via a 0.13 s delay, pulsing the register coil once
		# per step until one step remains; with the default it steps once.
		if number > 0:
			if number > 1:
				self.game.replays -= 1
				graphics.replay_step_down(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
				self.game.coils.registerDown.pulse()
				number -= 1
				graphics.spelling_bee.display(self)
				# re-arm the delayed call until only one step remains
				self.delay(name="replay_reset", delay=0.13, handler=self.replay_step_down, param=number)
			elif number == 1:
				self.game.replays -= 1
				graphics.replay_step_down(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
				self.game.coils.registerDown.pulse()
				number -= 1
				graphics.spelling_bee.display(self)
				# last step: stop the repeating delay
				self.cancel_delayed(name="replay_reset")
		else:
			if self.game.replays > 0:
				self.game.replays -= 1
				graphics.replay_step_down(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
				self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
				self.game.coils.registerDown.pulse()
	def replay_step_up(self):
		# Add one replay (credit), capped at 99, pulsing the register-up
		# coil so the physical reels follow; always refresh the display.
		if self.game.replays < 99:
			self.game.replays += 1
			graphics.replay_step_up(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
			self.game.coils.registerUp.pulse()
		graphics.spelling_bee.display(self)
def search(self):
# The search workflow/logic will determine if you actually have a winner, but it is a bit tricky.
# if the ball is in a particular hole, the search relays need to click and/or clack, and
# when you have at least three going at once, it should latch on the search index and score.
# This scoring is tempered by the selector disc. You have to have the card enabled that you're
# winning on. This whole process will have to happen on a rotational basis. The search should really
| # begin immediately upon the first ball landing in the hole.
# I suspect that the best, fastest way to complete the search is actually to reimplement the mechanical
# search activity. For each revolution of the search disc (which happens about every 5-7 seconds), the
# game will activate() each search relay for each 'hot' rivet on the search disc. This can be on a different
# wiper finger for each se | t of rivets on the search disc.
# Replay counters also need to be implemented to prevent the supplemental searches from scoring.
for i in range(0, 100):
if i <= 50:
self.r = self.closed_search_relays(self.game.searchdisc.position)
self.game.searchdisc.spin()
if i >= 51:
self.r = self.closed_search_relays(self.game.searchdisc2.position + 50)
self.game.searchdisc2.spin()
self.wipers = self.r[0]
self.card = self.r[1]
self.four = self.r[2]
# From here, I need to determine based on the value of r, whether to latch the search index and score. For Bright Lights,
# I need to determine the best winner on each card. To do this, I must compare the position of the replay counter before
# determining the winner. Reminder that my replay counters are a 1:1 representation.
self.match = []
for key in self.wipers:
for number in self.holes:
if number == key:
self.match.append(self.wipers[key])
relays = sorted(set(self.match))
#TODO Play sound for each relay closure.
s = functions.count_seq(relays)
if self.game.selector.position >= self.card:
if s >= 3:
self.find_winner(s, self.card, self.four)
break
def find_winner(self, relays, card, four):
if self.game.search_index.status == False and self.game.replays < 99:
if card == 1:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card1_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(amount - self.game.card1_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card1_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(amount - self.game.card1_replay_counter.position)
if card == 2:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card2_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card2_replay_step_up(amount - self.game.card2_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card2_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card2_replay_step_up(amount - self.game.card2_replay_counter.position)
if card == 3:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card3_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card3_replay_step_up(amount - self.game.card3_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card3_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card3_replay_step_up(amount - self.game.card3_replay_counter.position)
if card == 4:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card4_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card4_replay_step_up(amount - self.game.card4_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card4_replay_counter.position < amount:
s |
# encoding: utf-8
#
# Copyright (C) 2013 midnightBITS/Marcin Zdun
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
'''
Created on 09-05-2017
@author: Marcin Zdun
'''
def getAnchor(index, defndx, *points):
	"""Pick one of *points* by index.

	A negative index selects the default entry (points[defndx]); any other
	index wraps around the available points.
	"""
	if index < 0:
		return points[defndx]
	return points[index % len(points)]
def getAnchorDiag(diag, index, defndx, *points):
	"""Pick one of *points* by index, same rule as getAnchor.

	'diag' is accepted but unused here; it is kept for interface
	compatibility with callers.
	"""
	if index >= 0:
		return points[index % len(points)]
	return points[defndx]
def boxAnchor(index, defndx, x1, y1, x2, y2):
	"""Return one of 16 anchor points on the rectangle (x1, y1)-(x2, y2).

	The anchors run clockwise from the top-left corner in quarter-side
	steps. A negative index selects anchor defndx; other indices wrap.
	"""
	halfW = float(x2 - x1) / 2
	halfH = float(y2 - y1) / 2
	quartW = halfW / 2
	quartH = halfH / 2
	anchors = (
		(x1, y1), (x1 + quartW, y1), (x1 + halfW, y1), (x1 + halfW + quartW, y1),
		(x2, y1), (x2, y1 + quartH), (x2, y1 + halfH), (x2, y1 + halfH + quartH),
		(x2, y2), (x1 + halfW + quartW, y2), (x1 + halfW, y2), (x1 + quartW, y2),
		(x1, y2), (x1, y1 + halfH + quartH), (x1, y1 + halfH), (x1, y1 + quartH),
	)
	# selection rule inlined from getAnchor: default on negative, else wrap
	if index < 0:
		return anchors[defndx]
	return anchors[index % len(anchors)]
|
fwhm = sigma * sigma_to_fwhm
R = ckms / fwhm
width = Rsigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='R'"
# convert inres from Rsigma to sigma (km/s)
try:
kwargs['inres'] = ckms / kwargs['inres']
except(KeyError):
pass
elif smoothtype == 'lambda':
linear = True
units = 'AA'
sigma = resolution
fwhm = sigma * sigma_to_fwhm
Rsigma = None
R = None
width = sigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='lambda'"
elif smoothtype == 'lsf':
linear = True
width = 100
sigma = resolution
else:
raise ValueError("smoothtype {} is not valid".format(smoothtype))
# Mask the input spectrum depending on outwave or the wave_smooth kwargs
mask = mask_wave(wave, width=width, outwave=outwave, linear=linear,
wlo=min_wave_smooth, whi=max_wave_smooth, **kwargs)
w = wave[mask]
s = spec[mask]
if outwave is None:
outwave = wave
# Choose the smoothing method
if smoothtype == 'lsf':
if fftsmooth:
smooth_method = smooth_lsf_fft
if sigma is not None:
# mask the resolution vector
sigma = resolution[mask]
else:
smooth_method = smooth_lsf
if sigma is not None:
# convert to resolution on the output wavelength grid
sigma = np.interp(outwave, wave, resolution)
elif linear:
if fftsmooth:
smooth_method = smooth_wave_fft
else:
smooth_method = smooth_wave
else:
if fftsmooth:
smooth_method = smooth_vel_fft
else:
smooth_method = smooth_vel
# Actually do the smoothing and return
return smooth_method(w, s, outwave, sigma, **kwargs)
def smooth_vel(wave, spec, outwave, sigma, nsigma=10, inres=0, **extras):
    """Smooth a spectrum in velocity space. This is insanely slow, but general
    and correct.

    :param wave:
        Wavelength vector of the input spectrum.

    :param spec:
        Flux vector of the input spectrum.

    :param outwave:
        Desired output wavelength vector.

    :param sigma:
        Desired velocity resolution (km/s), *not* FWHM.

    :param nsigma:
        Number of sigma away from the output wavelength to consider in the
        integral.  If less than zero, all wavelengths are used.  Setting this
        to some positive number decreses the scaling constant in the O(N_out *
        N_in) algorithm used here.

    :param inres:
        The velocity resolution of the input spectrum (km/s), *not* FWHM.

    :returns flux:
        The smoothed flux on the outwave grid.
    """
    sigma_eff_sq = sigma**2 - inres**2
    # BUGFIX: the comparison must happen inside np.any -- the original
    # 'np.any(sigma_eff_sq) < 0.0' compared a boolean to 0.0 and was
    # always False, so invalid resolutions were never caught.
    if np.any(sigma_eff_sq < 0.0):
        raise ValueError("Desired velocity resolution smaller than the value "
                         "possible for this input spectrum (inres={}).".format(inres))
    # sigma_eff is in units of sigma_lambda / lambda
    sigma_eff = np.sqrt(sigma_eff_sq) / ckms

    lnwave = np.log(wave)
    flux = np.zeros(len(outwave))
    for i, w in enumerate(outwave):
        x = (np.log(w) - lnwave) / sigma_eff
        if nsigma > 0:
            good = np.abs(x) < nsigma
            x = x[good]
            _spec = spec[good]
        else:
            _spec = spec
        # Gaussian-weighted average of the input flux around this pixel
        f = np.exp(-0.5 * x**2)
        flux[i] = np.trapz(f * _spec, x) / np.trapz(f, x)
    return flux
def smooth_vel_fft(wavelength, spectrum, outwave, sigma_out, inres=0.0,
                   **extras):
    """Smooth a spectrum in velocity space, using FFTs. This is fast, but makes
    some assumptions about the form of the input spectrum and can have some
    issues at the ends of the spectrum depending on how it is padded.

    :param wavelength:
        Wavelength vector of the input spectrum. An assertion error will result
        if this is not a regular grid in wavelength.

    :param spectrum:
        Flux vector of the input spectrum.

    :param outwave:
        Desired output wavelength vector.

    :param sigma_out:
        Desired velocity resolution (km/s), *not* FWHM.  Scalar or length 1 array.

    :param inres:
        The velocity resolution of the input spectrum (km/s), dispersion *not*
        FWHM.
    """
    # The kernel width for the convolution: subtract the input resolution
    # in quadrature.
    sigma = np.sqrt(sigma_out**2 - inres**2)
    if sigma <= 0:
        # requested resolution is no coarser than the input: nothing to do
        # beyond interpolating onto the output grid
        return np.interp(outwave, wavelength, spectrum)

    # make length of spectrum a power of 2 by resampling
    wave, spec = resample_wave(wavelength, spectrum)

    # get grid resolution (*not* the resolution of the input spectrum) and make
    # sure it's nearly constant. It should be, by design (see resample_wave)
    invRgrid = np.diff(np.log(wave))
    assert invRgrid.max() / invRgrid.min() < 1.05
    dv = ckms * np.median(invRgrid)

    # Do the convolution
    spec_conv = smooth_fft(dv, spec, sigma)
    # interpolate onto output grid
    if outwave is not None:
        spec_conv = np.interp(outwave, wave, spec_conv)
    return spec_conv
def smooth_wave(wave, spec, outwave, sigma, nsigma=10, inres=0, in_vel=False,
                **extras):
    """Smooth a spectrum in wavelength space by direct Gaussian-weighted
    integration.  Slow -- O(N_out * N_in) -- but general and correct (except
    for the approximate treatment of a velocity-space input resolution).

    :param wave:
        Wavelength vector of the input spectrum.

    :param spec:
        Flux vector of the input spectrum.

    :param outwave:
        Desired output wavelength vector.

    :param sigma:
        Desired resolution (*not* FWHM) in wavelength units.  May be a vector
        of the same length as ``wave`` for wavelength-dependent broadening.

    :param nsigma: (optional, default=10)
        Number of sigma from the output wavelength considered in the
        integral; if negative, all input wavelengths are used.

    :param inres: (optional, default: 0.0)
        Resolution of the input, in either wavelength units or
        lambda/dlambda (c/v).  Ignored if <= 0.

    :param in_vel: (optional, default: False)
        If True, ``inres`` is interpreted as lambda/dlambda (velocity
        smoothing of the input).

    :returns flux:
        The output smoothed flux vector, same length as ``outwave``.
    """
    # effective kernel width (in angstroms), input resolution removed in
    # quadrature
    if inres <= 0:
        eff_sq = sigma**2
    elif in_vel:
        # Approximate correction for the intrinsic wavelength-dependent
        # dispersion.  This sort of maybe works.
        eff_sq = sigma**2 - (wave / inres)**2
    else:
        eff_sq = sigma**2 - inres**2
    if np.any(eff_sq < 0):
        raise ValueError("Desired wavelength sigma is lower than the value "
                         "possible for this input spectrum.")
    eff = np.sqrt(eff_sq)

    smoothed = np.zeros(len(outwave))
    for k, center in enumerate(outwave):
        offsets = (wave - center) / eff
        if nsigma > 0:
            keep = np.abs(offsets) < nsigma
            offsets = offsets[keep]
            flux_in = spec[keep]
        else:
            flux_in = spec
        kernel = np.exp(-0.5 * offsets**2)
        smoothed[k] = np.trapz(kernel * flux_in, offsets) / np.trapz(kernel, offsets)
    return smoothed
def smooth_wave_fft(wavelength, spectrum, outwave, sigma_out=1.0,
inres=0.0, **extras):
"""Smooth a spectrum in wavelength space, using FFTs. This is fast, but
makes some assumptions about the input spectrum, and can have some
issues at the ends of the spectrum depending on how it is padded.
:param wavelength:
Wavelength vector of the input spectrum.
:param spectrum:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma:
Desired resolution (*not* FWHM) in wavelength units.
:param inres:
Resolution of the input, in wavelength units (dispersion not FWHM).
:returns flux:
The output |
# -*- coding: utf-8 -*-
'''Python command line tool to manage a local cache of content from DataONE.
Output is a folder w | ith structure:
cache/
meta.json: Basic metadata about the content in the cache
  index.json: An index to entries in the cache. Downloads are renamed using a
| hash of the identifier as the identifier is not file system safe
0/
.
.
f/
Note that this process runs as a single thread and so will take quite a while
to complete.
Note also that the libraries used emit error messages that may be more
appropriately handled in logic. As a result the output from this script is
quite verbose, though it seems to work effectively.
Dependencies:
pip install -U dataone.libclient
# should install downstream dependencies
Use:
python d1_local_copy.py
'''
import logging
from d1_local_copy.local_copy_manager import LocalCopyManager
if __name__ == "__main__":
  # Script entry point: mirror non-obsoleted METADATA entries from a
  # DataONE coordinating node into a local cache folder.
  logging.basicConfig(level=logging.INFO)
  # name of a folder that will contain the downloaded content,
  # NOTE(review): cache_folder is defined but never passed to
  # LocalCopyManager — confirm whether the manager defaults to "cache".
  cache_folder="cache"
  # hostname of coordinating node to use
  host="cn.dataone.org"
  # Query to retrieve all METADATA entries that are not obsoleted
  q = "formatType:METADATA AND -obsoletedBy:[* TO *]"
  manager = LocalCopyManager(host=host)
  #populate the cache, limiting the total downloads to max_records
  manager.populate(q, max_records=1000)
|
def passChecker(password):
    """Return True if *password* meets the strength policy.

    The password must be at least 8 characters long and contain at least
    one uppercase letter, one lowercase letter, and one digit.

    :param password: candidate password string.
    :returns: True when every requirement is met, otherwise False.
    """
    import re
    passlength = 8
    # Cheap length check first; then one regex search per character
    # class.  (Replaces the original `search(...) == None` comparisons —
    # None checks should use `is`, and truthiness is simpler still.)
    if len(password) < passlength:
        return False
    required = (r'[A-Z]', r'[a-z]', r'[0-9]')
    return all(re.search(pattern, password) for pattern in required)
|
import win32ras
stateStrings = {
win32ras.RASCS_OpenPort : "OpenPort",
win32ras.RASCS_PortOpened : "PortOpened",
win32ras.RASCS_ConnectDevice : "ConnectDevice",
win32ras.RASCS_DeviceConnected : "DeviceConnected",
win32ras.RASCS_AllDevicesConnected : "AllDevicesConnected",
win32ras.RASCS_Authenticate : "Authenticate",
win32ras.RASCS_AuthNotify : "AuthNotify",
win32ras.RASCS_AuthRetry : "AuthRetry",
win32ras.RASCS_AuthCallback : "AuthCallback",
win32ras.RASCS_AuthChangePassword : "AuthChangePassword",
win32ras.RASCS_AuthProject : "AuthProject",
win32ras.RASCS_AuthLinkSpeed : "AuthLinkSpeed",
win32ras.RASCS_AuthAck : "AuthAck",
win32ras.RASCS_ReAuthenticate : "ReAuthenticate",
win32ras.RASCS_Authenticated : "Authenticated",
win32ras.RASCS_PrepareForCallback : "PrepareForCallback",
win32ras.RASCS_WaitForModemReset : "WaitForModemReset",
win32ras.RASCS_WaitForCallback : "WaitForCallback",
win32ras.RASCS_Projected : "Projected",
win32ras.RASCS_StartAuthentication : "StartAuthentication",
win32ras.RASCS_CallbackComplete : "CallbackComplete",
win32ras.RASCS_LogonNetwork : "LogonNetwork",
win32ras.RASCS_Interactive : "Interactive",
win32ras.RASCS_RetryAuthentication : "RetryAuthentication",
win32ras.RASCS_CallbackSetByCaller : "CallbackSetByCaller",
win32ras.RASCS_PasswordExpired : "PasswordExpired | ",
win32ras.RASCS_Connected : "Connected",
win32ras.RASCS_Disconnected : "Disconnected"
}
def TestCallback( hras, msg, state, error, exterror):
    # RAS dial-progress callback: dump each state transition (translated
    # via stateStrings) to stdout.  Python 2 print statement.
    print "Callback called with ", hras, msg, stateStrings[state], error, exterror
def test(rasName = "_ Divert Off"):
    # Dial the named phonebook entry, reporting progress via TestCallback.
    return win32ras.Dial(None, None, (rasName,),TestCallback)
"""
Logger di varie info per ogni host
"""
from novaclient import client as novaclient
from ceilometerclient import client as ceiloclient
import os
from os import environ as env
import time
def start(hosts, sleep_sec, base_dir):
    # Poll each compute host forever, logging meter samples, alarms and
    # VM placement into per-host folders under base_dir/<start-time>/.
    # Requires admin OpenStack credentials in the OS_* environment vars.
    print 'You must be admin to use this script'
    # start logger
    time_dir = get_cur_formatted_time()
    root_path = os.path.join(base_dir, time_dir)
    keystone = {}
    keystone['username'] = env['OS_USERNAME']
    keystone['password'] = env['OS_PASSWORD']
    keystone['auth_url'] = env['OS_AUTH_URL']
    keystone['tenant_name'] = env['OS_TENANT_NAME']
    nova = (novaclient.Client(3, keystone['username'], keystone['password'], keystone['tenant_name'],
                              keystone['auth_url'], service_type='compute'))
    ceilo = (ceiloclient.get_client(2, username=keystone['username'], password=keystone['password'],
                                    tenant_name=keystone['tenant_name'], auth_url=keystone['auth_url']))
    # Flavor id -> name lookup, resolved once before the polling loop.
    flavor_list = nova.flavors.list()
    flavor_dict = dict((flavor.id, flavor.name) for flavor in flavor_list)
    # NOTE: loops forever; terminate with Ctrl-C.
    while True:
        for host in hosts:
            host_id = '_'.join([host, host]) #host_node: computeX_computeX
            log_info(nova, ceilo, host, host_id, root_path, flavor_dict)
        time.sleep(sleep_sec)
def log_info(nova, ceilo, host, host_id, root_path, flavor_dict):
    # log info every interval
    # Capture one snapshot of every tracked meter/alarm for this host,
    # creating the per-host output directory on first use.
    path = os.path.join(root_path, host)
    if not os.path.exists(path):
        os.makedirs(path)
        print path
    log_meter_host_cpu_util(ceilo, host_id, path)
    log_meter_host_mem_util(ceilo, host_id, path)
    log_meter_host_cpu_mem(ceilo, host_id, path)
    log_vms_host(nova, host, path, flavor_dict)
    log_alarm_host_cpu_mem(ceilo, host_id, path)
def log_meter_host_cpu_util(ceilo, host_id, path):
    # Fetch the newest 'host.cpu.util' sample for this host and append
    # its value (scaled from percent to a 0-1 fraction) to the
    # meter_host_cpu_util log file.
    query = [{'field':'resource_id',
              'op':'eq',
              'value':host_id}]
    samples = ceilo.samples.list(meter_name='host.cpu.util', limit=1, q=query)
    utilization = samples[0].counter_volume/100
    write_file(get_path_to_file(path, "meter_host_cpu_util"),
               get_string_to_write(str(utilization)))
def log_meter_host_mem_util(ceilo, host_id, path):
    # Fetch the newest 'host.memory.usage' sample for this host and
    # append its value (percent -> 0-1 fraction) to the
    # meter_host_mem_util log file.
    query = [{'field':'resource_id',
              'op':'eq',
              'value':host_id}]
    samples = ceilo.samples.list(meter_name='host.memory.usage', limit=1,
                                 q=query)
    usage = samples[0].counter_volume/100
    write_file(get_path_to_file(path, "meter_host_mem_util"),
               get_string_to_write(str(usage)))
def log_meter_host_cpu_mem(ceilo, host_id, path):
    # Append the newest sample of the combined cpu+memory meter for this
    # host, written unscaled to the meter_host_cpu_mem log file.
    query = [{'field':'resource_id',
              'op':'eq',
              'value':host_id}]
    samples = ceilo.samples.list(meter_name='host.cpu.util.memory.usage',
                                 limit=1, q=query)
    write_file(get_path_to_file(path, "meter_host_cpu_mem"),
               get_string_to_write(str(samples[0].counter_volume)))
def log_alarm_host_cpu_mem(ceilo, host_id, path):
    # overload and underload alarms
    # For every alarm on the combined cpu+memory meter that belongs to
    # this host, encode its state as 0 (ok), 1 (alarm) or 2 (other,
    # e.g. insufficient data) and append it to the matching log files.
    alarms = ceilo.alarms.list(q=[{'field':'meter',
                                   'op':'eq',
                                   'value':'host.cpu.util.memory.usage'}])
    # Alarm names embed the bare hostname, so strip the "_host" suffix
    # from host_id (which is "computeX_computeX").
    hostname = [x.strip() for x in host_id.split('_')][0]
    for alarm in alarms:
        name = alarm.name
        state = alarm.state
        #print hostname
        #print name
        if hostname in name:
            name_state = ''
            if state == 'ok':
                name_state = name + ': ' + '0'
            elif state == 'alarm':
                name_state = name + ': ' + '1'
            else:
                name_state = name + ': ' + '2'
            content = get_string_to_write(name_state)
            if 'overload' in name:
                path_file = get_path_to_file(path, "alarm_host_cpu_mem_overload")
                write_file(path_file, content)
            if 'underload' in name:
                path_file = get_path_to_file(path, "alarm_host_cpu_mem_underload")
                write_file(path_file, content)
            # Every matching alarm also goes to the combined log.
            path_file = get_path_to_file(path, "alarm_host_cpu_mem")
            write_file(path_file, content)
        # Separator record written once per alarm examined (matched or
        # not) so scan passes are visually delimited in the combined log.
        content = get_string_to_write("**********")
        path_file = get_path_to_file(path, "alarm_host_cpu_mem")
        write_file(path_file, content)
def log_vms_host(nova, host, path, flavor_dict):
    # Record the number of VMs on `host` plus each VM's (id, flavor name)
    # pair, across all tenants, in the "vms" log file.
    vm_list = nova.servers.list(search_opts={'host': host, 'all_tenants': True})
    pairs = [(vm.id, flavor_dict[vm.flavor['id']]) for vm in vm_list]
    line = get_string_to_write(str(len(vm_list)) + ' , ' + str(pairs))
    write_file(get_path_to_file(path, "vms"), line)
def write_file(path_file, content):
    """Append *content* plus a platform line separator to *path_file*.

    :param path_file: path of the log file (created if missing).
    :param content: value to append; converted with str().
    """
    # Use a context manager so the handle is closed even if write()
    # raises (the original open/close pair leaked on error).
    with open(path_file, "a") as out_file:
        out_file.write(str(content) + os.linesep)
def get_path_to_file(path, filename):
    # Thin wrapper over os.path.join, kept for readability at call sites.
    joined = os.path.join(path, filename)
    return joined
def get_string_to_write(content):
    # Prefix the payload with the current timestamp: "<time>, <content>".
    timestamp = get_cur_formatted_time()
    return timestamp + ", " + content
def get_cur_formatted_time():
    # ISO-8601-style local time, e.g. "2016-01-31T14:05:09".
    now = time.localtime(time.time())
    return time.strftime('%Y-%m-%dT%H:%M:%S', now)
# Script entry point: monitor these compute nodes, sampling every 150 s.
# NOTE(review): start() loops forever and there is no __main__ guard, so
# importing this module starts the monitor — confirm this is intended.
compute_hosts = ['compute02', 'compute03', 'compute04']
sleep_sec = 150
base_dir = "log"
start(compute_hosts, sleep_sec, base_dir)
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import funkcí z jiného adresáře
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src/"))
import unittest
import numpy as np
import os
from imtools import qmisc
from imtools import misc
#
class QmiscTest(unittest.TestCase):
    """Unit tests for the imtools.qmisc and imtools.misc helpers."""
    # Flag for tests requiring user interaction (none enabled here).
    interactivetTest = False
    # interactivetTest = True
    # @unittest.skip("waiting for implementation")
    def test_suggest_filename(self):
        """
        Testing some files. Not testing recursion in filenames. It is situation
        if there exist file0, file1, file2 and input file is file
        """
        filename = "mujsoubor"
        # import ipdb; ipdb.set_trace() # BREAKPOINT
        new_filename = misc.suggest_filename(filename, exists=True)
        # self.assertTrue(new_filename == "mujsoubor2")
        self.assertEqual(new_filename, "mujsoubor_2")
        filename = "mujsoubor_112"
        new_filename = misc.suggest_filename(filename, exists=True)
        self.assertTrue(new_filename == "mujsoubor_113")
        filename = "mujsoubor_2.txt"
        new_filename = misc.suggest_filename(filename, exists=True)
        self.assertTrue(new_filename == "mujsoubor_3.txt")
        filename = "mujsoubor27.txt"
        new_filename = misc.suggest_filename(filename, exists=True)
        self.assertTrue(new_filename == "mujsoubor27_2.txt")
        filename = "mujsoubor-a24.txt"
        # exists=False: the name is free, so it must come back unchanged.
        new_filename = misc.suggest_filename(filename, exists=False)
        self.assertEqual(new_filename, "mujsoubor-a24.txt", "Rewrite")
    @unittest.skip("getVersionString is not used anymore")
    def test_getVersionString(self):
        """
        getVersionString is not used anymore
        """
        vfn = "../__VERSION__"
        existed = False
        if not os.path.exists(vfn):
            with open(vfn, 'a') as the_file:
                the_file.write('1.1.1\n')
            # NOTE(review): `existed` is False on both paths, so the
            # cleanup below never fires — looks like the created file was
            # meant to be removed.  Left as-is; the test is skipped.
            existed = False
        verstr = qmisc.getVersionString()
        self.assertTrue(type(verstr) == str)
        if existed:
            os.remove(vfn)
    def test_obj_to_and_from_file_yaml(self):
        # Round-trip a dict containing a numpy array through YAML.
        testdata = np.random.random([4, 4, 3])
        test_object = {'a': 1, 'data': testdata}
        filename = 'test_obj_to_and_from_file.yaml'
        misc.obj_to_file(test_object, filename, 'yaml')
        saved_object = misc.obj_from_file(filename, 'yaml')
        self.assertTrue(saved_object['a'] == 1)
        self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
        os.remove(filename)
    def test_obj_to_and_from_file_pickle(self):
        # Round-trip the same structure through pickle.
        testdata = np.random.random([4, 4, 3])
        test_object = {'a': 1, 'data': testdata}
        filename = 'test_obj_to_and_from_file.pkl'
        misc.obj_to_file(test_object, filename, 'pickle')
        saved_object = misc.obj_from_file(filename, 'pickle')
        self.assertTrue(saved_object['a'] == 1)
        self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
        os.remove(filename)
    # def test_obj_to_and_from_file_exeption(self):
    #    test_object = [1]
    #    filename = 'test_obj_to_and_from_file_exeption'
    #    self.assertRaises(misc.obj_to_file(test_object, filename ,'yaml'))
    def test_obj_to_and_from_file_with_directories(self):
        # obj_to_file is expected to create intermediate directories.
        import shutil
        testdata = np.random.random([4, 4, 3])
        test_object = {'a': 1, 'data': testdata}
        dirname = '__test_write_and_read'
        filename = '__test_write_and_read/test_obj_to_and_from_file.pkl'
        misc.obj_to_file(test_object, filename, 'pickle')
        saved_object = misc.obj_from_file(filename, 'pickle')
        self.assertTrue(saved_object['a'] == 1)
        self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
        shutil.rmtree(dirname)
# Allow running this test module directly: `python <thisfile>.py`.
if __name__ == "__main__":
    unittest.main()
|
# coding: utf-8
from .graphviz_wrapper import board, add_digraph, add_digraph_node, add_digraph_edge
fr | om .jupyter_helper import jupyter_pan_and_zoom, jupyter_show_as_svg
# Package metadata.
__author__ = "akimach"
__version__ = "0.0.7"
__license__ = "MIT"
|
llsign
self.__messages = []
self._conn = None
def __ssid(self):
return "[DRATS-%s-B2FHIM$]" % version.DRATS_VERSION
    def _send(self, string):
        # Write one CR-terminated line to the CMS connection, echoing the
        # outbound traffic to stdout (Python 2 print statement).
        print " -> %s" % string
        self._conn.send(string + "\r")
    def __recv(self):
        # Read a single CR-terminated line, one byte at a time, echoing
        # the (escaped) inbound traffic to stdout.
        resp = ""
        while not resp.endswith("\r"):
            resp += self._conn.recv(1)
        print " <- %s" % escaped(resp)
        return resp
def _recv(self):
r = ";"
while r.startswith(";"):
r = self.__recv()
return r;
def _send_ssid(self, recv_ssid):
try:
sw, ver, caps = recv_ssid[1:-1].split("-")
except Exception:
raise Exception("Conversation error (unparsable SSID `%s')" % resp)
self._send(self.__ssid())
prompt = self._recv().strip()
if not prompt.endswith(">"):
raise Exception("Conversation error (never got prompt)")
    def __get_list(self):
        # Request the pending-message list ("FF") and parse the "FC"
        # proposal lines into WinLinkMessage objects, until the server
        # terminates the listing with "F>" or "FQ".
        self._send("FF")
        msgs = []
        reading = True
        while reading:
            resp = self._recv()
            for l in resp.split("\r"):
                if l.startswith("FC"):
                    print "Creating message for %s" % l
                    msgs.append(WinLinkMessage(l))
                elif l.startswith("F>"):
                    reading = False
                    break
                elif l.startswith("FQ"):
                    reading = False
                    break
                elif not l:
                    pass
                else:
                    print "Invalid line: %s" % l
                    raise Exception("Conversation error (%s while listing)" % l)
        return msgs
    def get_messages(self):
        # Download every pending message: list proposals, accept them all
        # ("FS YY..."), read each message body from the connection, then
        # close the session with "FQ".  Returns the number fetched.
        self._connect()
        self._login()
        self.__messages = self.__get_list()
        if self.__messages:
            self._send("FS %s" % ("Y" * len(self.__messages)))
            for msg in self.__messages:
                print "Getting message..."
                try:
                    msg.read_from_socket(self._conn)
                except Exception, e:
                    # Python 2 except syntax; error is re-raised unchanged.
                    raise
                    #print e
        self._send("FQ")
        self._disconnect()
        return len(self.__messages)
    def get_message(self, index):
        # Return a WinLinkMessage previously fetched by get_messages().
        return self.__messages[index]
    def send_messages(self, messages):
        # Upload messages: send each proposal, a checksum line
        # ("F> XX"), await the server's "FS" acceptance, then stream the
        # message bodies.  Only batches of exactly one are supported.
        if len(messages) != 1:
            raise Exception("Sorry, batch not implemented yet")
        self._connect()
        self._login()
        cs = 0
        for msg in messages:
            p = msg.get_proposal()
            for i in p:
                cs += ord(i)
            cs += ord("\r")
            self._send(p)
        # Two's-complement checksum over all proposal bytes plus CRs.
        cs = ((~cs & 0xFF) + 1)
        self._send("F> %02X" % cs)
        resp = self._recv()
        if not resp.startswith("FS"):
            raise Exception("Error talking to server: %s" % resp)
        fs, accepts = resp.split()
        if len(accepts) != len(messages):
            raise Exception("Server refused some of my messages?!")
        for msg in messages:
            msg.send_to_socket(self._conn)
        resp = self._recv()
        self._disconnect()
        return 1
class WinLinkTelnet(WinLinkCMS):
    """WinLink CMS session over a plain TCP (telnet-style) connection."""
    def __init__(self, callsign, server="server.winlink.org", port=8772):
        self.__server = server
        self.__port = port
        WinLinkCMS.__init__(self, callsign)
    def _connect(self):
        # NOTE(review): this inner class is never instantiated and its
        # socket member starts as 0 — looks like leftover dead code.
        class sock_file:
            def __init__(self):
                self.__s = 0
            def read(self, len):
                return self.__s.recv(len)
            def write(self, buf):
                return self.__s.send(buf)
            def connect(self, spec):
                return self.__s.connect(spec)
            def close(self):
                self.__s.close()
        self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._conn.connect((self.__server, self.__port))
    def _disconnect(self):
        self._conn.close()
    def _login(self):
        # The CMS telnet front end prompts for callsign, then a fixed
        # password ("CMSTELNET"), then exchanges SSID banners.
        resp = self._recv()
        resp = self._recv()
        if not resp.startswith("Callsign :"):
            raise Exception("Conversation error (never saw login)")
        self._send(self._callsign)
        resp = self._recv()
        if not resp.startswith("Password :"):
            raise Exception("Conversation error (never saw password)")
        self._send("CMSTELNET")
        resp = self._recv()
        self._send_ssid(resp)
class WinLinkRMSPacket(WinLinkCMS):
    """WinLink CMS session over an AX.25 packet link via an AGW engine."""
    def __init__(self, callsign, remote, agw):
        self.__remote = remote
        self.__agw = agw
        WinLinkCMS.__init__(self, callsign)
    def _connect(self):
        # `agw` here resolves at module level (presumably the agw module
        # imported elsewhere in this file — confirm); the engine handle
        # passed in is self.__agw.
        self._conn = agw.AGW_AX25_Connection(self.__agw, self._callsign)
        self._conn.connect(self.__remote)
    def _disconnect(self):
        self._conn.disconnect()
    def _login(self):
        resp = self._recv()
        self._send_ssid(resp)
class WinLinkThread(threading.Thread, gobject.GObject):
    # GObject signal declarations used to report progress/completion back
    # to the UI thread.
    __gsignals__ = {
        "mail-thread-complete" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
                                  (gobject.TYPE_BOOLEAN, gobject.TYPE_STRING)),
        "event" : signals.EVENT,
        "form-received" : signals.FORM_RECEIVED,
        "form-sent" : signals.FORM_SENT,
        }
    _signals = __gsignals__
    def _emit(self, *args):
        # Marshal the GObject signal emission onto the main-loop thread.
        gobject.idle_add(self.emit, *args)
def __init__(self, config, callsign, callssid=None, send_msgs=[]):
threading.Thread.__init__(self)
self.setDaemon(True)
gobject.GObject.__init__(self)
if not callssid:
callssid = callsign
self._config = config
self._callsign = callsign
self._callssid = callssid
self.__send_msgs = send_msgs
    def __create_form(self, msg):
        # Convert a downloaded WinLink message into a D-RATS form file on
        # disk and return its path.  Mail addressed to our own callsign
        # lands in Inbox, everything else in Outbox.
        mail = email.message_from_string(msg.get_content())
        sender = mail.get("From", "Unknown")
        # Strip any "method:" routing prefix, then re-tag as WL2K mail.
        if ":" in sender:
            method, sender = sender.split(":", 1)
        sender = "WL2K:" + sender
        if self._callsign == self._config.get("user", "callsign"):
            box = "Inbox"
        else:
            box = "Outbox"
        template = os.path.join(self._config.form_source_dir(),
                                "email.xml")
        formfn = os.path.join(self._config.form_store_dir(),
                              box, "%s.xml" % msg.get_id())
        form = formgui.FormFile(template)
        form.set_field_value("_auto_sender", sender)
        form.set_field_value("recipient", self._callsign)
        form.set_field_value("subject", mail.get("Subject", "Unknown"))
        form.set_field_value("message", mail.get_payload())
        form.set_path_src(sender.strip())
        form.set_path_dst(self._callsign)
        form.set_path_mid(msg.get_id())
        form.add_path_element("@WL2K")
        form.add_path_element(self._config.get("user", "callsign"))
        form.save_to(formfn)
        return formfn
def _run_incoming(self):
wl = self.wl2k_connect()
count = wl.get_messages()
for i in range(0, count):
msg = wl.get_message(i)
formfn = self.__create_form(msg)
self._emit("form-received", -999, formfn)
if count:
result = "Queued %i messages" % count
else:
result = "No messages"
return result
    def _run_outgoing(self):
        # Push queued outgoing messages to the WL2K network.  Each raw
        # message is scanned for Mid:/Subject: headers; missing headers
        # fall back to a timestamp id and a generic subject.
        # NOTE(review): server/port are read but never used here — confirm
        # whether wl2k_connect() should receive them.
        server = self._config.get("prefs", "msg_wl2k_server")
        port = self._config.getint("prefs", "msg_wl2k_port")
        wl = self.wl2k_connect()
        for mt in self.__send_msgs:
            m = re.search("Mid: (.*)\r\nSubject: (.*)\r\n", mt)
            if m:
                mid = m.groups()[0]
                subj = m.groups()[1]
            else:
                mid = time.strftime("%H%M%SDRATS")
                subj = "Message"
            wlm = WinLinkMessage()
            wlm.set_id(mid)
            wlm.set_content(mt, subj)
            print m
            print mt
            wl.send_messages([wlm])
            #self._emit("form-sent", -999,
        return "Complete"
def run(self):
if self.__send_ |
ps,
| weights,
randomizer_scale=randomizer_scale * sigma_)
signs = conv.fit()
nonzero = conv.selection_variable['directions'].keys()
if target == 'full':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
| conv._W,
nonzero,
conv.penalty)
elif target == 'selected':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'debiased':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = debiased_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
_, pval, intervals = conv.summary(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives,
ndraw=ndraw,
compute_intervals=False)
which = np.zeros(p, np.bool)
for group in conv.selection_variable['directions'].keys():
which_group = conv.penalty.groups == group
which += which_group
return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_lasso(n=400,
               p=200,
               signal_fac=1.5,
               s=5,
               sigma=3,
               target='full',
               rho=0.4,
               ndraw=10000):
    """
    Test group lasso with groups of size 1, ie lasso

    Draws a Gaussian instance, fits the group lasso with one group per
    coordinate, forms the requested target ('full', 'selected' or
    'debiased'), and returns the p-values split into (null, alternative)
    according to the true signal.
    """
    inst, const = gaussian_instance, group_lasso.gaussian
    signal = np.sqrt(signal_fac * np.log(p))
    X, Y, beta = inst(n=n,
                      p=p,
                      signal=signal,
                      s=s,
                      equicorrelated=False,
                      rho=rho,
                      sigma=sigma,
                      random_signs=True)[:3]
    n, p = X.shape
    sigma_ = np.std(Y)
    # One group per coordinate -> plain lasso.
    groups = np.arange(p)
    weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
    conv = const(X,
                 Y,
                 groups,
                 weights)
    signs = conv.fit()
    nonzero = conv.selection_variable['directions'].keys()
    if target == 'full':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = full_targets(conv.loglike,
                                      conv._W,
                                      nonzero,
                                      conv.penalty)
    elif target == 'selected':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = selected_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    elif target == 'debiased':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = debiased_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    _, pval, intervals = conv.summary(observed_target,
                                      group_assignments,
                                      cov_target,
                                      cov_target_score,
                                      alternatives,
                                      ndraw=ndraw,
                                      compute_intervals=False)
    # BUG FIX: np.bool was a deprecated alias for the builtin bool and is
    # removed in NumPy >= 1.24; use bool directly.
    which = np.zeros(p, bool)
    for group in conv.selection_variable['directions'].keys():
        which_group = conv.penalty.groups == group
        which += which_group
    return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_mixed(n=400,
               p=200,
               signal_fac=1.5,
               s=5,
               sigma=3,
               target='full',
               rho=0.4,
               ndraw=10000):
    """
    Test group lasso with a mix of groups of size 1, and larger

    Same flow as test_lasso, but the last eight coordinates are merged
    into two groups (-1 and -2) and given a strong common signal so the
    fit selects them.
    """
    inst, const = gaussian_instance, group_lasso.gaussian
    signal = np.sqrt(signal_fac * np.log(p))
    X, Y, beta = inst(n=n,
                      p=p,
                      signal=signal,
                      s=s,
                      equicorrelated=False,
                      rho=rho,
                      sigma=sigma,
                      random_signs=True)[:3]
    n, p = X.shape
    sigma_ = np.std(Y)
    groups = np.arange(p)
    groups[-5:] = -1
    groups[-8:-5] = -2
    Y += X[:,-8:].dot(np.ones(8)) * 5 # so we select the last two groups
    weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
    conv = const(X,
                 Y,
                 groups,
                 weights)
    signs = conv.fit()
    nonzero = conv.selection_variable['directions'].keys()
    if target == 'full':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = full_targets(conv.loglike,
                                      conv._W,
                                      nonzero,
                                      conv.penalty)
    elif target == 'selected':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = selected_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    elif target == 'debiased':
        (observed_target,
         group_assignments,
         cov_target,
         cov_target_score,
         alternatives) = debiased_targets(conv.loglike,
                                          conv._W,
                                          nonzero,
                                          conv.penalty)
    _, pval, intervals = conv.summary(observed_target,
                                      group_assignments,
                                      cov_target,
                                      cov_target_score,
                                      alternatives,
                                      ndraw=ndraw,
                                      compute_intervals=False)
    # BUG FIX: np.bool was a deprecated alias for the builtin bool and is
    # removed in NumPy >= 1.24; use bool directly.
    which = np.zeros(p, bool)
    for group in conv.selection_variable['directions'].keys():
        which_group = conv.penalty.groups == group
        which += which_group
    return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_all_targets(n=100, p=20, signal_fac=1.5, s=5, sigma=3, rho=0.4):
    """Run test_group_lasso once for each supported target type."""
    for tgt in ('full', 'selected', 'debiased'):
        test_group_lasso(n=n, p=p, signal_fac=signal_fac, s=s,
                         sigma=sigma, rho=rho, target=tgt)
def main(nsim=500, n=200, p=50, target='full', sigma=3):
    # Monte-Carlo driver: repeat test_group_lasso and pool the null /
    # alternative p-values.
    import matplotlib.pyplot as plt
    P0, PA = [], []
    from statsmodels.distributions import ECDF
    for i in range(nsim):
        try:
            p0, pA = test_group_lasso(n=n, p=p, target=target, sigma=sigma)
        except:
            # NOTE(review): bare except silently swallows every failure,
            # and if the first iteration fails, p0/pA below are unbound
            # (NameError) — consider `continue` here.
            pass
        print(len(p0), len(pA))
        P0.extend(p0)
        PA.extend(pA)
    P0_clean = np.array(P0)
|
"""Mac-only module to find the home file | of a resource."""
import sstruct
import array
import calldll
import macfs, Res
def HomeResFile(res):
    """Return a path to the file in which resource 'res' lives."""
    refNum = res.HomeResFile()
    return GetFileLocation(refNum)
def GetFileLocation(refNum):
    """Return a path to the open file identified with refNum."""
    block = ParamBlock(refNum)
    return block.getPath()
#
# Internal cruft, adapted from MoreFiles
#
# Resolve InterfaceLib entry points through calldll so we can issue
# GetVRefNum and PBGetFCBInfoSync toolbox calls directly (classic MacOS).
_InterfaceLib = calldll.getlibrary("InterfaceLib")
GetVRefNum = calldll.newcall(_InterfaceLib.GetVRefNum, "None", "InShort", "OutShort")
_getInfo = calldll.newcall(_InterfaceLib.PBGetFCBInfoSync, "Short", "InLong")
_FCBPBFormat = """
qLink: l
qType: h
ioTrap: h
ioCmdAddr: l
ioCompletion: l
ioResult: h
ioNamePtr: l
ioVRefNum: h
ioRefNum: h
filler: h
ioFCBIndx: h
filler1: h
ioFCBFINm: l
ioFCBFlags: h
ioFCBStBlk: h
ioFCBEOF: l
ioFCBPLen: l
ioFCBCrPs: l
ioFCBVRefNum: h
ioFCBClpSiz: l
ioFCBParID: l
"""
class ParamBlock:
    """Wrapper for the very low level FCBPB record."""
    def __init__(self, refNum):
        # 64-byte buffer the toolbox fills with the file name (Str63).
        self.__fileName = array.array("c", "\0" * 64)
        # Zero-initialize all FCBPB fields as attributes on self.
        sstruct.unpack(_FCBPBFormat,
                       "\0" * sstruct.calcsize(_FCBPBFormat), self)
        self.ioNamePtr = self.__fileName.buffer_info()[0]
        self.ioRefNum = refNum
        self.ioVRefNum = GetVRefNum(refNum)
        self.__haveInfo = 0
    def getInfo(self):
        # Issue PBGetFCBInfoSync once and cache the unpacked result.
        if self.__haveInfo:
            return
        data = sstruct.pack(_FCBPBFormat, self)
        buf = array.array("c", data)
        ptr = buf.buffer_info()[0]
        err = _getInfo(ptr)
        if err:
            # Python 2 raise syntax.
            raise Res.Error, ("can't get file info", err)
        sstruct.unpack(_FCBPBFormat, buf.tostring(), self)
        self.__haveInfo = 1
    def getFileName(self):
        # The buffer holds a Pascal string: length byte then characters.
        self.getInfo()
        data = self.__fileName.tostring()
        return data[1:ord(data[0])+1]
    def getFSSpec(self):
        self.getInfo()
        vRefNum = self.ioVRefNum
        parID = self.ioFCBParID
        return macfs.FSSpec((vRefNum, parID, self.getFileName()))
    def getPath(self):
        return self.getFSSpec().as_pathname()
if __name__ == "__main__":
    # Quick manual check: locate the file holding the Helvetica FOND.
    fond = Res.GetNamedResource("FOND", "Helvetica")
    print HomeResFile(fond)
|
ets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((11842.6, 9383.56, 9068.83), (0.7, 0.7, 0.7), 789.529)
# Generated Chimera marker placement: each repeated stanza lazily creates
# a named marker set and places one sphere marker with the given
# (x, y, z) center, (r, g, b) color and radius.
if "particle_19 geometry" not in marker_sets:
  s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((11007.5, 8508.02, 10007.1), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((9961.01, 7958.91, 11407.6), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((9762.12, 8128.95, 13085.8), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9105.04, 8055.92, 11818.5), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8098.96, 8945.37, 10136.6), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((7113.44, 10486.1, 9008.71), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6624.41, 11283.8, 8528.05), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5149.07, 9466.37, 7162.96), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3504.19, 8941.41, 6449.57), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((3197.3, 7852.11, 7001.47), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2170.99, 6197.96, 8273.91), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2865.17, 6382.4, 7807.34), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1678.42, 6425.8, 7006.61), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((135.134, 4970.61, 6205.73), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((571.58, 4380.79, 4957.66), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1541.14, 3448.69, 4309.93), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1849.42, 1776.94, 3839.31), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2383.37, 290.48, 4448.44), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2421.7, 991.476, 5898.51), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((1032.67, 1633.14, 5847.07), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1237.16, 3532.79, 5257.35), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set | ('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((553.737, 3534.85, 4213.84), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set( | 'particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((1601.81, 4124.33, 4851.28), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((2243.52, 3644.67, 4704.5), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((1631.44, 3925.35, 4647.59), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((1142.92, 5369.1, 5674.5), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2626.92, 7819.11, 5319.34), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((3047.34, 9026.17, 3950.98), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3457.58, 9145.33, 2883.25), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((2644.32, 8950.16, 1073.97), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((846.027, 9108.82, -893.839), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((-6.51386, 7800.76, -208.332), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((-40.7095, 8772.6, 2535.54), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
|
import json
from multiprocessing import Pool, Manager
import os |
import requests
import Quandl as quandl
# set working directory to script directory.
abspath = os.path.abspath(__file__)  # absolute path of this script file
dname = os.path.dirname(abspath)     # directory containing the script
os.chdir(dname)
# NOTE(review): `errors` is printed in main() but never appended to anywhere
# in this file -- confirm where failures were meant to be recorded.
errors = []
def get_url(url, vars=None):
    """Build a request URL with query parameters appended.

    :param url: base URL string.
    :param vars: optional dict of query parameters; when provided it must
        contain a 'page' entry, which is echoed back so the caller can name
        the output file.
    :returns: dict with the full 'url' and the 'page' identifier
        (None when no parameters were given -- the old code crashed here).
    """
    page = None
    if vars is not None:
        # join avoids the old build-then-strip-trailing-'&' dance
        url += '?' + '&'.join('{0}={1}'.format(k, v) for k, v in vars.items())
        page = vars['page']
    return {'url': url, 'page': page}
|
def get_csv(url):
    """Download one datasets CSV page and save it under meta/zhv_index/.

    :param url: dict with 'url' (full request URL) and 'page' (page number
        used to name the output file), as produced by get_url().
    """
    results = requests.get(url['url'])
    fname = os.path.join(dname, 'meta/zhv_index/{0}.csv'.format(url['page']))
    print(fname)
    with open(fname, 'w') as f:
        # The endpoint returns CSV text directly; write it out verbatim.
        # (The old code indexed the Response object, which is not valid,
        # and referenced an undefined variable `l`.)
        f.write(results.text)
def main():
    """Queue one metadata request per page and download them with a pool."""
    base_url = 'https://www.quandl.com/api/v3/datasets.csv'
    urls = []
    for i in range(1, 12663):
        params = {'database_code': 'ZILL',
                  'per_page': '100',
                  'sort_by': 'id',
                  'page': str(i),
                  'api_key': 'sWyovn27HuCobNWR2xyz'}
        # get_url() returns the {'url': ..., 'page': ...} dict that get_csv()
        # expects.  The old code had a missing closing paren, wrapped the dict
        # in another dict get_csv() could not use, shadowed the `requests`
        # module with a local list, and mapped over an undefined `urls`.
        urls.append(get_url(base_url, params))
    pool = Pool(8)
    pool.map(get_csv, urls)
    pool.close()
    pool.join()
    # NOTE(review): `errors` is never populated (and mutations would not
    # propagate back from worker processes anyway) -- confirm intent.
    print('Errors: ' + str(errors))

if __name__ == '__main__':
    main()
|
import os, sys
from array import array
# Import the C extension if it was built; fall back to the pure-Python
# implementations for comparison testing.
try:
    from distance import cdistance
except ImportError:
    cdistance = None
from distance import _pyimports as pydistance

# Text-type shims so the same test data can be built on Python 2 and 3:
# t_unicode yields the platform's unicode string type, t_bytes a byte string.
if sys.version_info.major < 3:
    t_unicode = unicode
    t_bytes = lambda s: s
else:
    t_unicode = lambda s: s
    t_bytes = lambda s: s.encode()

# Every sequence type the distance functions accept; each test routine is
# invoked once per (name, constructor) pair.
all_types = [
    ("unicode", t_unicode),
    ("bytes", t_bytes),
    ("list", list),
    ("tuple", tuple),
]
def hamming(func, t, **kwargs):
    """Exercise a hamming() implementation on sequences built by `t`."""
    # Bad argument types must raise ValueError; only the C version checks.
    if kwargs["lang"] == "C":
        for bad_pair in ((1, t("foo")), (t("foo"), 1)):
            try:
                func(*bad_pair)
            except ValueError:
                pass
    # Identical sequences (including empty ones) are at distance zero.
    assert func(t(""), t("")) == 0
    assert func(t("abc"), t("abc")) == 0
    # A single differing position costs one.
    assert func(t("abc"), t("abd")) == 1
    # Sequences of unequal length are rejected.
    for unequal in ((t("foo"), t("foobar")), (t(""), t("foo"))):
        try:
            func(*unequal)
        except ValueError:
            pass
    # Normalized mode divides the raw distance by the sequence length.
    assert func(t(""), t(""), normalized=True) == 0.0
    assert func(t("abc"), t("abc"), normalized=True) == 0.0
    assert func(t("ab"), t("ac"), normalized=True) == 0.5
    assert func(t("abc"), t("def"), normalized=True) == 1.0
def fast_comp(func, t, **kwargs):
    # Exercise fast_comp(): an edit distance with a built-in cap of 2;
    # anything more distant reports -1 instead of the true distance.
    # types; only for c
    if kwargs["lang"] == "C":
        try:
            func(1, t("foo"))
        except ValueError:
            pass
        try:
            func(t("foo"), 1)
        except ValueError:
            pass
    # empty strings
    assert func(t(""), t("")) == 0
    assert func(t(""), t("a")) == func(t("a"), t("")) == 1
    # edit ops
    assert func(t("aa"), t("aa")) == 0
    assert func(t("ab"), t("aa")) == 1
    assert func(t("ab"), t("a")) == 1
    assert func(t("ab"), t("abc")) == 1
    # dist limit: distance 3 exceeds the cap of 2, so -1 is returned
    assert func(t("a"), t("bcd")) == func(t("bcd"), t("a")) == -1
    # transpositions: an adjacent swap counts as a single edit when enabled
    assert func(t("abc"), t("bac"), transpositions=True) == \
        func(t("bac"), t("abc"), transpositions=True) == 1
def levenshtein(func, t, **kwargs):
    """Exercise a levenshtein() implementation on sequences built by `t`."""
    # Argument type checking is only performed by the C implementation.
    if kwargs["lang"] == "C":
        for bad_pair in ((1, t("foo")), (t("foo"), 1)):
            try:
                func(*bad_pair)
            except ValueError:
                pass
    # Empty sequences: the distance equals the other operand's length.
    assert func(t(""), t("")) == 0
    assert func(t(""), t("abcd")) == func(t("abcd"), t("")) == 4
    # Each single edit operation costs one.
    assert func(t("aa"), t("aa")) == 0
    assert func(t("ab"), t("aa")) == 1
    assert func(t("ab"), t("a")) == 1
    assert func(t("ab"), t("abc")) == 1
    # max_dist caps the search: exceeding it yields -1, -1 means unlimited.
    assert func(t("a"), t("b"), max_dist=0) == -1
    assert func(t("a"), t("b"), max_dist=1) == 1
    assert func(t("foo"), t("bar"), max_dist=-1) == 3
def nlevenshtein(func, t, **kwargs):
    # Exercise nlevenshtein(): normalized Levenshtein distance in [0, 1].
    # The third positional argument selects the normalization method:
    # 1 divides by the longer length; 2 normalizes over alignments and can
    # produce a smaller value -- assumes the library's documented semantics,
    # TODO confirm against the distance package docs.
    # types; only for c
    if kwargs["lang"] == "C":
        try:
            func(1, t("foo"))
        except ValueError:
            pass
        try:
            func(t("foo"), 1)
        except ValueError:
            pass
    # empty strings
    assert func(t(""), t(""), 1) == func(t(""), t(""), 2) == 0.0
    assert func(t(""), t("foo"), 1) == func(t("foo"), t(""), 1) == \
        func(t(""), t("foo"), 2) == func(t("foo"), t(""), 2) == 1.0
    assert func(t("aa"), t("aa"), 1) == func(t("aa"), t("aa"), 2) == 0.0
    assert func(t("ab"), t("aa"), 1) == func(t("ab"), t("aa"), 2) == 0.5
    assert func(t("ab"), t("a"), 1) == func(t("ab"), t("a"), 2) == 0.5
    assert func(t("ab"), t("abc"), 1) == func(t("ab"), t("abc"), 2) == 0.3333333333333333
    # multiple alignments: method 2 can normalize over a shorter path
    assert func(t("abc"), t("adb"), 1) == 0.6666666666666666
    assert func(t("abc"), t("adb"), 2) == 0.5
def lcsubstrings(func, t, **kwargs):
    # Exercise lcsubstrings(): longest common substrings.
    # With positions=False a set of substrings is returned, which is
    # impossible for unhashable list inputs -- hence the TypeError guards
    # below.  With positions=True a (length, ((pos1, pos2), ...)) tuple
    # is returned instead, which works for every sequence type.
    # types; only for c
    if kwargs["lang"] == "C":
        try:
            func(1, t("foo"))
        except ValueError:
            pass
        try:
            func(t("foo"), 1)
        except ValueError:
            pass
    # empty strings
    try:
        assert func(t(""), t(""), False) == set()
    except TypeError:
        if t is not list: raise  # only list inputs may legitimately fail
    assert func(t(""), t(""), True) == (0, ())
    try:
        assert func(t(""), t("foo"), False) == func(t("foo"), t(""), False) == set()
    except TypeError:
        if t is not list: raise
    assert func(t(""), t("foo"), True) == func(t("foo"), t(""), True) == (0, ())
    # common
    try:
        assert func(t("abcd"), t("cdba"), False) == {t('cd')}
    except TypeError:
        if t is not list: raise
    assert func(t("abcd"), t("cdba"), True) == (2, ((2, 0),))
    # reverse: the result must not depend on argument order
    try:
        assert func(t("abcdef"), t("cdba"), False) == func(t("cdba"), t("abcdef"), False)
    except TypeError:
        if t is not list: raise
    assert func(t("abcdef"), t("cdba"), True) == func(t("cdba"), t("abcdef"), True)
def itors_common(func, t, **kwargs):
    """Shared checks for the iterator variants (ilevenshtein/ifast_comp).

    `func` is expected to yield (distance, word) pairs for words within
    the distance limit, in input order.
    """
    if kwargs["lang"] == "C":
        # types check; only needed for the C impl to avoid eventual segfaults.
        # (This line was garbled to "tr y:" in the file -- repaired.)
        try:
            func(1, t("foo"))
        except ValueError:
            pass
        # A non-sequence inside the word list is reported lazily, on next().
        itor = func(t("foo"), [t("foo"), 3333])
        next(itor)
        try:
            next(itor)
        except ValueError:
            pass
    # values drop: words beyond the distance limit are skipped entirely.
    itor = func(t("aa"), [t("aa"), t("abcd"), t("ba")])
    assert next(itor) == (0, t("aa"))
    assert next(itor) == (1, t("ba"))
def ilevenshtein(func, t, **kwargs):
    """Run the shared iterator checks with the distance limit fixed at two."""
    itors_common(lambda seq1, seq2: func(seq1, seq2, max_dist=2), t, **kwargs)
def ifast_comp(func, t, **kwargs):
    """Shared iterator checks plus transposition handling for ifast_comp()."""
    itors_common(func, t, **kwargs)
    # "abc" -> "bac" needs two substitutions without transpositions...
    it = func(t("abc"), [t("bac")], transpositions=False)
    assert next(it) == (2, t('bac'))
    # ...but only a single edit when transpositions are counted.
    it = func(t("abc"), [t("bac")], transpositions=True)
    assert next(it) == (1, t("bac"))
# Progress output goes to stderr so it is not mixed with piped stdout.
write = lambda s: sys.stderr.write(s + '\n')
# Names of all test routines selectable from the command line.
tests = ["hamming", "fast_comp", "levenshtein", "lcsubstrings", "nlevenshtein", "ilevenshtein", "ifast_comp"]
def run_test(name):
    """Run one named test against the C extension (if built) and pure Python."""
    if cdistance is not None:
        run_lang_test(name, getattr(cdistance, name), "C")
        write("")
    run_lang_test(name, getattr(pydistance, name), "py")
    if cdistance is None:
        write("skipped C tests")
    write("")
def run_lang_test(name, func, lang):
    """Invoke the test routine `name` once per supported sequence type."""
    print("%s (%s)..." % (name, lang))
    for type_name, factory in all_types:
        write("type: %s" % type_name)
        globals()[name](func, factory, lang=lang)
if __name__ == "__main__":
    # With no arguments run the full suite; otherwise run only the named
    # tests, rejecting unknown names with a non-zero exit status.
    args = sys.argv[1:]
    if not args:
        for test in tests:
            run_test(test)
        sys.exit()
    for name in args:
        if name in tests:
            run_test(name)
        else:
            write("no such test: %s" % name)
            sys.exit(1)
|
import json
from util import d
import os
# Root directory where PixelWeb stores its JSON configuration files.
__home = os.path.expanduser("~").replace('\\', '/') + "/PixelWeb/"

# Parameter schema describing the server-level configuration options
# (the "Allow External Access" label was garbled in the file -- repaired;
# the stray trailing semicolon was dropped).
BASE_SERVER_CONFIG = d({
    "id": "server_config",
    "display": "server_config",
    "preconfig": False,
    "presets": [],
    "params": [{
        "id": "external_access",
        "label": "Allow External Access",
        "type": "bool",
        "default": True,
        "help": "On: Other computers on your network can access PixelWeb. Off: LocalHost access only."
    }, {
        "id": "port",
        "label": "Server Port",
        "type": "int",
        "default": 8080,
        "help": "Port to listen on."
    }, {
        "id": "load_defaults",
        "label": "Load Last Config on Start",
        "type": "bool",
        "default": False,
        "help": "Load last driver/controller configuration on application start."
    }, {
        "id": "show_debug",
        "label": "Show Debug in Console",
        "type": "bool",
        "default": False,
        "help": "Show BiblioPixel debug in server console (not in main UI)."
    }, {
        "id": "mod_dirs",
        "label": "Module Directories",
        "type": "str_multi",
        "default": [],
        "help": "Directories from which to load modules (animations, drivers, controllers, pre-configs).",
        "replace": {"\\": "/"}
    }, {
        "id": "off_anim_time",
        "label": "All Off Timeout",
        "type": "int",
        "default": 10,
        "min": 0,
        "max": 3600,
        "help": "Keep display off when not running an animation by actively turning all pixels off every X seconds. Set to 0 to disable."
    }]
})
def setHome(home):
    # Override the module-level config directory used by read/writeConfig.
    global __home
    __home = home
def genDefaultConfig(params):
    """Build a {param_id: default_value} dict from a parameter schema list."""
    defaults = {}
    for param in params:
        param = d(param)  # wrap for attribute-style access
        defaults[param.id] = param.default
    return defaults
def initConfig():
try:
if not os.path.exists(__home):
print "Creating {}".format(__home)
os.makedirs(__home)
except:
print "Failed to initialize PixelWeb config!"
def readConfig(file, key = None, path=None):
    """Load <path>/<file>.json as an attribute-accessible dict.

    :param file: basename without the .json extension.
    :param key: optional top-level key; when given, only that sub-dict is
        returned (an empty dict when the key is missing).
    :param path: directory to read from; defaults to the PixelWeb home.
    :returns: a d() wrapping the loaded data ({} on any read/parse failure).
    """
    if not path:
        path = __home

    data = {}
    try:
        with open(path + "/" + file + ".json", "r") as fp:
            data = json.load(fp, encoding='utf-8')
        if key:
            if key in data:
                data = data[key]
            else:
                data = {}
    except (IOError, OSError, ValueError):
        # Missing or malformed config deliberately falls back to {};
        # narrowed from `except Exception` so real bugs still surface.
        pass

    return d(data)
def writeConfig(file, data, key = None, path=None):
    """Write data to <path>/<file>.json, optionally under a single key."""
    target = path if path else __home
    payload = data
    if key:
        # Merge into the existing file so sibling keys are preserved.
        payload = readConfig(file, path=target)
        payload[key] = data
    with open(target + "/" + file + ".json", "w") as fp:
        json.dump(payload, fp, indent=4, sort_keys=True)
def paramsToDict(params):
    """Map each parameter's id to its default value (None when absent).

    Unlike the previous version this no longer writes a `.default`
    attribute back onto the caller's parameter objects as a side effect.
    """
    data = {}
    for p in params:
        data[p.id] = p.default if "default" in p else None
    return data
def readServerConfig():
    # Load the saved server config, falling back to schema defaults.
    data = readConfig("config", path=__home)
    base = paramsToDict(BASE_SERVER_CONFIG.params)
    if len(data.keys()) == 0:
        # No saved config at all: start from the schema defaults.
        data = paramsToDict(BASE_SERVER_CONFIG.params)
    elif len(data.keys()) != len(base.keys()):
        # Key counts differ, so the schema changed since the last save.
        # NOTE(review): d.upgrade() is assumed to merge in missing keys -- confirm.
        data.upgrade(base)
    return d(data)
def writeServerConfig(data):
    # Persist the server-level configuration to <home>/config.json.
    writeConfig("config", data)
def upgradeServerConfig():
    """Merge newly-added schema defaults into the stored server config."""
    defaults = genDefaultConfig(BASE_SERVER_CONFIG.params)
    current = readServerConfig()
    current.upgrade(defaults)
    writeServerConfig(current)
|
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)  # module-level logger (garbled token repaired)
class ComputeCapabilitiesFilter(filters.BaseHostFilter):
    """HostFilter hard-coded to work with InstanceType records."""

    # Instance type and host capabilities do not change within a request
    run_filter_once_per_request = True

    def _get_capabilities(self, host_state, scope):
        """Resolve a capability path against the host state.

        Each element of `scope` is looked up, in order, as an attribute,
        a `stats` entry or a dict key; JSON-encoded strings are decoded
        on the way down.  Returns None when any step cannot be resolved.
        """
        cap = host_state
        for index in range(len(scope)):
            try:
                if isinstance(cap, six.string_types):
                    try:
                        cap = jsonutils.loads(cap)
                    except ValueError as e:
                        LOG.debug("%(host_state)s fails. The capabilities "
                                  "'%(cap)s' couldn't be loaded from JSON: "
                                  "%(error)s",
                                  {'host_state': host_state, 'cap': cap,
                                   'error': e})
                        return None
                if not isinstance(cap, dict):
                    if getattr(cap, scope[index], None) is None:
                        # If can't find, check stats dict
                        cap = cap.stats.get(scope[index], None)
                    else:
                        cap = getattr(cap, scope[index], None)
                else:
                    cap = cap.get(scope[index], None)
            except AttributeError as e:
                LOG.debug("%(host_state)s fails. The capabilities couldn't "
                          "be retrieved: %(error)s.",
                          {'host_state': host_state, 'error': e})
                return None
            if cap is None:
                LOG.debug("%(host_state)s fails. There are no capabilities "
                          "to retrieve.",
                          {'host_state': host_state})
                return None
        return cap

    def _satisfies_extra_specs(self, host_state, instance_type):
        """Check that the host_state provided by the compute service
        satisfies the extra specs associated with the instance type.
        """
        if 'extra_specs' not in instance_type:
            return True

        for key, req in instance_type.extra_specs.items():
            # Either not scope format, or in capabilities scope
            scope = key.split(':')
            # If key does not have a namespace, the scope's size is 1, check
            # whether host_state contains the key as an attribute. If not,
            # ignore it. If it contains, deal with it in the same way as
            # 'capabilities:key'. This is for backward-compatible.
            # If the key has a namespace, the scope's size will be bigger than
            # 1, check that whether the namespace is 'capabilities'. If not,
            # ignore it.
            if len(scope) == 1:
                stats = getattr(host_state, 'stats', {})
                has_attr = hasattr(host_state, key) or key in stats
                if not has_attr:
                    continue
            else:
                if scope[0] != "capabilities":
                    continue
                else:
                    del scope[0]
            cap = self._get_capabilities(host_state, scope)
            if cap is None:
                return False
            if not extra_specs_ops.match(str(cap), req):
                LOG.debug("%(host_state)s fails extra_spec requirements. "
                          "'%(req)s' does not match '%(cap)s'",
                          {'host_state': host_state, 'req': req,
                           'cap': cap})
                return False
        return True

    def host_passes(self, host_state, spec_obj):
        """Return True when the host satisfies the flavor's extra specs.

        (The old docstring claimed a list of hosts was returned; this
        filter method returns a boolean.)
        """
        instance_type = spec_obj.flavor
        if not self._satisfies_extra_specs(host_state, instance_type):
            LOG.debug("%(host_state)s fails instance_type extra_specs "
                      "requirements", {'host_state': host_state})
            return False
        return True
|
#!/usr/bin/env python
"""
.. module:: camerastation.py
:platform: Unix, Windows
:synopsis: Ulyxes - an open source project to drive total stations and
publish observation results. GPL v2.0 license Copyright (C)
2010- Zoltan Siki <siki.zoltan@epito.bme.hu>
.. moduleauthor:: Bence Turak <bence.turak@gmail.com>
"""
import sys
#sys.path.append('ulyxes/pyapi/')
#sys.path.append('lib/')
from totalstation import TotalStation
#from serialiface import SerialIface
from camera import Camera
#from steppermotor import StepperMotor
from imgprocess import ImgProcess
import numpy as np
import os
import cv2
import recognition as rec
from angle import Angle
import math
import time
class CameraStation(TotalStation, Camera):
    '''CameraStation class for a TotalStation combined with a camera

    :param name: name of instrument
    :param measureUnit: measure unit part of instrument
    :param measureIface: interface to physical unit
    :param writerUnit: store data, default None
    '''

    def __init__(self, name, measureUnit, measureIface, writerUnit = None):
        '''constructor
        '''
        TotalStation.__init__(self, name, measureUnit, measureIface, writerUnit)
        Camera.__init__(self, name, measureUnit, measureIface, writerUnit)
        self._affinParams = None  # affine calibration parameters (from .npy)

    def LoadAffinParams(self, file):
        """Load affine parameters used to measure on pictures.

        :param file: name of the params file (must be a .npy file)
        """
        self._affinParams = np.load(file)

    def PicMes(self, photoName, targetType = None):
        '''Measure angles between the target and the optical axis.

        Keeps taking photos until the chessboard pattern is recognised;
        the annotated photo is written back to photoName.

        :param photoName: name of the photo
        :param targetType: type of the target
        :returns: horizontal (hz) and vertical (v) correction angle in dictionary
        '''
        ok = False
        while not ok:
            print(photoName)
            file = open(photoName, 'w+b')
            print((int(self._affinParams[0,3]), int(self._affinParams[1,3])))
            ang = self.GetAngles()
            self.TakePhoto(file, (int(self._affinParams[0,3]), int(self._affinParams[1,3])))
            file.close()
            try:
                img = cv2.imread(photoName, 1)
                picCoord = rec.recogChessPattern(img)
                print(picCoord)
                ok = True
            except Exception:
                # Recognition failed -- retake the photo and try again.
                # NOTE(review): this retries forever if the target never
                # becomes recognisable; consider a retry limit.
                pass
        # Draw a cross hair through the recognised target point.
        img[int(picCoord[1]),:] = [0,255,255]
        img[:,int(picCoord[0])] = [0,255,255]
        cv2.imwrite(photoName, img)
        angles = {}
        angles['hz'] = Angle(1/math.sin(ang['v'].GetAngle('RAD'))*(self._affinParams[0,1]*(picCoord[0] - round(self._affinParams[0,0])) + self._affinParams[0,2]*(picCoord[1] - round(self._affinParams[1,0]))))
        angles['v'] = Angle(self._affinParams[1,1]*(picCoord[0] - round(self._affinParams[0,0])) + self._affinParams[1,2]*(picCoord[1] - round(self._affinParams[1,0])))
        return angles

    def GetAbsAngles(self, targetType = None):
        """Get absolute angles with automatic target recognition (not prism).

        Iterates until the remaining correction is within 6 arc seconds.

        :param targetType: type of target (None)
        :returns: corrected horizontal (hz) and vertical (v) angles in a
            dictionary; also contains the last correction angles (chz, cv).
        """
        t = time.localtime()
        picName = str(t.tm_year) + '_' + str(t.tm_mon) + '_' + str(t.tm_mday) + '_' + str(t.tm_hour) + '_' + str(t.tm_min) + '_' + str(t.tm_sec) + '.png'
        corr = self.PicMes(picName)
        ang = self.GetAngles()
        angles = {}
        angles['hz'] = ang['hz'] - corr['hz']
        angles['v'] = ang['v'] - corr['v']
        angles['chz'] = corr['hz']
        angles['cv'] = corr['v']
        i = 0
        print('hz:', corr['hz'].GetAngle('SEC'))
        print('v:', corr['v'].GetAngle('SEC'))
        while abs(corr['hz'].GetAngle('SEC')) > 6 or abs(corr['v'].GetAngle('SEC')) > 6:
            self.Move(angles['hz'], angles['v'])
            corr = self.PicMes(picName)
            ang = self.GetAngles()
            print('hz:', corr['hz'].GetAngle('SEC'))
            print('v:', corr['v'].GetAngle('SEC'))
            angles = {}
            angles['hz'] = ang['hz'] - corr['hz']
            angles['v'] = ang['v'] - corr['v']
            angles['chz'] = corr['hz']
            angles['cv'] = corr['v']
            print(i)
            i += 1
        return angles

    def FollowTarget(self):
        """Follow the target continuously (beta).

        Loops forever, re-pointing the station whenever the correction
        exceeds 6 arc seconds.
        """
        t = time.localtime()
        picName = str(t.tm_year) + '_' + str(t.tm_mon) + '_' + str(t.tm_mday) + '_' + str(t.tm_hour) + '_' + str(t.tm_min) + '_' + str(t.tm_sec) + '.png'
        i = 0
        while True:
            corr = self.PicMes(picName)
            ang = self.GetAngles()
            print('hz:', corr['hz'].GetAngle('SEC'))
            print('v:', corr['v'].GetAngle('SEC'))
            angles = {}
            angles['hz'] = ang['hz'] - corr['hz']
            angles['v'] = ang['v'] - corr['v']
            print(i)
            i += 1
            if abs(corr['hz'].GetAngle('SEC')) > 6 or abs(corr['v'].GetAngle('SEC')) > 6:
                self.Move(angles['hz'], angles['v'])
        return angles

    def __del__(self):
        '''destructor
        '''
        pass
|
#!/usr/bin/env python
from subprocess import Popen, PIPE
from sys import argv
__autor__ = "Jose Jiménez"
__email__ = "jjimenezlopez@gmail.com"
__date__ = "2012/05/03"
if len(argv) == 1 or len(argv) > 2:
| print 'Wrong execution format.'
print 'Correct format: any2utf /path/to/the/files'
exit(0)
path = argv[1]
if not path.endswith('/'):
path = path + '/'
path = path.replace(' ', '\ ')
p | roc = Popen('ls ' + path + '*.srt', stdout=PIPE, stderr=PIPE, shell=True)
result = proc.communicate()
if proc.returncode == 2:
print 'SRT files not found in path \'' + path + '\''
list = result[0].splitlines()
for f in list:
aux_f = f
aux_f.replace(' ', '\ ')
# file --mime /path/to/file.srt
#print 'file --mime \"' + aux_f + '\"'
proc = Popen('file --mime \"' + aux_f + '\"', stdout=PIPE, shell=True)
result = proc.communicate()[0]
charset = result.split('charset=')[1]
charset = charset.replace('\n', '')
if charset == 'unknown-8bit':
charset = 'iso-8859-15'
if charset != 'utf-8' and charset != 'binary':
# print 'iconv -f ' + charset + ' -t utf-8 ' + aux_f + ' > ' + aux_f + '.utf'
proc = Popen('iconv -f ' + charset + ' -t utf-8 \"' + aux_f + '\" > \"' + aux_f + '.utf\"', stdout=PIPE, shell=True)
result = proc.communicate()[0]
if proc.returncode == 0:
#proc = Popen('rm ' + aux_f, stdout=PIPE, shell=True)
proc = Popen('mv \"' + aux_f + '.utf\" \"' + aux_f + '\"', stdout=PIPE, shell=True)
proc.wait()
proc = Popen('file --mime \"' + aux_f + '\"', stdout=PIPE, shell=True)
text = proc.communicate()[0]
print f.split('/')[-1] + ' | ' + charset + ' --> ' + text.split('charset=')[1].replace('\n', '')
else:
proc = Popen('file --mime \"' + aux_f + '\"', stdout=PIPE, shell=True)
text = proc.communicate()[0]
print f + ' --> conversion ERROR: ' + text.split('charset=')[1].replace('\n', '')
|
# -*- coding:utf-8 -*-
# Package initializer: expose the loading and registry submodules.
from . import loading
from . import registry
|
#!/usr/bin/env python
# Note: this module is not a demo per se, but is used by many of
# the demo modules for various purposes.
import wx
#---------------------------------------------------------------------------
class ColoredPanel(wx.Window):
    """A simple bordered window filled with a solid background colour."""
    def __init__(self, parent, color):
        wx.Window.__init__(self, parent, -1, style = wx.SIMPLE_BORDER)
        self.SetBackgroundColour(color)
        if wx.Platform == '__WXGTK__':
            # wxGTK needs the custom background style for the colour to paint.
            self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
#---------------------------------------------------------------------------
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: select the settings module, then hand
    # the command line over to Django's management machinery.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
|
from skimage.data import coffee, camera
from sklearn_theano.feature_extraction.caffe.googlenet import (
GoogLeNetTransformer, GoogLeNetClassifier)
import numpy as np
from nose import SkipTest
import os
# Test images: the RGB coffee photo, and the grayscale camera image
# broadcast to three channels, both as float32 arrays.
co = coffee().astype(np.float32)
ca = camera().astype(np.float32)[:, :, np.newaxis] * np.ones((1, 1, 3),
                                                             dtype='float32')
def test_googlenet_transformer():
    """smoke test for googlenet transformer"""
    if os.environ.get('CI', None) is not None:
        raise SkipTest("Skipping heavy data loading on CI")
    t = GoogLeNetTransformer()
    t.transform(co)
    t.transform(ca)
def test_googlenet_classifier():
    """smoke test for googlenet classifier"""
    if os.environ.get('CI') is not None:
        raise SkipTest("Skipping heavy data loading on CI")
    clf = GoogLeNetClassifier()
    for image in (co, ca):
        clf.predict(image)
|
'''
Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
This module creates N nodes and sends X messages randomly between nodes and records
any failures. The messages are sent in blocks of 100 and then it waits and does it again.
Created on Jul 7, 2014
@author: dfleck
'''
from twisted.trial import unittest
from twisted.internet import reactor, defer
from twisted.python import log
from gmu.chord import NetworkUtils, Config
from gmu.chord.CopyEnvelope import CopyEnvelope
import TestUtils
from ConnectivityCounter import ConnectivityCounter
import datetime
import random, sys
numNodes = 5  # number of client nodes to start (the bootstrap node is extra)
numMessages=5000 # Total number of messages to send
numMessagesInBlock=100 # Size of blocks to send them in
class ParallelStressTest(unittest.TestCase):
    """Builds a bootstrap node plus numNodes clients and stress-tests
    p2p messaging in blocks of numMessagesInBlock messages."""

    @classmethod
    def setUpClass(cls):
        super(ParallelStressTest, cls).setUpClass()
        ParallelStressTest.logObs = log.startLogging(sys.stdout)

    @classmethod
    def tearDownClass(cls):
        super(ParallelStressTest, cls).tearDownClass()
        if ParallelStressTest.logObs is not None:
            ParallelStressTest.logObs.stop()

    def setUp(self):
        '''Start the reactor so we don't have to do it in the nodes.'''
        global numNodes

        # Turn off warning
        Config.WARN_NO_MESSAGE_AUTHENTICATOR = False
        Config.ALLOW_NO_AUTHENTICATOR = True

        # This is the IP of the node. Note: This MUST be
        # an external ID or the code won't work!
        self.myIP = NetworkUtils.getNonLoopbackIP(None, None)

        self.allNodes = []
        self.timeout = (numNodes * 5) + numMessages  # How many seconds to try before erroring out
        self.connectedNodeList = []  # How many are currently connected?
        self.testCounter = -1

    def tearDown(self):
        # Node shutdown happens in allLeave() at the end of the test itself.
        pass

    @defer.inlineCallbacks
    def testParallelP2PSending(self):
        '''Build the network, run the stress test, then shut everything down.'''
        # Start a bootstrap node
        (status, self.bsNode, _observer) = yield TestUtils.startupBootstrapNode(self.myIP, 12345, 'localhost')
        self.assertTrue(status, 'Could not build bootstrap node')
        self.allNodes.append(self.bsNode)
        self.bsNode.addMessageObserver(self.messageReceived)

        # Start client nodes
        log.msg("Building nodes...")
        for i in range(numNodes):
            (status, node, observer) = yield TestUtils.startupClientNode(self.myIP, 12346 + i, 'localhost', self.bsNode.nodeLocation)
            self.assertTrue(status, 'Could not startupClientNode')
            self.allNodes.append(node)

        # Wait for flooding to reach all the nodes
        waiter = ConnectivityCounter()
        yield waiter.waitForConnectivity(numNodes + 1, self.bsNode)  # Does not count bsNode itself.

        # Now do the stress test
        status = yield self.doStressTest()

        # Now close it all down!
        yield self.allLeave()

        # Wait a second or two
        yield TestUtils.wait(3)

        defer.returnValue(True)

    @defer.inlineCallbacks
    def doStressTest(self):
        '''Randomly pick two nodes and send a message between them. Verify that it goes.'''
        print("Running parallel stress test: %d p2p messages" % numMessages)

        messageCounter = 0
        while messageCounter < numMessages:
            if messageCounter % 100 == 0:
                print("Running test %d of %d" % (messageCounter, numMessages))

            statusList = []
            for _ in range(numMessagesInBlock):
                messageCounter += 1
                (srcNode, dstNode) = random.sample(self.allNodes, 2)

                # Build the envelope
                env = CopyEnvelope()
                env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
                env['source'] = srcNode.nodeLocation
                env['type'] = 'p2p'
                env['destination'] = dstNode.nodeLocation.id
                env['msgID'] = random.getrandbits(128)  # TODO: Something better here!

                msgText = "Test number %d " % messageCounter
                statusList.append(srcNode.sendSyncMessage(msgText, env))

            # Now wait for all of them to complete
            dl = defer.DeferredList(statusList)
            results = yield dl  # Wait for it

            # Now check all the return codes
            for (success, _) in results:
                self.assertTrue(success, "doStressTest Message returned False!")

            # Wait a bit... just to ease up a smidge.
            yield TestUtils.wait(0.1)

        defer.returnValue(True)

    def messageReceived(self, msg, dummy_Envelope):
        '''This is a receiver for the bootstrap node only!
        We got a message. For flooding pingbacks the message format is:
            type:PINGBACK
            loc:sender
            msgNum:number
        '''
        if not isinstance(msg, dict):
            return

        if 'type' in msg:
            theType = msg['type']
            if theType == "PINGBACK":
                if msg['msgNum'] == 0:  # Setup message only
                    # Add the sender to the list of nodes we know of.
                    # NOTE(review): addNode is not defined on this class -- confirm.
                    self.addNode(msg['loc'])
                elif msg['msgNum'] == self.testCounter:
                    # We have a message from a current PING, count it!
                    self.connectedNodeList.append(msg['loc'])
            else:
                # Typically this means a message came in late
                log.msg("ParallelStressTest got an unknown message:%s" % msg)

    def allLeave(self):
        '''Tell every node to leave the network.'''
        for node in self.allNodes:
            node.leave()
        return True
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.template.context_processors import csrf
from common import post_required, login_required, redirect_if_loggedin, __redirect
# Create your views here.
def index(request):
    """Render the generic error page."""
    data = {'title': 'Error', 'page': 'home'}
    # NOTE(review): `device` is never imported in this module -- this will
    # raise NameError at runtime; confirm where get_template comes from.
    file = device.get_template(request, 'error_error.html')
    return render(request, file, data)
def invalid_request_view(request):
    """Render the 'invalid request' error page."""
    context = {'title': 'Invalid Request', 'page': 'home'}
    template = device.get_template(request, 'error_invalid_request.html')
    return render(request, template, context)
def under_construction_view(request):
    """Render the 'under construction' page."""
    context = {'title': 'Under Construction', 'page': 'home'}
    template = device.get_template(request, 'error_under_construction.html')
    return render(request, template, context)
|
from django import template
from django.conf import settings
from django.db.models import get_model
from django.template import defaultfilters, loader

from .. import library
from .. import renderers
from ..dashboard import forms

ContentType = get_model('contenttypes', 'ContentType')
register = template.Library()
@register.assignment_tag
def update_blocks_form(page, container_name):
    """Return a block-selection form for the named container, or None."""
    container = page.get_container_from_name(container_name)
    if container:
        return forms.BlockUpdateSelectForm(container)
    return None
@register.simple_tag(takes_context=True)
def render_attribute(context, attr_name, *args):
    """
    Render an attribute based on editing mode.

    The attribute value is pulled off the current block, run through any
    named default template filters, and -- for staff users -- wrapped in a
    span carrying the block UUID so the front-end editor can target it.
    """
    block = context.get(renderers.BlockRenderer.context_object_name)
    value = getattr(block, attr_name)
    for arg in args:
        flt = getattr(defaultfilters, arg)
        if flt:
            value = flt(value)
    user = context.get('request').user
    if not user.is_authenticated:
        return unicode(value)
    if not user.is_staff:
        return unicode(value)
    wrapped_attr = u'<span id="block-{uuid}-{attr_name}">{value}</span>'
    return wrapped_attr.format(
        uuid=block.uuid, attr_name=attr_name, value=unicode(value))
@register.assignment_tag(takes_context=True)
def get_object_visibility(context, obj):
    """Return obj.is_visible, defaulting to True when the attribute is absent."""
    return getattr(obj, 'is_visible', True)
@register.simple_tag(takes_context=True)
def render_block_form(context, form):
    # Render the model-specific edit form for a content block.
    # Template lookup order: app-specific override, generic fancypages
    # template, then the form's own default template.
    model = form._meta.model
    model_name = model.__name__.lower()
    template_names = [
        "%s/%s_form.html" % (model._meta.app_label, model_name),
        "fancypages/blocks/%s_form.html" % model_name, form.template_name]
    tmpl = loader.select_template(template_names)
    # Fallback image URL for blocks whose image is missing.
    context['missing_image_url'] = "%s/%s" % (
        settings.MEDIA_URL, getattr(settings, "OSCAR_MISSING_IMAGE_URL", ''))
    return tmpl.render(context)
@register.filter
def depth_as_range(depth):
    """Convert a treebeard depth to a zero-based range (root depth is 1)."""
    return range(depth - 1)
@register.assignment_tag
def get_content_type(obj):
    """Look up the ContentType record for the object's class."""
    model_class = obj.__class__
    return ContentType.objects.get_for_model(model_class)
@register.inclusion_tag(
    'fancypages/dashboard/block_select.html', takes_context=True)
def render_block_selection(context):
    """Provide grouped content blocks for the block-select widget.

    Returns an empty string outside of front-end edit mode.
    """
    request = context.get('request')
    if not request or not request.fp_edit_mode:
        return u''
    return {'grouped_blocks': library.get_grouped_content_blocks()}
|
#!/usr/bin/python
## Download files from Amazon S3 (e.g. raw photos for 3D models)
## Andy Bevan 15-Jun-2014, updated 21-Nov-2014
## Daniel Pett updated 05-Jan-2016
__author__ = 'ahb108'
## Currently for Python 2.7.5 (tested on MacOSX 10.9.2) launched in a virtual environment:
from PIL import Image # Pillow with libjpeg support
from PIL import ImageDraw
import urllib3
import json
import re
import numpy as np
import argparse
import os
import urllib2
import zipfile
# Argument parser
parser = argparse.ArgumentParser(description='This is a script to combine vector polygon masks into a binary raster mask for 3d modelling.')
parser.add_argument('-a','--app',help='MicroPasts application', required=True)
parser.add_argument('-w','--wd', help='Working directory',required=True)
args = parser.parse_args()

## Global settings ##
os.chdir(args.wd)
app = args.app
pybinst = 'http://crowdsourced.micropasts.org'

###################################
# Get the raw jpg files from working directory
ext = ['.JPG', '.jpg', '.jpeg', '.JPEG']
files = [ f for f in os.listdir('.') if f.endswith(tuple(ext)) ]
print("Masking each individual photograph...")
for q in range(0, len(files)):
    # Open an example image
    img = Image.open(files[q])
    imnameonly = os.path.splitext(files[q])[0]

    # Get JSON data for tasks and find task ID for this file
    downloadURL = str(pybinst) + '/project/' + str(app) + '/tasks/export?type=task&format=json'
    outputFilename = str(app) + '_task.json'

    # Download JSON file to working directory
    response = urllib2.urlopen(downloadURL)
    zippedData = response.read()

    # Save data to disk
    output = open(outputFilename, 'wb')
    output.write(zippedData)
    output.close()

    # Extract the data
    zfobj = zipfile.ZipFile(outputFilename)
    for name in zfobj.namelist():
        uncompressed = zfobj.read(name)
        # Save uncompressed data to disk
        outputFilename = name
        output = open(outputFilename, 'wb')
        output.write(uncompressed)
        output.close()
    with open(outputFilename) as data_file:
        jtasks = json.load(data_file)

    # Loop through looking for those tasks with the necessary look-up image (almost always one
    # unless tasks have been duplicated, but allowing more than one just in case)
    imtasks = []
    for elm in range(0, len(jtasks)):
        onetask = jtasks[elm]
        onetaskurl = onetask['info']['url_b'].encode('utf-8')
        if re.search(files[q], onetaskurl):
            imtasks.extend([onetask['id']])

    # Get JSON data for task runs (even if they are duplicated)
    jtaskruns = []
    for a in range(0, len(imtasks)):
        downloadURL = str(pybinst) + '/project/' + str(app) + '/' + str(imtasks[a]) + '/results.json'
        outputFilename = str(app) + str(imtasks[a]) + '_task_run.json'
        # Download JSON files to working directory
        response = urllib2.urlopen(downloadURL)
        fileData = response.read()
        # Save data to disk
        output = open(outputFilename, 'wb')
        output.write(fileData)
        output.close()
        with open(outputFilename) as data_file:
            jtaskruns.extend(json.load(data_file))

    # Loop through and extract outlines
    for a in range(0, len(jtaskruns)):
        jtaskrun = jtaskruns[a]  # one contributor
        imtmp = Image.new("L", img.size, color=0)
        draw = ImageDraw.Draw(imtmp)
        h = img.size[1]  # image height; hoisted so the holes loop works even with no outline
        # Loop through outline (or possible multiple outline polygons)
        for outs in range(0, len(jtaskrun['info']['outline'])):
            # Extract the outline and convert to tuples
            o0 = jtaskrun['info']['outline'][outs][0]
            p = []  # Empty list for outline vertices
            for x in range(0, len(o0)):
                xy = o0[x]
                xy[1] = h - xy[1]  # reverse y-coordinates
                p.append(tuple(xy))
            draw.polygon(tuple(p), fill=255)
        # Loop through holes in same way
        for hls in range(0, len(jtaskrun['info']['holes'])):
            h0 = jtaskrun['info']['holes'][hls][0]
            ph = []
            for x in range(0, len(h0)):
                xy = h0[x]
                xy[1] = h - xy[1]
                ph.append(tuple(xy))
            draw.polygon(tuple(ph), fill=0)
        if jtaskrun['user_id'] is None:
            fn = imnameonly + '_mask_' + str(a) + '_anon.JPG'
        else:
            fn = imnameonly + '_mask_' + str(a) + '_user' + str(jtaskrun['user_id']) + '.JPG'
        imtmp.save(fn)
        if a == 1:  # fixed: `a is 1` relied on CPython small-int identity caching
            fn1 = imnameonly + '_mask.JPG'
            imtmp.save(fn1)
print("Done.")
|
"""Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu (jieyu@umich.edu)
"""
import os
from maple.core import logging
from maple.core import static_info
from maple.core import testing
from maple.race import testing as race_testing
from maple.systematic import program
from maple.systematic import search
class ChessTestCase(testing.DeathTestCase):
    """ Run a test under the CHESS scheduler.
    """
    def __init__(self, test, mode, threshold, controller):
        testing.DeathTestCase.__init__(self, test, mode, threshold)
        self.controller = controller
    def threshold_check(self):
        # Stop either when the systematic search space is exhausted or when
        # the generic death-test threshold is reached.
        if self.search_done():
            return True
        if testing.DeathTestCase.threshold_check(self):
            return True
        return False
    def search_done(self):
        """Return True when the systematic search has explored everything."""
        sinfo = static_info.StaticInfo()
        sinfo.load(self.controller.knobs['sinfo_out'])
        prog = program.Program(sinfo)
        prog.load(self.controller.knobs['program_out'])
        # BUG FIX: pass the loaded Program instance 'prog', not the
        # 'program' *module* object, to SearchInfo.
        search_info = search.SearchInfo(sinfo, prog)
        search_info.load(self.controller.knobs['search_out'])
        return search_info.done()
    def after_each_test(self):
        # Log one line per finished CHESS iteration with its wall time.
        iteration = len(self.test_history)
        used_time = self.test_history[-1].used_time()
        logging.msg('=== chess iteration %d done === (%f) (%s)\n' % (iteration, used_time, os.getcwd()))
    def after_all_tests(self):
        if self.is_fatal():
            logging.msg('chess fatal error detected\n')
        else:
            logging.msg('chess threshold reached\n')
    def log_stat(self):
        """Log total number of runs and accumulated time."""
        runs = len(self.test_history)
        used_time = self.used_time()
        logging.msg('%-15s %d\n' % ('chess_runs', runs))
        logging.msg('%-15s %f\n' % ('chess_time', used_time))
class RaceTestCase(race_testing.TestCase):
    """Run the race detector to find all racy instructions."""
    def __init__(self, test, mode, threshold, profiler):
        super(RaceTestCase, self).__init__(test, mode, threshold, profiler)
class ChessRaceTestCase(testing.TestCase):
    """ Run the race detector to find all racy instructions first, and
        then run the chess scheduler with sched_race on.
    """
    def __init__(self, race_testcase, chess_testcase):
        testing.TestCase.__init__(self)
        self.race_testcase = race_testcase
        self.chess_testcase = chess_testcase
    def is_fatal(self):
        assert self.done
        # Fatal if either phase found a fatal error.
        return self.race_testcase.is_fatal() or self.chess_testcase.is_fatal()
    def body(self):
        # Phase 1: race detection. CHESS only runs when the race phase did
        # not already uncover a fatal error.
        self.race_testcase.run()
        race_fatal = self.race_testcase.is_fatal()
        if not race_fatal:
            self.chess_testcase.run()
        logging.msg('\n')
        logging.msg('---------------------------\n')
        self.race_testcase.log_stat()
        if not race_fatal:
            self.chess_testcase.log_stat()
|
from __future__ import print_function
from ADSOr | cid.models import ClaimsLog
from ADSOrcid import tasks
from collections import defaultdict
app = tasks.app
def run():
    """Scan the ClaimsLog table and print per-status and per-author claim counts."""
    stats = defaultdict(int)  # idiomatic zero-factory (was: lambda: 0)
    authors = {}
    i = 0
    with app.session_scope() as session:
        # yield_per keeps memory flat while streaming the whole table.
        for r in session.query(ClaimsLog).order_by(ClaimsLog.id.asc()).yield_per(1000):
            stats[r.status] += 1
            if r.orcidid and r.bibcode:
                if r.orcidid not in authors:
                    authors[r.orcidid] = {'claimed': 0, 'forced': 0, '#full-import': 0, 'updated': 0, 'removed': 0, 'unchanged': 0}
                # .get() guards against a status value outside the pre-seeded
                # set (the original raised KeyError for unknown statuses).
                authors[r.orcidid][r.status] = authors[r.orcidid].get(r.status, 0) + 1
            if i % 100000 == 0:
                print('read ', i, 'rows')
            i += 1
    print('read', i, 'rows')
    print(stats)
    print(authors)
# Allow running this module directly as a one-off reporting script.
if __name__ == '__main__':
    run()
from tastypie import fields
from tastypie.bundle import Bundle
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from api.authorization import DateaBaseAuthorization
from api.authentication import ApiKeyPlusWebAuthentication
from api.base_resources import JSONDefaultMixin
from api.serializers import UTCSerializer
from django.template.defaultfilters import linebreaksbr
from tastypie.cache import SimpleCache
from tastypie.throttle import CacheThrottle
from django.contrib.contenttypes.models import ContentType
from account.utils import get_domain_from_url
from comment.models import Comment
class CommentResource(JSONDefaultMixin, ModelResource):
    """Tastypie REST resource exposing Comment objects."""
    user = fields.ToOneField('account.resources.UserResource',
            attribute='user', full=True, readonly=True)
    def dehydrate(self, bundle):
        # Collapse the fully embedded user resource down to the few fields
        # the client actually needs.
        user_data = {
            'username': bundle.data['user'].data['username'],
            'image_small': bundle.data['user'].data['image_small'],
            'id': bundle.data['user'].data['id']
        }
        bundle.data['user'] = user_data
        # Expose the related model's name instead of the ContentType pk.
        bundle.data['content_type'] = bundle.obj.content_type.model
        return bundle
    def hydrate(self,bundle):
        # preserve data
        if bundle.request.method == 'PATCH':
            # Preserve original values of server-controlled fields: a PATCH
            # may not overwrite them, so copy them back from the stored row.
            fields = ['user', 'published', 'content_type', 'object_id', 'created', 'client_domain']
            orig_obj = Comment.objects.get(pk=int(bundle.data['id']))
            for f in fields:
                # BUG FIX: the original referenced the undefined name
                # 'request.data' here, raising NameError on every PATCH.
                if f in bundle.data:
                    bundle.data[f] = getattr(orig_obj, f)
        elif bundle.request.method == 'POST':
            # enforce post user
            bundle.obj.user = bundle.request.user
            bundle.data['user'] = bundle.request.user.id
            # convert model name into model
            bundle.obj.content_type = ContentType.objects.get(model=bundle.data['content_type'])
            bundle.obj.client_domain = get_domain_from_url(bundle.request.META.get('HTTP_ORIGIN', ''))
            del bundle.data['content_type']
        return bundle
    def apply_sorting(self, obj_list, options=None):
        """Default to sorting by 'created' when the client gives no order_by."""
        if options is None:
            options = {}
        else:
            options = options.copy()
        if not 'order_by' in options:
            options['order_by'] = 'created'
        return super(CommentResource, self).apply_sorting(obj_list, options)
    class Meta:
        queryset = Comment.objects.all()
        resource_name = 'comment'
        allowed_methods = ['get', 'post', 'patch', 'delete']
        serializer = UTCSerializer(formats=['json'])
        filtering={
            'id' : ['exact'],
            'user': ALL_WITH_RELATIONS,
            'content_type': ALL_WITH_RELATIONS,
            'object_id': ['exact']
        }
        authentication = ApiKeyPlusWebAuthentication()
        authorization = DateaBaseAuthorization()
        limit = 50
        excludes = ['client_domain']
        ordering=['created']
        #cache = SimpleCache(timeout=5)
        throttle = CacheThrottle(throttle_at=500)
        always_return_data = True
        include_resource_uri = False
def get_comment_resource_class():
    """Return the resource class used for comment endpoints."""
    return CommentResource
|
"""
A special type of hypothesis whose value is a function.
The function is automatically eval-ed when we set_value, and is automatically hidden and unhidden when we pickle
This can also be called like a function, as in fh(data)!
"""
from Hypothesis import Hypothesis
from copy import copy
class FunctionHypothesis(Hypothesis):
    """
    A special type of hypothesis whose value is a function.
    The function is automatically eval-ed when we set_value, and is automatically hidden and unhidden when we pickle
    This can also be called like a function, as in fh(data)!
    """
    def __init__(self, value=None, f=None, display="lambda x: %s", **kwargs):
        """
        *value* - the value of this hypothesis
        *f* - defaultly None, in which case this uses self.value2function
        *args* - the arguments to the function
        """
        # this initializes prior and likelihood variables, so keep it here!
        # However, don't give it value, since then it calls set_value with no f argument!
        Hypothesis.__init__(self, None, display=display, **kwargs)
        # And set our value
        self.set_value(value, f=f)
    def __call__(self, *vals):
        # Delegate straight to the compiled function.
        # The below assertions are useful but VERY slow
        #assert not any([isinstance(x, FunctionData) for x in vals]), "*** Probably you mean to pass FunctionData.input instead of FunctionData?"
        #assert callable(self.fvalue)
        return self.fvalue(*vals)
    def compile_function(self):
        """
        Takes my value and returns what function I compute. Internally cached by set_value
        NOTE: This must be overwritten by subclasses to something useful--see LOTHypothesis
        """
        raise NotImplementedError
    def set_value(self, value, f=None):
        """
        Sets the value for the hypothesis.
        Another option: send f if speed is necessary
        """
        # Order matters: the base class stores self.value first, because
        # compile_function() below reads the freshly stored value.
        Hypothesis.set_value(self, value)
        if f is not None:
            self.fvalue = f
        elif value is None:
            self.fvalue = None
        else:
            self.fvalue = self.compile_function() # now that the value is set
    def force_function(self, f):
        """
        Sets the function to f, ignoring value.
        :param f: - a python function (object)
        :return:
        """
        # The sentinel string marks that fvalue did not come from compiling value.
        self.set_value( "<FORCED_FUNCTION>", f=f)
    def compute_single_likelihood(self, datum):
        """
        A function that must be implemented by subclasses to compute the likelihood of a single datum/response pair.
        This should NOT implement the temperature (that is handled by compute_likelihood)
        """
        raise NotImplementedError
    # ~~~~~~~~~
    # Make this thing pickleable
    def __getstate__(self):
        """ We copy the current dict so that when we pickle, we destroy the function"""
        # Compiled functions are generally not picklable, so they are dropped
        # here and rebuilt from 'value' in __setstate__.
        dd = copy(self.__dict__)
        dd['fvalue'] = None # clear the function out
        return dd
    def __setstate__(self, state):
        """
        sets the state of the hypothesis (when we unpickle)
        """
        self.__dict__.update(state)
        self.set_value(self.value) # just re-set the value so that we re-compute the function
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packing metadata for setuptools."""
from io import open
try:
    from setuptools import setup, find_packages
except ImportError:
    # NOTE(review): this fallback imports only `setup`; `find_packages` is not
    # available from distutils, so the setup() call below would raise
    # NameError without setuptools — confirm setuptools is a hard requirement.
    from distutils.core import setup
# Long description is README plus HISTORY with the changelog marker stripped.
with open('README.rst', encoding='utf-8') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst', encoding='utf-8') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
requirements = [
    'orderedset',
    'hamster-lib >= 0.13.0',
]
setup(
    name='hamster-gtk',
    version='0.11.0',
    description="A GTK interface to the hamster time tracker.",
    long_description=readme + '\n\n' + history,
    author="Eric Goller",
    author_email='eric.goller@projecthamster.org',
    url='https://github.com/projecthamster/hamster-gtk',
    packages=find_packages(exclude=['tests*']),
    install_requires=requirements,
    license="GPL3",
    zip_safe=False,
    keywords='hamster-gtk',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
    entry_points='''
    [gui_scripts]
    hamster-gtk=hamster_gtk.hamster_gtk:_main
    ''',
    package_data={
        'hamster_gtk': ['resources/hamster-gtk.gresource'],
    },
)
|
import numpy as np
NR_PER_CONDITION = 1000
neuro_sigma = 4
neuro_mean = 0
satis_sigma = 4
satis_mean = 0
print "Start drawing"
bins = {
5: np.array([-6, -3, 0, 3, 6]),
7: np.array([-6, -4, -2, 0, 2, 4, 6])
}
borders = {
5: np.array([-4.5,-1.5,1.5,4.5]),
7: np.array([-5.,-3.,-1.,1,3,5])
}
'output.dat'
conditions = [
{'cond': 1, 'first': 5, 'second': 5},
{'cond': 2, 'first': 7, 'second': 7},
{'cond': 3, 'first': 5, 'second': 7},
{'cond': 4, 'first': 7, 'second': 5}
]
neuro_vals = np.empty([12,NR_PER_CONDITION])
satis_vals = np.empty([12,NR_PER_CONDITION])
outfile = file('output.dat', 'w')
outfile.write('cond')
for i in range(12):
outfile.write('\tneuro'+str(i+1))
for i in range(12):
outfile.write('\tsatis'+st | r(i+1))
outfile.write('\n')
for cond in conditions:
print "Writing con | dition ", cond['cond']
for i in range(12):
neuro = neuro_sigma * np.random.randn(NR_PER_CONDITION) + neuro_mean
neuro_index = np.digitize(neuro, borders[cond['first']])
neuro_vals[i] = bins[cond['first']][neuro_index]
satis = satis_sigma * np.random.randn(NR_PER_CONDITION) + satis_mean
satis_index = np.digitize(satis, borders[cond['second']])
satis_vals[i] = bins[cond['second']][satis_index]
cond_arr = np.full([1,NR_PER_CONDITION], cond['cond'])
output = np.concatenate((cond_arr, neuro_vals, satis_vals) )
np.savetxt(outfile, output.transpose(), fmt="%2i")
outfile.close()
print "Finished"
|
from _ | _future__ import | unicode_literals
from django.apps import AppConfig
class ImagerImagesConfig(AppConfig):
    """Django application configuration for the imager_images app."""
    name = 'imager_images'
    def ready(self):
        """Run when app ready."""
        # Imported purely for its side effect: registers signal handlers.
        from imager_images import signals
|
class ProjectManager(object):
    """Skeleton interface for managing projects; every operation is a no-op stub."""

    def create(self, project_name):
        """Create a new project (not yet implemented)."""
        pass

    def delete(self, project_name):
        """Delete an existing project (not yet implemented)."""
        pass

    def list_projects(self):
        """List all known projects (not yet implemented)."""
        pass

    def upload_file(self, project_name, filename):
        """Upload a file into a project (not yet implemented)."""
        pass
|
import pytest
class TestService:
    """Completion test: completing 'service ' must yield at least one candidate."""
    @pytest.mark.complete("service ")
    def test_1(self, completion):
        assert completion
|
ute_import
from setuptools import setup, find_packages, Extension, Command
from setuptools.command.build_ext import build_ext
from setuptools.command.egg_info import egg_info
from distutils.file_util import copy_file
from distutils.dir_util import mkpath, remove_tree
from dis | tutils.util import get_platform
from distutils import log
import os
import sys
import subprocess
# The build tooling itself requires Python 3.6+, even though the produced
# wheel is tagged for py2 and py3.
if sys.version_info[:2] < (3, 6):
    sys.exit(
        "error: Python 3.6 is required to run setup.py. \n"
        "The generated wheel will be compatible with both py27 and py3+"
    )
cmdclass = {}
try:
    from wheel.bdist_wheel import bdist_wheel
except ImportError:
    # 'wheel' is optional; without it we simply cannot build wheels.
    pass
else:
    class UniversalBdistWheel(bdist_wheel):
        # Tag the wheel py2.py3/none but keep the platform tag (third slot),
        # since the package ships a platform-specific executable.
        def get_tag(self):
            return ("py2.py3", "none") + bdist_wheel.get_tag(self)[2:]
    cmdclass["bdist_wheel"] = UniversalBdistWheel
class Download(Command):
    """distutils command: download and unpack the 'ots' source tarball,
    verifying its SHA-256 checksum."""
    user_options = [
        ("version=", None, "ots source version number to download"),
        ("sha256=", None, "expected SHA-256 hash of the source archive"),
        ("download-dir=", "d", "where to unpack the 'ots' dir (default: src/c)"),
        ("clean", None, "remove existing directory before downloading"),
    ]
    boolean_options = ["clean"]
    URL_TEMPLATE = (
        "https://github.com/khaledhosny/ots/releases/download/"
        "v{version}/ots-{version}.tar.xz"
    )
    def initialize_options(self):
        self.version = None
        self.download_dir = None
        self.clean = False
        self.sha256 = None
    def finalize_options(self):
        # --version and --sha256 are mandatory; fail early with a clear error.
        if self.version is None:
            from distutils.errors import DistutilsSetupError
            raise DistutilsSetupError("must specify --version to download")
        if self.sha256 is None:
            from distutils.errors import DistutilsSetupError
            raise DistutilsSetupError("must specify --sha256 of downloaded file")
        if self.download_dir is None:
            self.download_dir = os.path.join("src", "c")
        self.url = self.URL_TEMPLATE.format(**vars(self))
    def run(self):
        from urllib.request import urlopen
        from io import BytesIO
        import tarfile
        import lzma
        import hashlib
        output_dir = os.path.join(self.download_dir, "ots")
        if self.clean and os.path.isdir(output_dir):
            remove_tree(output_dir, verbose=self.verbose, dry_run=self.dry_run)
        if os.path.isdir(output_dir):
            # Idempotent: an existing directory is treated as already done.
            log.info("{} was already downloaded".format(output_dir))
        else:
            archive_name = self.url.rsplit("/", 1)[-1]
            mkpath(self.download_dir, verbose=self.verbose, dry_run=self.dry_run)
            log.info("downloading {}".format(self.url))
            if not self.dry_run:
                # response is not seekable so we first download *.tar.xz to an
                # in-memory file, and then extract all files to the output_dir
                # (the SHA-256 of the archive is verified just below).
                f = BytesIO()
                with urlopen(self.url) as response:
                    f.write(response.read())
                f.seek(0)
                actual_sha256 = hashlib.sha256(f.getvalue()).hexdigest()
                if actual_sha256 != self.sha256:
                    from distutils.errors import DistutilsSetupError
                    raise DistutilsSetupError(
                        "invalid SHA-256 checksum:\n"
                        "actual: {}\n"
                        "expected: {}".format(actual_sha256, self.sha256)
                    )
            log.info("unarchiving {} to {}".format(archive_name, output_dir))
            if not self.dry_run:
                with lzma.open(f) as xz:
                    with tarfile.open(fileobj=xz) as tar:
                        filelist = tar.getmembers()
                        first = filelist[0]
                        if not (first.isdir() and first.name.startswith("ots")):
                            from distutils.errors import DistutilsSetupError
                            raise DistutilsSetupError(
                                "The downloaded archive is not recognized as "
                                "a valid ots source tarball"
                            )
                        # strip the root 'ots-X.X.X' directory before extracting
                        # NOTE(review): stripped member names are not otherwise
                        # sanitized before extractall — relies on trusting the
                        # checksummed upstream tarball.
                        rootdir = first.name + "/"
                        to_extract = []
                        for member in filelist[1:]:
                            if member.name.startswith(rootdir):
                                member.name = member.name[len(rootdir) :]
                                to_extract.append(member)
                        tar.extractall(output_dir, members=to_extract)
class Executable(Extension):
    """A pseudo-Extension describing a native executable produced by a build script."""

    # Windows executables carry the .exe suffix; elsewhere none is needed.
    suffix = ".exe" if os.name == "nt" else ""

    def __init__(self, name, script, options=None, output_dir=".", cwd=None, env=None):
        # No real sources: the build is delegated entirely to `script`.
        Extension.__init__(self, name, sources=[])
        self.target = self.name.split(".")[-1] + self.suffix
        self.script = script
        self.options = options or []
        self.output_dir = output_dir
        self.cwd = cwd
        self.env = env
class ExecutableBuildExt(build_ext):
    """build_ext variant that builds Executable instances by invoking their
    build script in a subprocess instead of compiling sources."""
    def finalize_options(self):
        from distutils.ccompiler import get_default_compiler
        build_ext.finalize_options(self)
        if self.compiler is None:
            self.compiler = get_default_compiler(os.name)
        # Snapshot of the environment, later augmented for MSVC builds.
        self._compiler_env = dict(os.environ)
    def get_ext_filename(self, ext_name):
        # Executables keep their native suffix rather than the .so/.pyd name.
        for ext in self.extensions:
            if isinstance(ext, Executable):
                return os.path.join(*ext_name.split(".")) + ext.suffix
        return build_ext.get_ext_filename(self, ext_name)
    def run(self):
        # Make sure the ots sources are present before any build starts.
        self.run_command("download")
        if self.compiler == "msvc":
            self.call_vcvarsall_bat()
        build_ext.run(self)
    def call_vcvarsall_bat(self):
        """Populate the compiler environment with MSVC's vcvars settings."""
        import struct
        from distutils._msvccompiler import _get_vc_env
        # Pointer size distinguishes 64-bit from 32-bit interpreters.
        arch = "x64" if struct.calcsize("P") * 8 == 64 else "x86"
        vc_env = _get_vc_env(arch)
        self._compiler_env.update(vc_env)
    def build_extension(self, ext):
        # Regular extensions follow the normal build path.
        if not isinstance(ext, Executable):
            build_ext.build_extension(self, ext)
            return
        cmd = [sys.executable, ext.script] + ext.options + [ext.target]
        if self.force:
            cmd += ["--force"]
        log.debug("running '{}'".format(" ".join(cmd)))
        if not self.dry_run:
            env = self._compiler_env.copy()
            if ext.env:
                env.update(ext.env)
            p = subprocess.run(cmd, cwd=ext.cwd, env=env)
            if p.returncode != 0:
                from distutils.errors import DistutilsExecError
                raise DistutilsExecError(
                    "running '{}' script failed".format(ext.script)
                )
        # Copy the produced binary to where setuptools expects the "extension".
        exe_fullpath = os.path.join(ext.output_dir, ext.target)
        dest_path = self.get_ext_fullpath(ext.name)
        mkpath(os.path.dirname(dest_path), verbose=self.verbose, dry_run=self.dry_run)
        copy_file(exe_fullpath, dest_path, verbose=self.verbose, dry_run=self.dry_run)
class CustomEggInfo(egg_info):
    """egg_info variant that downloads the ots sources first."""
    def run(self):
        # make sure the ots source is downloaded before creating sdist manifest
        self.run_command("download")
        egg_info.run(self)
# Register the custom commands with setuptools.
cmdclass["download"] = Download
cmdclass["build_ext"] = ExecutableBuildExt
cmdclass["egg_info"] = CustomEggInfo
# On macOS, forward the target architecture of the current wheel platform
# to the meson build script.
build_options = []
platform_tags = get_platform().split("-")
if "macosx" in platform_tags:
    if "universal2" in platform_tags:
        build_options.append("--mac-target=universal2")
    elif "arm64" in platform_tags:
        build_options.append("--mac-target=arm64")
# The ots-sanitize executable is modeled as an Extension so setuptools
# places it inside the wheel.
ots_sanitize = Executable(
    "ots.ots-sanitize",
    script="build.py",
    options=build_options,
    output_dir=os.path.join("build", "meson"),
)
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()
setup(
name="opentype-sanitizer",
use_scm_version={"write_to": "src/python/ots/_version.py"},
description=("Python wrapper for the Op |
orial2.py
This performs the same features as the C++ tutorial2.
It creates a room and a 3D sprite.
===========================================================================
There are two ways to use the CsPython module.
Either as a plugin within CS (pysimp),
or as a pure Python module (this example).
This is just the CS C++ tutorial 2 rewritten in Python.
Overall it looks remarkable like the original C++ code,
just with Python syntax; but the functions are all the same names and formats
(use None instead of NULL, and "" instead of (char*)NULL).
Please refer to the CS Tutorial 2 in the documentation
for detail on how the C++ works.
"""
import types, string, re, sys
import traceback
try: # get in CS
from cspace import *
except:
print "WARNING: Failed to import module cspace"
traceback.print_exc()
sys.exit(1) # die!!
# utils code
#############################
# Note: we are assuming a global 'object_reg'
# which will be defined later
def Report (severity, msg):
    """Forward msg to the CrystalSpace reporter at the given severity."""
    csReport(object_reg, severity, "crystalspace.application.python", msg)
def Log(msg):
    """Report an informational (NOTIFY) message."""
    Report(CS_REPORTER_SEVERITY_NOTIFY, msg)
def FatalError(msg="FatalError"):
    """Report an error message, then terminate the application."""
    Report(CS_REPORTER_SEVERITY_ERROR, msg)
    sys.exit(1)
# The application
#############################
class MyCsApp:
    """Tutorial application: builds a textured room with lights, a 3D sprite
    and a keyboard-driven camera, and renders a frame per Frame event."""
    def Init(self):
        """Query required plugins, open the app, and build the scene."""
        Log('MyCsApp.Init()...')
        # Pull the plugins we need out of the shared object registry.
        self.vc = object_reg.Get(iVirtualClock)
        self.engine = object_reg.Get(iEngine)
        self.g3d = object_reg.Get(iGraphics3D)
        self.loader = object_reg.Get(iLoader)
        self.keybd = object_reg.Get(iKeyboardDriver)
        if self.vc==None or self.engine==None or self.g3d==None or self.keybd==None or self.loader==None:
            FatalError("Error: in object registry query")
        if not csInitializer.OpenApplication(object_reg):
            FatalError("Could not open the application!")
        txtmgr=self.g3d.GetTextureManager()
        room=self.SetupRoom() # creates & returns the room
        self.CreateLights(room)
        self.LoadSprites(room)
        self.CreateCamera(room,csVector3(0, 5, -3))
        # Prepare() must come after all geometry/lights are in the engine.
        self.engine.Prepare()
        SimpleStaticLighter.ShineLights(room, self.engine, 4)
        Log('MyCsApp.Init() finished')
    def SetupRoom(self):
        """Create the 'room' sector: a textured inside-out tesselated box."""
        # load a texture
        if self.loader.LoadTexture("stone", "/lib/std/stone4.gif") == None:
            FatalError("Error: unable to load texture")
        # now get it as a material from the engine
        material = self.engine.GetMaterialList().FindByName("stone")
        # create the 'room'
        room = self.engine.CreateSector("room")
        mapper = DensityTextureMapper(0.3)
        box = TesselatedBox(csVector3(-5, 0, -5), csVector3(5, 20, 5))
        box.SetLevel(3)
        box.SetMapper(mapper)
        # INSIDE flag: walls face inwards so the camera sits inside the box.
        box.SetFlags(Primitives.CS_PRIMBOX_INSIDE)
        walls = GeneralMeshBuilder.CreateFactoryAndMesh (self.engine, room, \
            "walls", "walls_factory", box)
        walls.GetMeshObject().SetMaterialWrapper(material)
        return room
    def CreateLights(self,room):
        """Add three static coloured point lights to the room."""
        # lights
        ll = room.GetLights ()
        light = self.engine.CreateLight ("", csVector3 (-3, 5, 0), 10,csColor (1, 0, 0), CS_LIGHT_DYNAMICTYPE_STATIC)
        ll.Add (light)
        light = self.engine.CreateLight ("", csVector3 (3, 5, 0), 10,csColor (0, 0, 1), CS_LIGHT_DYNAMICTYPE_STATIC)
        ll.Add (light)
        light = self.engine.CreateLight ("", csVector3 (0, 5, -3), 10,csColor (0, 1, 0), CS_LIGHT_DYNAMICTYPE_STATIC)
        ll.Add (light)
    def LoadSprites(self,room):
        """Load the sample 3D sprite and place it (scaled 5x) in the room."""
        # Load a texture for our sprite.
        txtmgr=self.g3d.GetTextureManager()
        txt=self.loader.LoadTexture ("spark","/lib/std/spark.png",CS_TEXTURE_3D,txtmgr,1)
        if txt == None:
            FatalError("Error loading texture!")
        # Load a sprite template from disk.
        imeshfact=self.loader.LoadMeshObjectFactory("/lib/std/sprite1")
        if imeshfact == None:
            FatalError("Error loading mesh!")
        # Create the sprite and add it to the engine.
        sprite=self.engine.CreateMeshWrapper(imeshfact,"MySprite",room,csVector3 (-3, 5, 3))
        m=csMatrix3()
        m.Identity() # make sure its identity
        #m.__imul__(5.) # this is the same as m=m*5
        m=m*5
        sprite.GetMovable().SetTransform(m)
        sprite.GetMovable().UpdateMove()
        spstate=sprite.GetMeshObject().QueryInterface(iSprite3DState)
        spstate.SetAction("default")
        #spstate.SetMixMode(CS_FX_SETALPHA (.5))
        # The following two calls are not needed since CS_ZBUF_USE and
        # Object render priority are the default but they show how you
        # can do this.
        sprite.SetZBufMode(CS_ZBUF_USE)
        sprite.SetRenderPriority(self.engine.GetObjectRenderPriority())
    def CreateCamera(self,room,origin):
        """Create the view/camera looking into the room from origin."""
        self.view = csView(self.engine, self.g3d)
        self.view.GetCamera().SetSector(room)
        self.view.GetCamera().GetTransform().SetOrigin(origin)
        g2d = self.g3d.GetDriver2D()
        # Leave a 2-pixel border around the rendered view.
        self.view.SetRectangle(2, 2, g2d.GetWidth() - 4, g2d.GetHeight() - 4)
    def SetupFrame (self):
        """Per-frame update: move/rotate camera from keyboard, then draw."""
        #print 'SetupFrame called',
        elapsed_time = self.vc.GetElapsedTicks()
        # Now rotate the camera according to keyboard state
        speed = (elapsed_time / 1000.) * (0.03 * 20);
        if self.keybd.GetKeyState(CSKEY_RIGHT):
            self.view.GetCamera().GetTransform().RotateThis(CS_VEC_ROT_RIGHT, speed)
        if self.keybd.GetKeyState(CSKEY_LEFT):
            self.view.GetCamera().GetTransform().RotateThis(CS_VEC_ROT_LEFT, speed)
        if self.keybd.GetKeyState(CSKEY_PGUP):
            self.view.GetCamera().GetTransform().RotateThis(CS_VEC_TILT_UP, speed)
        if self.keybd.GetKeyState(CSKEY_PGDN):
            self.view.GetCamera().GetTransform().RotateThis(CS_VEC_TILT_DOWN, speed)
        if self.keybd.GetKeyState(CSKEY_UP):
            self.view.GetCamera().Move(CS_VEC_FORWARD * 4 * speed)
        if self.keybd.GetKeyState(CSKEY_DOWN):
            self.view.GetCamera().Move(CS_VEC_BACKWARD * 4 * speed)
        # Tell 3D driver we're going to display 3D things.
        if not self.g3d.BeginDraw(self.engine.GetBeginDrawFlags() | CSDRAW_3DGRAPHICS):
            FatalError()
        self.view.Draw()
        #print 'SetupFrame done'
    def FinishFrame(self):
        """Flush the finished frame to the screen."""
        #print 'FinishFrame called'
        self.g3d.FinishDraw()
        self.g3d.Print(None)
        #print 'FinishFrame done'
# EventHandler
#############################
def EventHandler(ev):
    """Top-level event dispatcher: ESC quits, Frame events drive rendering.

    Returns 1 when the event was handled, 0 otherwise."""
    pressed_escape = (ev.Name == KeyboardDown and
                      csKeyEventHelper.GetCookedCode(ev) == CSKEY_ESC)
    if pressed_escape:
        queue = object_reg.Get(iEventQueue)
        if queue:
            queue.GetEventOutlet().Broadcast(csevQuit(object_reg))
        return 1
    if ev.Name == Frame:
        app.SetupFrame()
        app.FinishFrame()
        return 1
    return 0
# startup code
#############################
# we could write a 'main' fn for this
# but I decided to put it in the body of the app
object_reg = csInitializer.CreateEnvironment(sys.argv)
if object_reg is None:
    FatalError("Couldn't create enviroment!")
# Handle --help before doing any real initialization.
if csCommandLineHelper.CheckHelp(object_reg):
    csCommandLineHelper.Help(object_reg)
    sys.exit(0)
if not csInitializer.SetupConfigManager(object_reg):
    FatalError("Couldn't init app!")
# Request the standard set of plugins the tutorial needs.
plugin_requests = [
    CS_REQUEST_VFS, CS_REQUEST_OPENGL3D, CS_REQUEST_ENGINE,
    CS_REQUEST_FONTSERVER, CS_REQUEST_IMAGELOADER, CS_REQUEST_LEVELLOADER,
]
if not csInitializer.RequestPlugins(object_reg, plugin_requests):
    FatalError("Plugin requests failed!")
# setup the event handler:
# note: we need not even make EventHandler() a global fn
# python would accept it as a member fn of MyCsApp
if not csInitializer.SetupEventHandler(object_reg, EventHandler):
    FatalError("Could not initialize event handler!")
# Get some often used event IDs
KeyboardDown = csevKeyboardDown(object_reg)
Frame = csevFrame(object_reg)
app=MyCsApp() # this is the one & |
attrs:
priority.append(u'tiny-repeat')
del attrs[u'tiny-repeat']
return priority + attrs.keys()
def evaluate(self,node,binding=None):
if node[u'__name__'] == u'__root__':
map(lambda x:self.evaluate_node(x,binding),node[u'__children__'])
else:
raise Exception(u'not a root node,evaluate illege')
    def evaluate_node(self,node,binding=None):
        """Evaluate one node in place within its parent's children/content lists.

        Applies the tag-name evaluator (if any), splices the resulting nodes
        back into the parent, then applies attribute evaluators. If an
        attribute evaluator rewrites the node, evaluation of the replacement
        nodes is delegated recursively and processing of this node stops."""
        # node should had parent
        if binding is not None:
            self.binding.update(binding)
        binding = self.binding
        # save parent
        parent = node[u'__parent__']
        brothers = parent[u'__children__']
        contents = parent[u'__content__']
        name = node[u'__name__']
        # find brother index: position of this node among its siblings
        brother_match = -1
        for i,brother in enumerate(brothers):
            if brother == node :
                brother_match = i
                break
        if brother_match == -1:
            raise Exception(u'no match node in parent, illege evaluate')
        # find content index: the (brother_match+1)-th '__node__' placeholder
        # in the parent's content list corresponds to this child
        content_match = -1
        content_meet = 0
        for i,content in enumerate(contents):
            if content == u'__node__':
                content_meet = content_meet + 1
                if content_meet == brother_match+1:
                    content_match = i
                    break
        if content_match == -1:
            raise Exception(u'no match content in parent for node content, illege evaluate')
        def replace_in_parent(content_match,brother_match,nodes):
            # Splice *nodes* in where the original node sat, then drop the
            # original entry which has shifted len(nodes) positions right.
            for i,node in enumerate(nodes):
                brothers.insert( i + brother_match,node )
                contents.insert( i + content_match,u'__node__' )
            # remove original
            total_nodes = len(nodes)
            brothers.pop(total_nodes+brother_match)
            contents.pop(total_nodes+content_match)
        # evaluated container
        nodes = [node]
        # find evaluator for the tag name
        evaluator = self.evaluator.get(name,None)
        if evaluator is not None:
            nodes = evaluator(node,binding)
        # replace
        replace_in_parent(content_match,brother_match,nodes)
        # now,new nodes are associated with main tree
        # mark node numbers
        total_nodes = len(nodes)
        # index trackers
        # as attrs may generate more nodes also
        content_index_tracker = content_match
        brother_index_tracker = brother_match
        # deal with attrs
        for i,node in enumerate(nodes):
            # evaluate attr
            attrs = node[u'__attrs__']
            # new nodes may be generated by attr evaluator,
            # defer it.
            # or it will have trouble with tree organization
            for attr in self.priority_attrs(attrs):
                evaluator = self.evaluator.get(attr,None)
                if evaluator is not None:
                    # evaluate
                    evaluated = evaluator(node,binding)
                    # replace `this` node
                    # attach to main tree
                    replace_in_parent(content_index_tracker,brother_index_tracker,evaluated)
                    # delegate evaluation of new evaluated nodes
                    # NOTE(review): map() is lazy on Python 3; this relies on
                    # Python 2's eager map for its side effects.
                    map(lambda x:self.evaluate_node(x,binding),evaluated)
                    # hand out control already
                    # stop processing
                    return
            # here,means node not changed in main tree,
            # process children
            for child in node[u'__children__']:
                self.evaluate_node(child,binding)
    def _eval_tiny_repeat(self,node,binding):
        """Expand a node carrying tiny-repeat: clone and evaluate it once per
        iteration, binding the tiny-repeat-index name to the loop counter.

        Returns the list of expanded replacement nodes."""
        attrs = node[u'__attrs__']
        # NOTE(review): eval() on template attributes — template input must
        # be trusted.
        times = eval(attrs[u'tiny-repeat'],binding)
        index_name = attrs[u'tiny-repeat-index']
        # clear instrument attributes so clones do not re-trigger expansion
        del attrs[u'tiny-repeat']
        del attrs[u'tiny-repeat-index']
        # node parent
        parent = node[u'__parent__']
        # expand content
        repeated = []
        # reuse binding context; remember any value the index name shadows
        conflict = None
        if index_name in binding:
            conflict = binding[index_name]
        # generate
        for i in range(times):
            # bind index value
            binding[index_name] = i
            # DO copy
            # take care of recursive bind
            copyed = node.clone()
            # node not in parents actually,
            # so a direct evaluate_node will fail.
            # make an isolated container for this node,
            # then evaluate/evaluate_node will work as expected.
            # this is a little weird.
            psuedo_root = Node()
            psuedo_root[u'__children__'].append(copyed)
            psuedo_root[u'__content__'].append(u'__node__')
            copyed[u'__parent__'] = psuedo_root
            self.evaluate(psuedo_root,binding)
            # node is evaluated
            # reap nodes
            # re-associate parent
            for child in psuedo_root[u'__children__']:
                child[u'__parent__'] = parent
            repeated.extend(psuedo_root[u'__children__'])
        # recover conflict
        if conflict is not None:
            binding[index_name] = conflict
        return repeated
    def _eval_tiny_number(self,node,binding):
        """Evaluate tiny-number: format the numeric result into the node's
        content (thousands separators; 2 decimals unless integral or forced
        to integer) and optionally add a sign-based CSS class."""
        attrs = node[u'__attrs__']
        # evaluate
        value = float(eval(attrs[u'tiny-number'],binding))
        # clear instrument
        del attrs[u'tiny-number']
        if u'tiny-force-integer' in attrs:
            # force integer rendering; NaN falls back to 0
            del attrs[u'tiny-force-integer']
            if not math.isnan(value):
                node[u'__content__'].append(u'{:,}'.format(int(value)))
            else:
                node[u'__content__'].append(u'{:,}'.format(0))
        else:
            # fill content
            if math.isnan(value):
                node[u'__content__'].append(u'N/A')
            elif value == int(value):
                node[u'__content__'].append(u'{:,}'.format(int(value)))
            else:
                node[u'__content__'].append(u'{:,.2f}'.format(value))
        if u'tiny-color' in attrs and not math.isnan(value):
            del attrs[u'tiny-color']
            css = u''
            # add class
            if u'class' in attrs:
                css = attrs[u'class']
            if value > 0:
                attrs[u'class'] = u'%s tiny-positive-number' % css
            elif value < 0:
                # NOTE(review): 'tiny-negetive-number' is misspelled but is a
                # runtime CSS class name — must stay in sync with the stylesheet.
                attrs[u'class'] = u'%s tiny-negetive-number' % css
        return [node]
    def _eval_tiny_percent(self,node,binding):
        """Evaluate tiny-percent: format the result as a percentage (precision
        from tiny-precision, default 2) and, unless tiny-default-color is set,
        add a sign-based CSS class."""
        attrs = node[u'__attrs__']
        # evaluate
        value = float(eval(attrs[u'tiny-percent'],binding))
        # clear instrument
        del attrs[u'tiny-percent']
        if not math.isnan(value):
            if u'tiny-precision' in attrs:
                format = u'{:,.%s%%}' % eval(attrs[u'tiny-precision'],binding)
                node[u'__content__'].append(format.format(value))
            else:
                node[u'__content__'].append(u'{:,.2%}'.format(value))
        else:
            node[u'__content__'].append(u'N/A')
        if u'tiny-default-color' not in attrs:
            css = u''
            # add class
            if u'class' in attrs:
                css = attrs[u'class']
            if value > 0:
                attrs[u'class'] = u'%s tiny-positive-number' % css
            elif value < 0:
                # NOTE(review): misspelled class name matches the one used by
                # _eval_tiny_number and the project stylesheet — keep as is.
                attrs[u'class'] = u'%s tiny-negetive-number' % css
        else:
            del attrs[u'tiny-default-color']
        return [node]
def _eval_tiny_data(self,node,binding):
attrs = node[u'__attrs__']
node[u'__content__'].append(u'%s' % eval(attrs[u'tiny-data'],binding))
# clear instrument
del attrs[u'tiny-data']
return [node]
def |
#!/usr/bin/python2
"""fkmonthgraph - graph of enemy fighter kills & losses, by month
Requires matplotlib, see http://matplotlib.org or search your package
manager (Debian: apt-get install python-matplotlib)
"""
import sys
import hdata, fighterkill
from extra_data import Fighters as extra
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # --nototal suppresses the black 'total' curves; --nolegend the legend.
    showtotal = '--nototal' not in sys.argv
    legend = '--nolegend' not in sys.argv
    data = fighterkill.extract_kills(sys.stdin)
    # Aggregate the per-day kill/loss records from stdin into calendar months.
    monthly = {}
    month = min(data.keys())
    last = max(data.keys())
    while month <= last:
        month_end = month.nextmonth()
        monthly[month] = {'total': {'kills': 0, 'losses': 0},
                          'kills': [0] * len(hdata.Fighters),
                          'losses': [0] * len(hdata.Fighters)}
        d = month.copy()
        while d < month_end:
            if d in data:
                monthly[month]['total']['kills'] += data[d]['total']['kills']
                monthly[month]['total']['losses'] += data[d]['total']['losses']
                for i, f in enumerate(hdata.Fighters):
                    monthly[month]['kills'][i] += data[d]['kills'][i]
                    monthly[month]['losses'][i] += data[d]['losses'][i]
            d = d.next()
        month = month_end
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    dates = sorted(monthly.keys())
    for fi, f in enumerate(hdata.Fighters):
        def ins(m):
            # Only plot a fighter type for months in which it was in service.
            return hdata.inservice(m, f) or hdata.inservice(m.nextmonth(), f)
        colour = extra[f['name']]['colour']
        # Kills are plotted above the axis, losses mirrored below it.
        plt.plot_date([d.ordinal() for d in dates if ins(d)],
                      [monthly[d]['kills'][fi] for d in dates if ins(d)],
                      fmt='o-', mew=0, color=colour, tz=None, xdate=True,
                      ydate=False, label=f['name'], zorder=0)
        plt.plot_date([d.ordinal() for d in dates if ins(d)],
                      [-monthly[d]['losses'][fi] for d in dates if ins(d)],
                      fmt='o-', mew=0, color=colour, tz=None, xdate=True,
                      ydate=False, label=None, zorder=0)
    if showtotal:
        # BUG FIX: the totals were previously drawn even when --nototal
        # was passed; the flag was parsed but never consulted.
        plt.plot_date([d.ordinal() for d in dates],
                      [monthly[d]['total']['kills'] for d in dates],
                      fmt='k+-', tz=None, xdate=True, ydate=False,
                      label='total', zorder=-2)
        plt.plot_date([d.ordinal() for d in dates],
                      [-monthly[d]['total']['losses'] for d in dates],
                      fmt='k+-', tz=None, xdate=True, ydate=False,
                      label=None, zorder=-2)
    ax.grid(b=True, axis='y')
    plt.axhline(y=0, xmin=0, xmax=1, c='k', zorder=-1)
    if legend:
        plt.legend(ncol=2, loc='upper left')
    plt.show()
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the tangible template for the shared hutt medium engine (s02).

	Fixed a stray '|' token that had corrupted the return statement.
	"""
	result = Tangible()
	result.template = "object/tangible/ship/attachment/engine/shared_hutt_medium_engine_s02.iff"
	result.attribute_template_id = 8
	result.stfName("item_n","ship_attachment")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result
from django.db import | models
class Author(models.Model):
    """An author; may be linked to many books (see Book.authors)."""
    # Display name, also used as the model's unicode representation.
    name = models.CharField(max_length=20)
    def __unicode__(self):
        return self.name
class Book(models.Model):
    """A book with a many-to-many relation to its authors."""
    # Title of the book, also used as the model's unicode representation.
    name = models.CharField(max_length=20)
    authors = models.ManyToManyField(Author)
    def __unicode__(self):
        return self.name
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class responderparam(base_resource) :
    """ Configuration for responder parameter resource.

    Fixes over the previous revision: repaired two corrupted lines
    (__init__ header and the undefaction getter's return), replaced the
    Python-2-only ``ur'''...'''`` docstring prefixes with ``r'''...'''``
    (valid on both Python 2 and 3), and fixed the 'responser' typo.
    """
    def __init__(self) :
        self._undefaction = ""

    @property
    def undefaction(self) :
        r"""Action to perform when policy evaluation creates an UNDEF condition. Available settings function as follows:
        * NOOP - Send the request to the protected server.
        * RESET - Reset the request and notify the user's browser, so that the user can resend the request.
        * DROP - Drop the request without sending a response to the user.<br/>Default value: "NOOP".
        """
        try :
            return self._undefaction
        except Exception as e:
            raise e

    @undefaction.setter
    def undefaction(self, undefaction) :
        r"""Action to perform when policy evaluation creates an UNDEF condition. Available settings function as follows:
        * NOOP - Send the request to the protected server.
        * RESET - Reset the request and notify the user's browser, so that the user can resend the request.
        * DROP - Drop the request without sending a response to the user.<br/>Default value: "NOOP"
        """
        try :
            self._undefaction = undefaction
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        r""" converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(responderparam_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                if (result.errorcode == 444) :
                    # Session expired: clear it before surfacing the error.
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.responderparam
        except Exception as e :
            raise e

    def _get_object_name(self) :
        r""" Returns the value of object identifier argument
        """
        try :
            # Global parameter resources carry no per-object identifier.
            return 0
        except Exception as e :
            raise e

    @classmethod
    def update(cls, client, resource) :
        r""" Use this API to update responderparam.
        """
        try :
            if type(resource) is not list :
                updateresource = responderparam()
                updateresource.undefaction = resource.undefaction
                return updateresource.update_resource(client)
        except Exception as e :
            raise e

    @classmethod
    def unset(cls, client, resource, args) :
        r""" Use this API to unset the properties of responderparam resource.
        Properties that need to be unset are specified in args array.
        """
        try :
            if type(resource) is not list :
                unsetresource = responderparam()
                return unsetresource.unset_resource(client, args)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        r""" Use this API to fetch all the responderparam resources that are configured on netscaler.
        """
        try :
            if not name :
                obj = responderparam()
                response = obj.get_resources(client, option_)
                return response
        except Exception as e :
            raise e
class responderparam_response(base_response) :
    """Envelope object into which responderparam GET replies are decoded."""
    def __init__(self, length=1) :
        # Standard nitro response fields.
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # One result slot per expected record.
        self.responderparam = [responderparam() for _ in range(length)]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple lib to connect to a Zabbix agent and request the value of an item.
"""
import socket
def query_agent(**kwargs):
    """
    Open a socket to port 10050 on the remote server and query for the value
    of an item (e.g. proc.num[zabbix_server] or proc.num[zabbix_proxy]).

    Keyword Args:
        query_string: item key to request (default 'agent.ping')
        query_host:   agent address (default '127.0.0.1')
        query_port:   agent port, int or numeric string (default 10050)

    Returns:
        (0, digits-of-reply) on success, or (99999, error message) when the
        connection cannot be established.
    """
    query_string = kwargs.get('query_string', 'agent.ping')
    query_host = kwargs.get('query_host', '127.0.0.1')
    # BUG FIX: the port was passed to connect() as a string; it must be int.
    query_port = int(kwargs.get('query_port', 10050))
    try:
        connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        connection.connect((query_host, query_port))
    except OSError as e:
        # BUG FIX: 'e' was referenced but never bound (bare except clause).
        return (99999, 'ERROR: {} :: {}:{}'.format(e, query_host, query_port))
    try:
        # Sockets speak bytes on Python 3: encode the request, decode the reply.
        connection.send(query_string.encode('utf-8'))
        result = connection.recv(8192).decode('utf-8', 'replace')
    finally:
        # Always release the socket, even if send/recv fails.
        connection.close()
    retval = ''.join(x for x in result if x.isdigit())
    return (0, retval)
if __name__ == '__main__':
    # When run directly, execute the module's doctests as a smoke test.
    import doctest
    doctest.testmod()
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Trusted (browser-side) components, built with the host toolchain.
trusted_scons_files = [
    'src/shared/ppapi/build.scons',
    'src/shared/ppapi_proxy/build.scons',
    'src/trusted/plugin/build.scons',
    'tests/ppapi_geturl/build.scons',
    'tests/ppapi_messaging/build.scons',
    'tests/ppapi_browser/ppb_file_system/build.scons',
    'tests/ppapi_tests/build.scons', # Build PPAPI tests from Chrome as a .so
]
# Untrusted libraries for use by user code.
untrusted_scons_files = [
    'src/shared/ppapi/nacl.scons',
]
# Untrusted libraries for use by system code.
untrusted_irt_scons_files = [
    'src/shared/ppapi_proxy/nacl.scons',
]
# Browser test suites built as untrusted nexes (one nacl.scons each).
# BUG FIX: repaired two garbled path literals (ppb_instance, ppp_input_event).
nonvariant_test_scons_files = [
    'tests/ppapi/nacl.scons',
    'tests/ppapi_browser/bad/nacl.scons',
    'tests/ppapi_browser/crash/nacl.scons',
    'tests/ppapi_browser/extension_mime_handler/nacl.scons',
    'tests/ppapi_browser/manifest/nacl.scons',
    'tests/ppapi_browser/ppb_core/nacl.scons',
    'tests/ppapi_browser/ppb_dev/nacl.scons',
    'tests/ppapi_browser/ppb_file_system/nacl.scons',
    'tests/ppapi_browser/ppb_graphics2d/nacl.scons',
    'tests/ppapi_browser/ppb_graphics3d/nacl.scons',
    'tests/ppapi_browser/ppb_image_data/nacl.scons',
    'tests/ppapi_browser/ppb_instance/nacl.scons',
    'tests/ppapi_browser/ppb_memory/nacl.scons',
    'tests/ppapi_browser/ppb_pdf/nacl.scons',
    'tests/ppapi_browser/ppb_scrollbar/nacl.scons',
    'tests/ppapi_browser/ppb_url_loader/nacl.scons',
    'tests/ppapi_browser/ppb_url_request_info/nacl.scons',
    'tests/ppapi_browser/ppb_var/nacl.scons',
    'tests/ppapi_browser/ppb_widget/nacl.scons',
    'tests/ppapi_browser/ppp_input_event/nacl.scons',
    'tests/ppapi_browser/ppp_instance/nacl.scons',
    'tests/ppapi_browser/progress_events/nacl.scons',
    'tests/ppapi_browser/stress_many_nexes/nacl.scons',
    'tests/ppapi_example_2d/nacl.scons',
    'tests/ppapi_example_audio/nacl.scons',
    'tests/ppapi_example_events/nacl.scons',
    # TODO(dspringer): re-enable test once the 3D ABI has stabilized. See
    # http://code.google.com/p/nativeclient/issues/detail?id=2060
    # 'tests/ppapi_example_gles2/nacl.scons',
    'tests/ppapi_example_post_message/nacl.scons',
    'tests/ppapi_geturl/nacl.scons',
    'tests/ppapi_gles_book/nacl.scons',
    'tests/ppapi_messaging/nacl.scons',
    # Broken by Chrome change
    # http://code.google.com/p/nativeclient/issues/detail?id=2480
    #'tests/ppapi_simple_tests/nacl.scons',
    'tests/ppapi_test_example/nacl.scons',
    'tests/ppapi_test_lib/nacl.scons',
    'tests/ppapi_tests/nacl.scons',
]
|
# IPv6 IPPool fixture: revision 1 of "ippool-name2".
ppool_name2_rev1_v6 = {
    'apiVersion': API_VERSION,
    'kind': 'IPPool',
    'metadata': {
        'name': 'ippool-name2'
    },
    'spec': {
        'cidr': "fed0:8001::/64",
        'ipipMode': 'Never',
        'blockSize': 123
    }
}
#
# BGPPeers
#
# Two revisions of the same IPv4 peer (same name, so update paths can be
# exercised), plus one IPv6 peer.
bgppeer_name1_rev1_v4 = {
    'apiVersion': API_VERSION,
    'kind': 'BGPPeer',
    'metadata': {
        'name': 'bgppeer-name-123abc',
    },
    'spec': {
        'node': 'node1',
        'peerIP': '192.168.0.250',
        'asNumber': 64514,
    },
}
bgppeer_name1_rev2_v4 = {
    'apiVersion': API_VERSION,
    'kind': 'BGPPeer',
    'metadata': {
        'name': 'bgppeer-name-123abc',
    },
    'spec': {
        'node': 'node2',
        'peerIP': '192.168.0.251',
        'asNumber': 64515,
    },
}
bgppeer_name2_rev1_v6 = {
    'apiVersion': API_VERSION,
    'kind': 'BGPPeer',
    'metadata': {
        'name': 'bgppeer-name-456def',
    },
    'spec': {
        'node': 'node2',
        'peerIP': 'fd5f::6:ee',
        'asNumber': 64590,
    },
}
#
# Network Policy
#
# Revision 1 of "policy-mypolicy1": fully-populated ingress/egress rules.
# BUG FIX: repaired two garbled 'selector' entries in the ingress rule.
networkpolicy_name1_rev1 = {
    'apiVersion': API_VERSION,
    'kind': 'NetworkPolicy',
    'metadata': {
        'name': 'policy-mypolicy1',
        'namespace': 'default'
    },
    'spec': {
        'order': 100,
        'selector': "type=='database'",
        'types': ['Ingress', 'Egress'],
        'egress': [
            {
                'action': 'Allow',
                'source': {
                    'selector': "type=='application'"},
            },
        ],
        'ingress': [
            {
                'ipVersion': 4,
                'action': 'Deny',
                'destination': {
                    'notNets': ['10.3.0.0/16'],
                    'notPorts': ['110:1050'],
                    'notSelector': "type=='apples'",
                    'nets': ['10.2.0.0/16'],
                    'ports': ['100:200'],
                    'selector': "type=='application'",
                },
                'protocol': 'TCP',
                'source': {
                    'notNets': ['10.1.0.0/16'],
                    'notPorts': [1050],
                    'notSelector': "type=='database'",
                    'nets': ['10.0.0.0/16'],
                    'ports': [1234, '10:1024'],
                    'selector': "type=='application'",
                    'namespaceSelector': 'has(role)',
                }
            }
        ],
    }
}
# Revision 2 of "policy-mypolicy1": simplified rules, new order/selector.
networkpolicy_name1_rev2 = {
    'apiVersion': API_VERSION,
    'kind': 'NetworkPolicy',
    'metadata': {
        'name': 'policy-mypolicy1',
        'namespace': 'default'
    },
    'spec': {
        'order': 100000,
        'selector': "type=='sql'",
        'types': ['Ingress', 'Egress'],
        'egress': [
            {
                'action': 'Deny',
                'protocol': 'TCP',
            },
        ],
        'ingress': [
            {
                'action': 'Allow',
                'protocol': 'UDP',
            },
        ],
    }
}
# "policy-mypolicy2": exercises the wider Kubernetes ObjectMeta surface
# (ownerReferences, initializers, finalizers, timestamps, ...).
networkpolicy_name2_rev1 = {
    'apiVersion': API_VERSION,
    'kind': 'NetworkPolicy',
    'metadata': {
        'name': 'policy-mypolicy2',
        'namespace': 'default',
        'generateName': 'test-policy-',
        'deletionTimestamp': '2006-01-02T15:04:07Z',
        'deletionGracePeriodSeconds': 30,
        'ownerReferences': [{
            'apiVersion': 'extensions/v1beta1',
            'blockOwnerDeletion': True,
            'controller': True,
            'kind': 'DaemonSet',
            'name': 'endpoint1',
            'uid': 'test-uid-change',
        }],
        'initializers': {
            'pending': [{
                'name': 'initializer1',
            }],
            'result': {
                'status': 'test-status',
            },
        },
        'clusterName': 'cluster1',
        'labels': {'label1': 'l1', 'label2': 'l2'},
        'annotations': {'key': 'value'},
        'selfLink': 'test-self-link',
        'uid': 'test-uid-change',
        'generation': 3,
        'finalizers': ['finalizer1', 'finalizer2'],
        'creationTimestamp': '2006-01-02T15:04:05Z',
    },
    'spec': {
        'order': 100000,
        'selector': "type=='sql'",
        'types': ['Ingress', 'Egress'],
        'egress': [
            {
                'action': 'Deny',
                'protocol': 'TCP',
            },
        ],
        'ingress': [
            {
                'action': 'Allow',
                'protocol': 'UDP',
            },
        ],
    }
}
#
# Global Network Policy
#
# Same shape as the namespaced NetworkPolicy fixtures above, but cluster-wide
# (no namespace in metadata).
globalnetworkpolicy_name1_rev1 = {
    'apiVersion': API_VERSION,
    'kind': 'GlobalNetworkPolicy',
    'metadata': {
        'name': 'policy-mypolicy1',
    },
    'spec': {
        'order': 100,
        'selector': "type=='database'",
        'types': ['Ingress', 'Egress'],
        'egress': [
            {
                'action': 'Allow',
                'source': {
                    'selector': "type=='application'"},
            },
        ],
        'ingress': [
            {
                'ipVersion': 4,
                'action': 'Deny',
                'destination': {
                    'notNets': ['10.3.0.0/16'],
                    'notPorts': ['110:1050'],
                    'notSelector': "type=='apples'",
                    'nets': ['10.2.0.0/16'],
                    'ports': ['100:200'],
                    'selector': "type=='application'",
                },
                'protocol': 'TCP',
                'source': {
                    'notNets': ['10.1.0.0/16'],
                    'notPorts': [1050],
                    'notSelector': "type=='database'",
                    'nets': ['10.0.0.0/16'],
                    'ports': [1234, '10:1024'],
                    'selector': "type=='application'",
                    'namespaceSelector': 'has(role)',
                }
            }
        ],
    }
}
# Revision 2 adds the doNotTrack/applyOnForward flags.
globalnetworkpolicy_name1_rev2 = {
    'apiVersion': API_VERSION,
    'kind': 'GlobalNetworkPolicy',
    'metadata': {
        'name': 'policy-mypolicy1',
    },
    'spec': {
        'order': 100000,
        'selector': "type=='sql'",
        'doNotTrack': True,
        'applyOnForward': True,
        'types': ['Ingress', 'Egress'],
        'egress': [
            {
                'action': 'Deny',
                'protocol': 'TCP',
            },
        ],
        'ingress': [
            {
                'action': 'Allow',
                'protocol': 'UDP',
            },
        ],
    }
}
#
# Global network sets
#
# Small literal set mixing IPv4/IPv6 addresses and CIDRs.
globalnetworkset_name1_rev1 = {
    'apiVersion': API_VERSION,
    'kind': 'GlobalNetworkSet',
    'metadata': {
        'name': 'net-set1',
    },
    'spec': {
        'nets': [
            "10.0.0.1",
            "11.0.0.0/16",
            "feed:beef::1",
            "dead:beef::96",
        ]
    }
}
# A network set with a large number of entries. In prototyping this test, I found that there are
# "upstream" limits that cap how large we can go:
#
# - Kubernetes' gRPC API has a 4MB message size limit.
# - etcdv3 has a 1MB value size limit.
# One /28 per iteration: 10.(i>>8).(i%256).0/28 — 10000 distinct CIDRs.
# BUG FIX: build with a range() comprehension instead of an xrange loop,
# so this fixture module also loads under Python 3 (xrange was removed).
many_nets = ["10.%s.%s.0/28" % (i >> 8, i % 256) for i in range(10000)]
# Same network-set name as above, but with the large generated nets list.
globalnetworkset_name1_rev1_large = {
    'apiVersion': API_VERSION,
    'kind': 'GlobalNetworkSet',
    'metadata': {
        'name': 'net-set1',
    },
    'spec': {
        'nets': many_nets,
    }
}
#
# Host Endpoints
#
# Revisions of "endpoint1"; rev2 moves it to another node and interface.
hostendpoint_name1_rev1 = {
    'apiVersion': API_VERSION,
    'kind': 'HostEndpoint',
    'metadata': {
        'name': 'endpoint1',
        'labels': {'type': 'database'},
    },
    'spec': {
        'interfaceName': 'eth0',
        'profiles': ['prof1', 'prof2'],
        'node': 'host1'
    }
}
hostendpoint_name1_rev2 = {
    'apiVersion': API_VERSION,
    'kind': 'HostEndpoint',
    'metadata': {
        'name': 'endpoint1',
        'labels': {'type': 'frontend'}
    },
    'spec': {
        'interfaceName': 'cali7',
        'profiles': ['prof1', 'prof2'],
        'node': 'host2'
    }
}
hostendpoint_name1_rev3 = {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {
'name': 'endpoint1',
'labels': {'type': 'frontend', 'misc': 'version1'},
'a |
from __future__ import with_statement

import socket
import sys

from collections import defaultdict, deque
from functools import partial
from itertools import count

from . import serialization
from .entity import Exchange, Queue
from .log import Log
from .messaging import Consumer as _Consumer
from .utils import uuid

__all__ = ["Broadcast", "entry_to_queue", "maybe_declare", "uuid",
           "itermessages", "send_reply", "isend_reply",
           "collect_replies", "insured", "ipublish"]
declared_entities = defaultdict(lambda: set())
insured_logger = Log("kombu.insurance")
class Broadcast(Queue):
    """Queue bound to a fanout exchange so every consumer sees each message.

    Each instance gets a unique queue name by default, and both queue and
    exchange are configured for auto deletion.

    :keyword name: used as the name of the fanout exchange.
    :keyword queue: custom queue name; by default a unique id is generated
        for every consumer.
    :keyword \*\*kwargs: any additional :class:`~kombu.entity.Queue`
        keyword arguments.
    """

    def __init__(self, name=None, queue=None, **kwargs):
        options = {"alias": name,
                   "auto_delete": True,
                   "exchange": Exchange(name, type="fanout")}
        # Caller-supplied keyword arguments override the defaults above.
        options.update(kwargs)
        super(Broadcast, self).__init__(
            name=queue or "bcast.%s" % (uuid(), ), **options)
def maybe_declare(entity, channel, retry=False, **retry_policy):
    """Declare *entity* on *channel* unless already cached as declared.

    With ``retry=True`` the declaration is wrapped in the connection's
    ensure/retry machinery using *retry_policy*.
    """
    if not retry:
        return _maybe_declare(entity, channel)
    return _imaybe_declare(entity, channel, **retry_policy)
def _maybe_declare(entity, channel):
    """Declare *entity* once per connection; return True if declared now."""
    cache = declared_entities[channel.connection.client]
    if not entity.is_bound:
        entity = entity.bind(channel)
    # Skip the broker round-trip when this entity's declaration is cacheable
    # and we have already declared it on this connection.
    if entity.can_cache_declaration and entity in cache:
        return False
    entity.declare()
    cache.add(entity)
    return True
def _imaybe_declare(entity, channel, **retry_policy):
    # Bind the entity to the channel, then run _maybe_declare under the
    # connection's ensure() machinery so transient failures are retried
    # according to *retry_policy*.
    entity = entity(channel)
    return channel.connection.client.ensure(entity, _maybe_declare,
                           **retry_policy)(entity, channel)
def itermessages(conn, channel, queue, limit=1, timeout=None,
                 Consumer=_Consumer, **kwargs):
    # Buffer of (body, message) pairs filled by the consumer callback while
    # events are drained.
    acc = deque()
    def on_message(body, message):
        acc.append((body, message))
    with Consumer(channel, [queue], callbacks=[on_message], **kwargs):
        for _ in eventloop(conn, limit=limit, timeout=timeout,
                           ignore_timeouts=True):
            try:
                yield acc.popleft()
            except IndexError:
                # An event was drained but no message arrived for our queue
                # yet; keep draining.
                pass
def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False):
    """Best practice generator wrapper around ``Connection.drain_events``.

    Able to drain events forever, with a limit, and optionally ignoring
    timeout errors (a timeout of 1 is often used in environments where
    the socket can get "stuck", and is a best practice for Kombu consumers).

    **Examples**

    ``eventloop`` is a generator::

        >>> from kombu.common import eventloop
        >>> it = eventloop(connection, timeout=1, ignore_timeouts=True)
        >>> it.next()  # one event consumed, or timed out.

    It also takes an optional limit parameter, and timeout errors
    are propagated by default::

        for _ in eventloop(connection, limit=1, timeout=1):
            pass

    .. seealso::

        :func:`itermessages`, which is an event loop bound to one or more
        consumers, that yields any messages received.
    """
    # BUG FIX: was 'limit and xrange(limit) or count()' — relies on the
    # Python-2-only xrange and on the fragile and/or conditional idiom.
    iterations = range(limit) if limit else count()
    for _ in iterations:
        try:
            yield conn.drain_events(timeout=timeout)
        except socket.timeout:
            if timeout and not ignore_timeouts:
                raise
        except socket.error:
            # Transient socket error: keep draining.
            pass
def send_reply(exchange, req, msg, producer=None, **props):
    # Reply with the same serializer the request was published with.
    content_type = req.content_type
    serializer = serialization.registry.type_to_name[content_type]
    maybe_declare(exchange, producer.channel)
    # Route the reply back using the request's reply_to / correlation_id
    # properties; caller-supplied props override these defaults.
    producer.publish(msg, exchange=exchange,
            **dict({"routing_key": req.properties["reply_to"],
                    "correlation_id": req.properties.get("correlation_id"),
                    "serializer": serializer},
                    **props))
def isend_reply(pool, exchange, req, msg, props, **retry_policy):
    """Reply to *req* using a producer from *pool*, with retry insurance."""
    reply_args = (exchange, req, msg)
    return ipublish(pool, send_reply, reply_args, props, **retry_policy)
def collect_replies(conn, channel, queue, *args, **kwargs):
    """Yield reply message bodies from *queue*, acking unless no_ack."""
    # Default to no_ack=True, and propagate that choice to itermessages.
    no_ack = kwargs.setdefault("no_ack", True)
    got_any = False
    for body, message in itermessages(conn, channel, queue, *args, **kwargs):
        if not no_ack:
            message.ack()
        got_any = True
        yield body
    if got_any:
        # Let the channel clean up reply state once replies were seen.
        channel.after_reply_message_received(queue.name)
def _ensure_errback(exc, interval):
    # Default errback: log the connection error and the retry delay.
    insured_logger.error(
        "Connection error: %r. Retry in %ss\n" % (exc, interval),
        exc_info=sys.exc_info())
def revive_connection(connection, channel, on_revive=None):
    """Run the optional *on_revive* callback with the revived channel."""
    callback = on_revive
    if callback:
        callback(channel)
def revive_producer(producer, channel, on_revive=None):
    """Revive the producer's connection, then run the optional callback."""
    revive_connection(producer.connection, channel)
    if on_revive:
        on_revive(channel)
def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts):
    """Ensures function performing broker commands completes
    despite intermittent connection failures.

    Fixes: repaired corrupted tokens in the autoretry call, and renamed
    the local wrapper so it no longer shadows this function's own name.
    """
    errback = errback or _ensure_errback
    with pool.acquire(block=True) as conn:
        conn.ensure_connection(errback=errback)
        # we cache the channel for subsequent calls, this has to be
        # reset on revival.
        channel = conn.default_channel
        revive = partial(revive_connection, conn, on_revive=on_revive)
        # autoretry wraps fun so it is retried (and the channel revived)
        # on connection errors.
        wrapped = conn.autoretry(fun, channel, errback=errback,
                                 on_revive=revive, **opts)
        retval, _ = wrapped(*args, **dict(kwargs, connection=conn))
        return retval
def ipublish(pool, fun, args=(), kwargs=None, errback=None, on_revive=None,
             **retry_policy):
    """Call *fun* with a producer from *pool*, retrying on connection errors.

    BUG FIX: the ``kwargs`` default was a mutable ``{}`` shared across
    calls; it now defaults to None and is normalized inside the function.
    """
    kwargs = {} if kwargs is None else kwargs
    with pool.acquire(block=True) as producer:
        errback = errback or _ensure_errback
        revive = partial(revive_producer, producer, on_revive=on_revive)
        # ensure() wraps fun so publishing survives transient disconnects.
        f = producer.connection.ensure(producer, fun, on_revive=revive,
                                       errback=errback, **retry_policy)
        return f(*args, **dict(kwargs, producer=producer))
def entry_to_queue(queue, **options):
binding_key = options.get("binding_key") or options.get("routing_key")
e_durable = options.get("exchange_durable")
if e_durable is None:
e_durable = options.get("durable")
e_auto_delete = options.get("exchange_auto_delete")
if e_auto_delete is None:
e_auto_delete = options.get("auto_delete")
q_durable = options.get("queue_durable")
if q_durable is None:
q_durable = options.get("durable")
q_auto_delete = options.get("queue_auto_delete")
if q_auto_delete is None:
q_auto_delete = options.get("auto_delete")
e_arguments = options.get("exchange_arguments")
q_arguments = options.get("queue_arguments")
b_arguments = options.get("binding_arguments")
exchange = Exchange(options.get("exchange"),
type=options.get("exchange_type"),
delivery_mode=options.get("delivery_mode"),
routing_key=options.get("routing_key"),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return Queue(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get("exclusive"),
auto_delete=q_auto_delete,
no_ack=options.get("no_ack"),
queue_arguments=q_arguments,
binding_argument |
#!/usr/bin/python
#
# Copyright 2014, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# native
'''
Created on 13 oct. 2014
@author: ronan.lemartret@open.eurogiciel.org
'''
# BUG FIX: 'sys' was imported *after* the except branch that uses it, so a
# missing python-cmdln raised NameError instead of the intended message.
# The bare 'except:' is also narrowed to ImportError.
import sys
import os

try:
    import cmdln
except ImportError:
    print >> sys.stderr, 'Error spec2yocto require "python-cmdln" please install it.'
    sys.exit( 1 )
#TODO need precision
#WARNING if patch can be a gz file
#WARNING if patch can be conpose by many commit
def isPatch(files) :
    """True when the file name contains a patch marker (.diff / .patch).

    This is deliberately a substring test, not a suffix test, so compressed
    patches such as "fix.diff.gz" are also counted.
    """
    return any(marker in files for marker in (".diff", ".patch"))
#TODO need precision
def isFromIntel(patch_file) :
    """True if the patch's From: header carries an Intel-affiliated address
    (intel.com, eurogiciel.org or fridu.org). Only plain .diff/.patch files
    are inspected."""
    if not patch_file.endswith(('.diff', '.patch')):
        return False
    domains = ("intel.com", "eurogiciel.org", "fridu.org")
    with open(patch_file, "r") as patch_fd:
        for line in patch_fd:
            if line.startswith("From:") and any(d in line for d in domains):
                return True
    return False
def count_intel_patch(SRCDIR,package_files):
    """Count patches under *SRCDIR* whose From: header is Intel-affiliated."""
    return sum(1 for name in package_files
               if isPatch(name) and isFromIntel(os.path.join(SRCDIR, name)))
def count_patch(package_files) :
    """Count how many entries of *package_files* look like patch files."""
    return sum(1 for name in package_files if isPatch(name))
#What if many spec file?
def get_license(SRCDIR,package_files) :
    """Return the License: tag of the first .spec file found, or ''."""
    for name in package_files:
        if ".spec" in name:
            return find_license(os.path.join(SRCDIR, name))
    return ""
#What if many license file?
#TODO need precision
def find_license(spec_file) :
    """Return the value of the first License: line in *spec_file*, with all
    spaces, tabs and newlines stripped out; '' when no tag is present."""
    with open(spec_file, "r") as spec_fd:
        for line in spec_fd:
            if "License:" in line:
                value = line.split("License:")[1]
                for ws in ("\n", "\t", " "):
                    value = value.replace(ws, "")
                return value
    return ""
class CheckRpmSrc(cmdln.Cmdln):
    """Command-line tool printing name/version/license/patch statistics for
    every source RPM directory found under --rpmsSRCDIR.

    Fixes: repaired a corrupted 'action' token in the option decorator,
    parenthesized the print call (same output on Python 2, parseable on 3)
    and renamed a local that shadowed the builtin 'license'.
    """
    name = "createVersionYoctoTizen"
    version = "0.1"

    @cmdln.option( "--rpmsSRCDIR",
                   action = "store",
                   default = "Tizen-rpm-source.html",
                   help = "the Tizen rpms source dir" )
    def do_status(self, subcmd, opts):
        """generate status

        ${cmd_usage}--
        ${cmd_option_list}
        """
        for package_rpm in os.listdir(opts.rpmsSRCDIR):
            package_dir = package_rpm
            # Directory names look like "<name>-<version>-<release>.src.rpm";
            # peel release, then version, from the right-hand end.
            release = package_rpm[package_rpm.rfind("-") + 1:].replace(".src.rpm", "")
            package_rpm = package_rpm[:package_rpm.rfind("-")]
            version = package_rpm[package_rpm.rfind("-") + 1:]
            name = package_rpm[:package_rpm.rfind("-")]
            package_files = os.listdir(os.path.join(opts.rpmsSRCDIR, package_dir))
            nb_patch = count_patch(package_files)
            pkg_license = get_license(os.path.join(opts.rpmsSRCDIR, package_dir), package_files)
            nb_intel_patch = count_intel_patch(os.path.join(opts.rpmsSRCDIR, package_dir), package_files)
            # NOTE(review): 'release' is computed but not reported — confirm
            # whether it should be part of the output.
            print("%s\t%s\t%s\t%s\t%s" % (name, version, pkg_license, nb_patch, nb_intel_patch))
def main():
    """Entry point: run the CheckRpmSrc command-line tool and exit with
    its status code."""
    sys.exit(CheckRpmSrc().main())

if __name__ == '__main__':
    main()
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import CallResult
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
    """
    Application component that provides procedures which
    return complex results.
    """

    def onConnect(self):
        # Join the application realm as soon as the transport is up.
        self.join("realm1")

    def onJoin(self, details):
        # Returns a CallResult carrying keyword components.
        def add_complex(a, ai, b, bi):
            return CallResult(c = a + b, ci = ai + bi)

        # Returns a CallResult carrying positional components.
        def split_name(fullname):
            forename, surname = fullname.split()
            return CallResult(forename, surname)

        for procedure, uri in ((add_complex, 'com.myapp.add_complex'),
                               (split_name, 'com.myapp.split_name')):
            self.register(procedure, uri)
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'scripting alerte info'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.format.fonctions import echapper_accolades
from primaires.format.date import get_date
class PrmInfo(Parametre):
    """Command 'scripting alerte info': show details of a scripting alert."""
    def __init__(self):
        """Parameter constructor."""
        Parametre.__init__(self, "info", "info")
        self.schema = "<nombre>"
        self.aide_courte = "affiche des informations sur l'alerte"
        self.aide_longue = \
            "Affiche des informations sur l'alerte permettant de la corriger."
    def interpreter(self, personnage, dic_masques):
        """Command interpretation method: look up the alert by number and
        send a formatted report to the requesting character."""
        nombre = dic_masques["nombre"].nombre
        try:
            alerte = type(self).importeur.scripting.alertes[nombre]
        except KeyError:
            personnage << "|err|Ce numéro d'alerte est invalide.|ff|"
        else:
            # Build a multi-line report: where the alert fired, which
            # event/test/line, the offending line and the error message.
            msg = "Informations sur l'alerte {} :".format(alerte.no)
            msg += "\n S'est produit sur {} {}".format(alerte.type,
                    alerte.objet) + " " + get_date(alerte.date.timetuple())
            msg += "\n Evenement {}, test {}, ligne {}".format(
                    alerte.evenement, echapper_accolades(alerte.test),
                    alerte.no_ligne)
            msg += "\n {}\n".format(echapper_accolades(alerte.ligne))
            msg += "\n Message d'erreur : |err|{}|ff|".format(
                    echapper_accolades(alerte.message))
            # Full Python traceback is reserved for administrators.
            if personnage.nom_groupe == "administrateur":
                msg += "\n Traceback Python :\n {}".format(
                        echapper_accolades(alerte.traceback))
            personnage << msg
|
from typing import List, Any, Mapping
from .utils import clean_filters
class DockerTasks(object):
    """Thin async client for the /tasks endpoints of the Docker API."""

    def __init__(self, docker):
        self.docker = docker

    async def list(self, *, filters: Mapping=None) -> List[Mapping]:
        """
        Return a list of tasks

        Args:
            filters: a collection of filters

            Available filters:
            desired-state=(running | shutdown | accepted)
            id=<task id>
            label=key or label="key=value"
            name=<task name>
            node=<node id or name>
            service=<service name>
        """
        query = {"filters": clean_filters(filters)}
        return await self.docker._query_json(
            "tasks",
            method='GET',
            params=query
        )

    async def inspect(self, task_id: str) -> Mapping[str, Any]:
        """
        Return info about a task

        Args:
            task_id: is ID of the task
        """
        endpoint = "tasks/{task_id}".format(task_id=task_id)
        return await self.docker._query_json(endpoint, method='GET')
|
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.test_util as test_util
volume = None
disconnect = False
host = None
def test():
    """Disconnect a cluster1 host by corrupting its SSH username, then try
    to create a thick-provisioned virtio-scsi volume pinned to cluster1.

    Fixes: repaired two corrupted lines (res_ops.HOST query and the
    systemtags1 literal) and parenthesized the print call (same output
    on Python 2, parseable on Python 3).
    """
    global disconnect, volume, host
    # query&get clusters
    cond = res_ops.gen_query_conditions('name', '=', "cluster1")
    cluster1 = res_ops.query_resource(res_ops.CLUSTER, cond)[0]
    cond = res_ops.gen_query_conditions('name', '=', "cluster2")
    cluster2 = res_ops.query_resource(res_ops.CLUSTER, cond)[0]
    # query&get hosts
    cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster1.uuid)
    cluster1_host = res_ops.query_resource(res_ops.HOST, cond)
    cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster2.uuid)
    cluster2_host = res_ops.query_resource(res_ops.HOST, cond)
    # Disconnect a cluster1 host: break its credentials, then reconnect is
    # expected to fail (best-effort: any exception marks it disconnected).
    host = cluster1_host[0]
    host_ops.update_kvm_host(host.uuid, 'username', "root1")
    try:
        host_ops.reconnect_host(host.uuid)
    except:
        test_util.test_logger("host: [%s] is disconnected" % host.uuid)
        disconnect = True
    # create_volume on 2 clusters
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
    systemtags1 = ["volumeProvisioningStrategy::ThickProvisioning", "capability::virtio-scsi",
                   "miniStorage::clusterUuid::%s" % cluster1.uuid]
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name("cluster1_volume")
    volume_creation_option.set_primary_storage_uuid(ps.uuid)
    volume_creation_option.set_system_tags(systemtags1)
    volume_creation_option.set_diskSize(2 * 1024 * 1024 * 1024)
    try:
        volume_inv = vol_ops.create_volume_from_diskSize(volume_creation_option)
    except Exception as e:
        # Restore host credentials so later cases are not poisoned.
        host_ops.update_kvm_host(host.uuid, 'username', "root")
        host_ops.reconnect_host(host.uuid)
        print(e.message.encode("utf-8"))
def error_cleanup():
    """On test failure, restore the host credentials if we broke them."""
    global host, disconnect
    if not disconnect:
        return
    # Put the real root username back and bring the host online again.
    host_ops.update_kvm_host(host.uuid, 'username', "root")
    host_ops.reconnect_host(host.uuid)
    disconnect = False
def env_recover():
    """Environment teardown: undo the credential change made by test()."""
    global host, disconnect
    if disconnect:
        # Same recovery as error_cleanup(): restore username, reconnect.
        host_ops.update_kvm_host(host.uuid, 'username', "root")
        host_ops.reconnect_host(host.uuid)
        disconnect = False
|
# 1. Convert 1024 to binary and hexadecimal representation:
x = 1024
y = format(x, '#b')  # same result as bin(x): '0b10000000000'
z = format(x, '#x')  # same result as hex(x): '0x400'
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/getaddons.ui'
#
# Created: Fri Aug 22 00:57:31 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # PyQt4 API v1: route byte strings through Qt's own UTF-8 conversion.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 (QString absent): strings are already unicode, pass through.
    def _fromUtf8(s):
        return s
try:
    # Older PyQt4: QApplication.translate takes an explicit encoding argument.
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 dropped the encoding parameter from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
    """pyuic4-generated layout for the 'Install Add-on' dialog.

    Generated from designer/getaddons.ui — manual edits will be lost when the
    UI file is recompiled.
    """
    def setupUi(self, Dialog):
        """Build the widget tree on *Dialog* and wire the OK/Cancel buttons."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(367, 204)
        # Vertical layout: info label, stretch spacer, code row, button box.
        self.verticalLayout = QtGui.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setWordWrap(True)
        self.label.setObjectName(_fromUtf8("label"))
        self.verticalLayout.addWidget(self.label)
        spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        self.verticalLayout.addItem(spacerItem)
        # Horizontal row holding the "Code:" label and its line edit.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label_2 = QtGui.QLabel(Dialog)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.horizontalLayout.addWidget(self.label_2)
        self.code = QtGui.QLineEdit(Dialog)
        self.code.setObjectName(_fromUtf8("code"))
        self.horizontalLayout.addWidget(self.code)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(Dialog)
        # Old-style signal/slot connections: button box drives dialog result.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set all user-visible strings.

        NOTE(review): uses a module-level _() translator rather than the
        generated _translate(); _ must be provided by the importing module —
        confirm against the surrounding application.
        """
        Dialog.setWindowTitle(_("Install Add-on"))
        self.label.setText(_("To browse add-ons, please click the browse button below.<br><br>When you\'ve found an add-on you like, please paste its code below."))
        self.label_2.setText(_("Code:"))
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Licen | se for the specific language g | overning permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetFinding
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-websecurityscanner
# [START websecurityscanner_v1alpha_generated_WebSecurityScanner_GetFinding_sync]
from google.cloud import websecurityscanner_v1alpha
def sample_get_finding():
    """Fetch a single Finding via the Web Security Scanner API and print it."""
    # Build the client, construct the request, issue the RPC, show the result.
    client = websecurityscanner_v1alpha.WebSecurityScannerClient()
    request = websecurityscanner_v1alpha.GetFindingRequest(name="name_value")
    response = client.get_finding(request=request)
    print(response)
# [END websecurityscanner_v1alpha_generated_WebSecurityScanner_GetFinding_sync]
|
nascii, pyasn1, pyasn1.codec.ber.decoder, pycurl, StringIO
# Signature-type identifier bytes embedded alongside the signature data.
SIGNATURE_NONE = b'\x00'
SIGNATURE_ECDSA_SHA256_SECP224R1 = b'\x01'
# Dump integer to a C array, used for debugging only
def c_array(name, val):
    """Print *val* (a sequence of byte values) as a C `uint8_t` array literal.

    Debugging helper only: output goes to stdout, nothing is returned.
    """
    length = len(val)
    res = ""
    for i in range(0, length):
        if (i % 16) == 0:
            res = res + "  "
        res = res + "0x%02x, " % val[i]
        # Break the line every 16 bytes and after the final byte.
        # (Fixed: use logical `or`, not bitwise `|`, on boolean operands.)
        if ((i % 16) == 15) or (i == (length - 1)):
            res = res + "\n"
    res = "  uint8_t " + name + "[] = {\n" + res + "  };"
    # print() with a single argument is valid on both Python 2 and 3.
    print(res)
# Read a little endian value from an ELF file
def elf_read_le(buf, offset, n):
    """Decode an n-byte little-endian unsigned integer from buf[offset:]."""
    # Byte k contributes its value shifted left by 8*k bits (LSB first).
    return sum(ord(buf[offset + k]) << (8 * k) for k in range(n))
# Replace the contents of an ELF section. Used to replace blank signature data with the actual signature.
# binutils objcopy has a new command '--update-section .sectionname=file', but is not yet available. This does the same thing.
def elf_update_section(elfPath, sectionName, sectionData):
    """Replace the contents of ELF section *sectionName* with *sectionData*.

    Equivalent to binutils `objcopy --update-section`: the replacement data
    must match the existing section size exactly, and the file is rewritten
    in place. Only 32-bit little-endian ELF version 1 files are supported.
    """
    sectionTuple = collections.namedtuple("sectionTuple", "name_offset, name, offset, size")
    # Read in the original ELF
    elfSize = os.stat(elfPath).st_size
    elf = open(elfPath, "rb")
    output = elf.read()
    elf.close()
    # Do some sanity checking on the ELF file headers
    magic = output[0:4]
    assert magic == b'\x7fELF', 'Magic number does not match'
    ei_class = output[4]
    assert ei_class == b'\x01', 'Only 32-bit ELF files are supported'
    ei_data = output[5]
    # BUG FIX: this assert previously re-tested ei_class, so the endianness
    # byte (EI_DATA) was never actually checked and a big-endian file could
    # slip through.
    assert ei_data == b'\x01', "Only LE ELF files are supported"
    ei_version = output[6]
    assert ei_version == b'\x01', "Only ELF version 1 is supported"
    e_shoff = elf_read_le(output, 0x20, 4) # Start of section header table
    e_shentsize = elf_read_le(output, 0x2e, 2) # Size of a section header table entry
    e_shnum = elf_read_le(output, 0x30, 2) # Number of entries in the section header table
    e_shstrndx = elf_read_le(output, 0x32, 2) # Index of the section header table entry that contains the section names
    assert (e_shoff + (e_shnum * e_shentsize)) <= elfSize, "Section header runs past end of file"
    assert e_shstrndx <= e_shnum, "Section name index > number of sections"
    # Read in all the sections in the table
    sections = []
    for i in range(0, e_shnum):
        sh_name = elf_read_le(output, (e_shoff + (i * e_shentsize) + 0), 4)
        sh_offset = elf_read_le(output, (e_shoff + (i * e_shentsize) + 16), 4)
        sh_size = elf_read_le(output, (e_shoff + (i * e_shentsize) + 20), 4)
        assert (sh_offset + sh_size) <= elfSize, "Section data runs past end of file"
        s = sectionTuple(name_offset = sh_name, name = "", offset = sh_offset, size = sh_size)
        sections.append(s)
    # Lookup the section names in the string table (names are NUL-terminated).
    for i in range(0, e_shnum):
        s = sectionTuple(name_offset = sections[i].name_offset, \
            name = output[(sections[e_shstrndx].offset + sections[i].name_offset):].partition(b'\x00')[0], \
            offset = sections[i].offset, \
            size = sections[i].size)
        sections[i] = s
    # Find the section we want to update
    sectionIndex = -1
    for i in range(0, e_shnum):
        if sections[i].name == sectionName:
            sectionIndex = i
    assert sectionIndex >= 0, "Section %s not found in ELF" % sectionName
    assert len(sectionData) == sections[sectionIndex].size, "Size of signature data file (%d) doesn't match size of section (%d)" % (len(sectionData), sections[sectionIndex].size)
    # Replace the ELF section with the new content; everything else is
    # byte-for-byte identical, so no header offsets need fixing up.
    output = output[0:sections[sectionIndex].offset] + \
        sectionData + \
        output[(sections[sectionIndex].offset + sections[sectionIndex].size):]
    elf = open(elfPath, "wb")
    elf.write(output)
    elf.close()
# Dump an integer as a byte array, in the big endian format used by micro-ecc
def int_to_bytearray(val, length):
    """Dump *val* into a big-endian bytearray of exactly *length* bytes.

    Matches the byte order used by micro-ecc. Asserts if the value does not
    fit in *length* bytes.
    """
    out = bytearray(length)
    pos = length - 1
    while pos >= 0:
        out[pos] = val & 0xff
        val >>= 8
        pos -= 1
    assert val == 0, "Dumped int to C array, but length %i not big enough" % length
    return out
def main(argv):
elfPath = sys.argv[1] # Path to the ELF we want to sign
signingKey = sys.argv[2] # Key to use (either a service URL or a local key path)
sectionName = sys.argv[3] # ELF section to replace with signature
elf2binCmd = sys.argv[4] # Command for converting ELF to in-memory binary representation
# Generate a tempfile that we can dump the binary to. Objdump cannot dump to a pipe.
tempBinFile = tempfile.NamedTemporaryFile();
elf2binCmdline = elf2binCmd + " " + elfPath + " " + tempBinFile.name
if 'debug' in globals():
print "Signing %s, section '%s' using %s" % (elfPath, sectionName, signingKey)
print "Generating bin using '%s'" % elf2binCmdline
# Generate the binary that we sign (the provided command removes the signature placeholder section)
os.system(elf2binCmdline);
# Compute the SHA-256 hash of the image we are signing
h = open(tempBinFile.name)
hash = binascii.hexlify(hashlib.sha256(h.read()).digest())
h.close()
# Dump out the length and hash of the signed image
if 'debug' in globals():
print "Signed length = %d bytes" % os.stat(tempBinFile.name).st_size
print "Image SHA-256 = %s" % hash
# If the signingKey looks like a URL, we do online signing; otherwise, use a locally stored key
if signingKey.startswith('https://'):
# Append the hash to the URL
signingKey = signingKey + "&hash=" + hash
if 'debug' in globals():
print "Signing using remote service URL: %s" % signingKey
# Get the auth parameter that should have been exported from the environment
assert 'auth' in os.environ, "Signing service credentials 'auth' not exported from environment"
# Use cURL to request signing by the service
buffer = StringIO.StringIO()
curl = pycurl.Curl()
curl.setopt(pycurl.URL, signingKey)
if 'allowSelfSignedTLSCerts' in globals():
curl.setopt(pycurl.SSL_VERIFYPEER, False)
curl.setopt(pycurl.FAILONERROR, True)
curl.setopt(pycurl.WRITEDATA, buffer)
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
curl.setopt(pycurl.USERPWD, os.environ['auth'])
try:
curl.perform()
except pycurl.error as e:
# Handle HTTP error return codes user the assert below, to make it easier to diagnose issues
if e.args[0] != curl.E_HTTP_RETURNED_ERROR:
raise e
http_code = curl.getinfo(pycurl.HTTP_CODE)
assert http_code == 200, "HTTP error %d returned by service" % http_code
curl.close()
signature = buffer.getvalue()
assert len(signature) == (2 * 60), "Signature returned by service has wrong length (%d != %d)" % (len(signature), (2 * 60))
if 'debug' in globals():
print "Service returned signature: %s" % signature
sig = bytearray(binascii.unhexlify(signature))
else:
if 'debug' in globals():
print "Signing using locally stored key"
# Sign the binary, currently using a local key and OpenSSL.
process = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", signingKey, tempBinFile.name], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
# Extract the signature values from the DER output
res = pyasn1.codec.ber.decoder.decode(stdout)[0]
assert len(res) == 2, "Expected 2 values in generated EC signature, got %d" % len(res)
assert isinstance(res.getComponentByPosition(0), pyasn1.type.univ.Integer), "EC signature result values weren't integers"
assert isinstance(res.getComponentByPosition(1), pyasn1.type.univ.Integer), "EC signature result values weren't integers"
r = int(res.getComponentByPosition(0))
s = int(res.getComponentByPosition(1))
# Generate the signature block.
# The size of this signature block needs to match the size |
must be grayscale.
"""
if image.shape[-1] in (3, 4):
msg = "threshold_otsu is expected to work correctly only for " \
"grayscale images; image shape {0} looks like an RGB image"
warnings.warn(msg.format(image.shape))
hist, bin_centers = histogram(image.ravel(), nbins)
hist = hist.astype(float)
# class probabilities for all possible thresholds
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of `weight1`/`mean1` should pair with zero values in
# `weight2`/`mean2`, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
def threshold_yen(image, nbins=256):
    """Return threshold value based on Yen's method.
    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    Returns
    -------
    threshold : float
        Upper threshold value. All pixels intensities that less or equal of
        this value assumed as foreground.
    References
    ----------
    .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
           for Automatic Multilevel Thresholding" IEEE Trans. on Image
           Processing, 4(3): 370-378
    .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
    Examples
    --------
    >>> from skimage.data import camera
    >>> image = camera()
    >>> thresh = threshold_yen(image)
    >>> binary = image <= thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)
    if bin_centers.size == 1:
        # Degenerate histogram (e.g. a constant integer image): only one bin,
        # so its centre is the only possible threshold.
        return bin_centers[0]
    # Probability mass function and its cumulative sums.
    prob = hist.astype(np.float32) / hist.sum()
    cum_p = np.cumsum(prob)
    cum_p_sq = np.cumsum(prob ** 2)
    # Cumulative sum of squares accumulated from the high end of the histogram.
    rev_cum_p_sq = np.cumsum(prob[::-1] ** 2)[::-1]
    # Yen's criterion. The +1 index shift paired with cum_p[:-1] avoids -inf
    # values in the log; the ImageJ implementation replaces those by zero.
    crit = np.log(((cum_p_sq[:-1] * rev_cum_p_sq[1:]) ** -1) *
                  (cum_p[:-1] * (1.0 - cum_p[:-1])) ** 2)
    return bin_centers[crit.argmax()]
|
def threshold_isodata(image, nbins=256, return_all=False):
    """Return threshold value(s) based on ISODATA method.
    Histogram-based threshold, known as Ridler-Calvard method or inter-means.
    Threshold values returned satisfy the following equality:
    `threshold = (image[image <= threshold].mean() +`
                 `image[image > threshold].mean()) / 2.0`
    That is, returned thresholds are intensities that separate the image into
    two groups of pixels, where the threshold intensity is midway between the
    mean intensities of these groups.
    For integer images, the above equality holds to within one; for floating-
    point images, the equality holds to within the histogram bin-width.
    Parameters
    ----------
    image : array
        Input image.
    nbins : int, optional
        Number of bins used to calculate histogram. This value is ignored for
        integer arrays.
    return_all: bool, optional
        If False (default), return only the lowest threshold that satisfies
        the above equality. If True, return all valid thresholds.
    Returns
    -------
    threshold : float or int or array
        Threshold value(s).
    References
    ----------
    .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
           iterative selection method"
    .. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
           http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039
    .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
           Techniques and Quantitative Performance Evaluation" Journal of
           Electronic Imaging, 13(1): 146-165,
           http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
    .. [4] ImageJ AutoThresholder code,
           http://fiji.sc/wiki/index.php/Auto_Threshold
    Examples
    --------
    >>> from skimage.data import coins
    >>> image = coins()
    >>> thresh = threshold_isodata(image)
    >>> binary = image > thresh
    """
    hist, bin_centers = histogram(image.ravel(), nbins)
    # image only contains one unique value
    if len(bin_centers) == 1:
        if return_all:
            return bin_centers
        else:
            return bin_centers[0]
    hist = hist.astype(np.float32)
    # csuml and csumh contain the count of pixels in that bin or lower, and
    # in all bins strictly higher than that bin, respectively
    csuml = np.cumsum(hist)
    csumh = np.cumsum(hist[::-1])[::-1] - hist
    # intensity_sum contains the total pixel intensity from each bin
    intensity_sum = hist * bin_centers
    # lower_mean and higher_mean contain the average value of all pixels in
    # that bin or lower, and in all bins strictly higher than that bin,
    # respectively. (Renamed from the single letters `l`/`h`: `l` is visually
    # ambiguous with `1`, flagged by pycodestyle E741.)
    # Note that since exp.histogram does not include empty bins at the low or
    # high end of the range, csuml and csumh are strictly > 0, except in the
    # last bin of csumh, which is zero by construction.
    # So no worries about division by zero in the following lines, except
    # for the last bin, but we can ignore that because no valid threshold
    # can be in the top bin. So we just patch up csumh[-1] to not cause 0/0
    # errors.
    csumh[-1] = 1
    lower_mean = np.cumsum(intensity_sum) / csuml
    higher_mean = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh
    # isodata finds threshold values that meet the criterion t = (l + h)/2
    # where l is the mean of all pixels <= t and h is the mean of all pixels
    # > t, as calculated above. So we are looking for places where
    # (l + h) / 2 equals the intensity value for which those l and h figures
    # were calculated -- which is, of course, the histogram bin centers.
    # We only require this equality to be within the precision of the bin
    # width, of course.
    all_mean = (lower_mean + higher_mean) / 2.0
    bin_width = bin_centers[1] - bin_centers[0]
    # Look only at thresholds that are below the actual all_mean value,
    # for consistency with the threshold being included in the lower pixel
    # group. Otherwise can get thresholds that are not actually fixed-points
    # of the isodata algorithm. For float images, this matters less, since
    # there really can't be any guarantees anymore anyway.
    distances = all_mean - bin_centers
    thresholds = bin_centers[(distances >= 0) & (distances < bin_width)]
    if return_all:
        return thresholds
    else:
        return thresholds[0]
def threshold_li(image):
"""Return threshold value based on adaptation of Li's Minimum Cross Entropy method.
Parameters
----------
image : array
Input image.
Returns
-------
threshold : float
Upper threshold value. All pixels intensities more than
this value are assumed to be foreground.
References
----------
.. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
Pattern Recognition, 26(4): 617-625
.. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
.. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresho |
# -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# 2014-10-17 Fix the empty result problem
# Cornelius Kölbel, <cornelius@privacyidea.org>
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__="""This is the BaseClass for audit trails
The audit is supposed to work like this. First we need to create an audit
object. E.g. this can be done in the before_request:
g.audit_object = getAudit(file_config)
During the request, the g.audit_object can be used to add audit information:
g.audit_object.log({"client": "123.2.3.4", "action": "validate/check"})
Thus at many different places in the code, audit information can be added to
the audit object.
Finally the audit_object needs to be stored to the audit storage. So we call:
g.audit_object.finalize_log()
which creates a signature of the audit data and writes the data to the audit
storage.
"""
import logging
log = logging.getLogger(__name__)
from privacyidea.lib.log import log_with
@log_with(log)
def getAuditClass(packageName, className):
    """Dynamically import ``className`` from ``packageName`` and return it.

    Example::

        getAuditClass("privacyidea.lib.auditmodules.sqlaudit", "Audit")

    The returned class must provide a ``log`` method; otherwise a NameError
    is raised so that misconfigured audit modules fail fast.
    """
    module = __import__(packageName, globals(), locals(), [className])
    audit_class = getattr(module, className)
    log.debug("klass: %s" % audit_class)
    if not hasattr(audit_class, "log"):  # pragma: no cover
        raise NameError("Audit AttributeError: " + packageName + "." +
                        className + " instance has no attribute 'log'")
    return audit_class
@log_with(log)
def getAudit(config):
    """Instantiate the audit object configured for this installation.

    The module path is read from the ``PI_AUDIT_MODULE`` config entry, e.g.::

        PI_AUDIT_MODULE = privacyidea.lib.auditmodules.sqlaudit

    Each audit module (at the moment only SQL) has its own additional config
    entries, so the full config is passed to the constructor.

    :param config: The config entries from the file config
    :return: Audit Object
    """
    audit_module = config.get("PI_AUDIT_MODULE")
    return getAuditClass(audit_module, "Audit")(config)
@log_with(log)
def search(config, param=None, user=None):
    """Return a page of audit entries plus pagination metadata.

    The pagination controls ``sortorder``, ``page`` and ``page_size`` are
    popped out of *param* (mutating the caller's dict, as before); whatever
    remains is passed to the audit backend as the filter dictionary.

    :param config: The config entries from the file config
    :return: Audit dictionary with information about the previous and next
        pages.
    """
    audit = getAudit(config)
    param = param or {}
    # pop() both reads the special keys and removes them from the filter.
    sortorder = param.pop("sortorder", "desc")
    page = param.pop("page", 1)
    page_size = param.pop("page_size", 15)
    pagination = audit.search(param, sortorder=sortorder, page=page,
                              page_size=page_size)
    return {"auditdata": pagination.auditdata,
            "prev": pagination.prev,
            "next": pagination.next,
            "current": pagination.page,
            "count": pagination.total}
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Sou | rce Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redis | tribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import account_inv
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
build configuration.
All build configuration data is composed of basic python builtin types and higher-level
configuration objects that aggregate configuration data. Configuration objects can carry a name
in which case they become addressable and can be reused.
"""
# Internal book-keeping fields to exclude from hash codes/equality checks.
_SPECIAL_FIELDS = ('extends', 'merges', 'type_alias')
  def __init__(self, abstract=False, extends=None, merges=None, **kwargs):
    """Creates a new configuration data blob.
    By default configurations are anonymous (un-named), concrete (not `abstract`), and they neither
    inherit nor merge another configuration.
    Inheritance is only allowed via one of the `extends` or `merges` channels, it is an error to
    specify both. A configuration can be semantically abstract without setting `abstract=True`.
    The `abstract` value can serve as documentation, or, for subclasses that provide an
    implementation for `validate_concrete`, it allows skipping validation for abstract instances.
    :param bool abstract: `True` to mark this configuration item as abstract, in which case no
                          validation is performed (see `validate_concrete`); `False` by default.
    :param extends: The configuration instance to inherit field values from. Any shared fields are
                    over-written with this instances values.
    :type extends: An addressed or concrete configuration instance that is a type compatible with
                   this configuration or this configurations superclasses.
    :param merges: The configuration instance to merge this instances field values with. Merging is
                   like extension except for containers, which are extended instead of replaced; ie:
                   any `dict` values are updated with this instances items and any `list` values are
                   extended with this instances items.
    :type merges: An addressed or concrete configuration instance that is a type compatible with
                  this configuration or this configurations superclasses.
    :param **kwargs: The configuration parameters.
    """
    # All field values, including `abstract`, live in one dict keyed by field name.
    self._kwargs = kwargs
    self._kwargs['abstract'] = abstract
    # NOTE: these assignments go through the `addressable` descriptors declared
    # below, not plain attribute storage.
    self.extends = extends
    self.merges = merges
    # Allow for configuration items that are directly constructed in memory. These can have an
    # address directly assigned (vs. inferred from name + source file location) and we only require
    # that if they do, their name - if also assigned, matches the address.
    if self.address:
      if self.name and self.name != self.address.target_name:
        self.report_validation_error('Address and name do not match! address: {}, name: {}'
                                     .format(self.address, self.name))
      self._kwargs['name'] = self.address.target_name
    # Lazily computed by whatever consumes the hashable key; starts unset.
    self._hashable_key = None
@property
def name(self):
"""Return the name of this object, if any.
In general configuration objects need not be named, in which case they are generally embedded
objects; ie: attributes values of enclosing named configuration objects. Any top-level
configuration object, though, will carry a unique name (in the configuration object's enclosing
namespace) that can be used to address it.
:rtype: string
"""
return self._kwargs.get('name')
@property
def address(self):
"""Return the address of this object, if any.
In general configuration objects need not be identified by an address, in which case they are
generally embedded objects; ie: attributes values of enclosing named configuration objects.
Any top-level configuration object, though, will be identifiable via a unique address.
:rtype: :class:`pants.build_graph.address.Address`
"""
return self._kwargs.get('address')
@property
def type_alias(self):
"""Return the type alias this target was constructed via.
For a target read from a BUILD file, this will be target alias, like 'java_library'.
For a target constructed in memory, this will be the simple class name, like 'JavaLibrary'.
The end result is that the type alias should be the most natural way to refer to this target's
type to the author of the target instance.
:rtype: string
"""
return self._kwargs.get('type_alias', type(self).__name__)
@property
def abstract(self):
"""Return `True` if this object has been marked as abstract.
Abstract objects are not validated. See: `validate_concrete`.
:rtype: bool
"""
return self._kwargs['abstract']
# It only makes sense to inherit a subset of our own fields (we should not inherit new fields!),
# our superclasses logically provide fields within this constrained set.
# NB: Since `Configuration` is at base an ~unconstrained struct, a superclass does allow for
# arbitrary and thus more fields to be defined than a subclass might logically support. We
# accept this hole in a trade for generally expected behavior when `Configuration` is subclassed
# in the style of constructors with named parameters representing the full complete set of
# expected parameters leaving **kwargs only for use by 'the system'; ie for `type_alias` and
# `address` plumbing for example.
#
# Of note is the fact that we pass a constraint type and not a concrete constraint value. This
# tells addressable to use `SuperclassesOf([Configuration instance's type])`, which is what we
# want. Aka, for `ConfigurationSubclassA`, the constraint is
# `SuperclassesOf(ConfigurationSubclassA)`.
#
  @addressable(SuperclassesOf)
  def extends(self):
    """Return the object this object extends, if any.

    The body is intentionally empty: the `addressable` descriptor supplies
    storage and address resolution for this field.

    :rtype: Serializable
    """
  @addressable(SuperclassesOf)
  def merges(self):
    """Return the object this object merges in, if any.

    The body is intentionally empty: the `addressable` descriptor supplies
    storage and address resolution for this field.

    :rtype: Serializable
    """
  def _asdict(self):
    """Return the raw field dict backing this configuration.

    NOTE(review): this is the internal dict itself, not a copy — mutations by
    callers are visible on the instance.
    """
    return self._kwargs
def _extract_inheritable_attributes(self, serializable):
attributes = serializable._asdict().copy()
# Allow for un-named (embedded) objects inheriting from named objects
attributes.pop('name', None)
attributes.pop('address', None)
# We should never inherit special fields - these are for local book-keeping only.
for field in self._SPECIAL_FIELDS:
attributes.pop(field, None)
return attributes
  def create(self):
    """Resolve this configuration against its `extends`/`merges` parent.

    Returns a new instance of this configuration's type with inherited
    attributes applied, or `self` unchanged when there is no parent.
    Specifying both `extends` and `merges` is a validation error.
    """
    if self.extends and self.merges:
      self.report_validation_error('Can only inherit from one object. Both extension of {} and '
                                   'merging with {} were requested.'
                                   .format(self.extends.address, self.merges.address))
    if self.extends:
      # Extension: our own non-special, non-None values win over the parent's.
      attributes = self._extract_inheritable_attributes(self.extends)
      attributes.update((k, v) for k, v in self._asdict().items()
                        if k not in self._SPECIAL_FIELDS and v is not None)
      configuration_type = type(self)
      return configuration_type(**attributes)
    elif self.merges:
      # Merge: like extension, except containers are combined instead of
      # replaced — dicts are updated with our items, lists extended with them.
      attributes = self._extract_inheritable_attributes(self.merges)
      for k, v in self._asdict().items():
        if k not in self._SPECIAL_FIELDS:
          if isinstance(v, MutableMapping):
            mapping = attributes.get(k) or {}
            mapping.update(v)
            attributes[k] = mapping
          elif isinstance(v, MutableSequence):
            sequence = attributes.get(k) or []
            sequence.extend(v)
            attributes[k] = sequence
          elif v is not None:
            attributes[k] = v
      configuration_type = type(self)
      return configuration_type(**attributes)
    else:
      # No inheritance requested: nothing to do.
      return self
def validate(self):
if not self.abstract:
self.validate_concrete()
def report_validation_error(self, message):
"""Raises a properly identified validation error.
:param string message: An error message describing the validation error.
:raises: :class:`pants.engine.exp.objects.ValidationError`
"""
raise ValidationError(self.address, message)
def validate_concrete(self):
"""Subclasses can override to implement validation logic.
The object will be fully hydrated state and |
dation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__author__ = "Michael Cohen <scudette@google.com>"
"""This implements the file finder flow.
This flow is the workhorse of filesystem operations.
"""
import collections
from rekall import plugin
from rekall_agent import flow
from rekall_agent import result_collections
from rekall_agent.client_actions import download
from rekall_agent.client_actions import files
from rekall_agent.flows import collect
from rekall_lib import serializer
from rekall_lib import utils
from rekall_lib.types import agent
class FileFilterCondition(serializer.SerializedObject):
    """Baseclass for all file filter conditions."""

    def get_efilter_clause(self):
        """Return an efilter boolean clause; the base condition matches everything."""
        return "1"
class ModificationTimeCondition(FileFilterCondition):
    """Restrict results to files whose mtime lies in a [min, max] window."""

    schema = [
        dict(name="min", type="epoch", default=None,
             doc="Only select files that have an mtime after "
             "this value."),

        dict(name="max", type="epoch", default=None,
             doc="Only select files that have an mtime before "
             "this value."),
    ]

    def get_efilter_clause(self):
        """Build the efilter clause for this condition.

        Returns "1" (match everything, same as the base class) when neither
        bound is set; previously an unset condition produced the invalid
        clause "()".
        """
        result = []
        if self.min:
            result.append("Path.st_mtime > %s" % self.min)

        if self.max:
            result.append("Path.st_mtime < %s" % self.max)

        if not result:
            return "1"

        return "(" + " and ".join(result) + ")"
class FileFinderFlow(collect.CollectFlow):
    """Search the client filesystem by glob, filter, and optionally download."""

    # Name template for the output collection; timestamp is filled at runtime.
    _collection_name = "file_finder_{timestamp}"

    schema = [
        dict(name="globs", repeated=True, user=True,
             doc="Globs to search in client."),

        dict(name="conditions", type=FileFilterCondition, repeated=True,
             doc="One or more filter conditions to restrict results."),

        dict(name="download", type="bool", user=True,
             doc="Should we download the file?"),

        dict(name="path_sep",
             doc="Glob path separator"),
    ]

    def validate(self):
        """Check flow arguments; at least one glob is required."""
        super(FileFinderFlow, self).validate()
        if not self.globs:
            raise plugin.InvalidArgs("Some globs must be provided.")

    def create_query(self, collection):
        """Make an efilter query from all the flow parameters.

        Combines the high level FileFinder filter specifications to actionable
        efilter query.
        """
        # This code just saves some typing :-).
        # Start by selecting every stat column straight off the Path object...
        column_spec = collections.OrderedDict()
        for x in collection.tables[0].columns:
            column_spec[x.name] = "path.%s" % x.name

        # ...then override the columns that need special extraction.
        column_spec["dirname"] = "path.filename.dirname"
        column_spec["filename"] = "path.filename.basename"
        column_spec["st_mode_str"] = "str(path.st_mode)"
        column_spec["st_uid"] = "path.st_uid.uid"
        column_spec["st_gid"] = "path.st_gid.gid"

        columns = ["%s as %s" % (v, k) for k, v in column_spec.items()]

        # {globs} and {path_sep} stay as template parameters; they are bound
        # via query_parameters in generate_actions().
        result = (
            "select %s from glob({globs}, path_sep: {path_sep})" %
            ",".join(columns))

        # Filter conditions are specified.
        if self.conditions:
            parts = [x.get_efilter_clause() for x in self.conditions]
            result += " where " + " and ".join(parts)

        return dict(mode_live=result)

    def generate_actions(self):
        """Yield the client action that collects (and maybe downloads) files."""
        # Make a collection to store the result.
        collection = files.StatEntryCollection.from_keywords(
            session=self._session,
            location=self.get_location(),
        )

        location = None
        if self.download:
            # Downloaded contents go to a per-hunt or per-client "files" area.
            if self.is_hunt():
                location = self._config.server.hunt_vfs_path_for_client(
                    self.flow_id, vfs_type="files",
                    path_template="{client_id}/{subpath}",
                    expiration=self.expiration())
            else:
                location = self._config.server.vfs_prefix_for_client(
                    self.client_id, vfs_type="files",
                    expiration=self.expiration())

        yield download.GetFiles.from_keywords(
            session=self._session,
            query=self.create_query(collection),
            query_parameters=dict(globs=self.globs,
                                  path_sep=self.path_sep),
            collection=collection,
            location=location
        )
class VFSIndex(result_collections.GenericSQLiteCollection):
    """The VFS index manages the VFS.

    The VFS is constructed by merging one or more different StatEntryCollection
    collections into a single coherent view. In order to know which
    StatEntryCollection represents which specific directory we need a fast
    lookup index - which is managed in this collection.
    """
    # Single-table schema; one row per StatEntryCollection known to the VFS.
    _tables = [dict(
        name="default",

        # Each entry represents one StatEntryCollection().
        columns=[
            # The top level directory contained in this collection.
            dict(name="dirname"),

            # The end depth of this collection.
            dict(name="end_depth", type="int"),

            # The age of this collection.
            dict(name="timestamp", type="epoch"),

            # Where it is.
            dict(name="location_path"),
        ]
    )]
class ListDirectory(agent.Flow):
    """Maintain the client VFS view.

    Rekall maintains a view of the client's filesystem called the VFS (Virtual
    File System). The view is maintained by collecting stat() entries from the
    client in many StatEntryCollection() collections and storing them in the
    client's bucket namespace.

    This flow (ListDirectory) is responsible for creating and managing these
    collections into a unified VFS that can be browsed with the `vfs_ls` and
    `vfs_cp` plugins.
    """
    schema = [
        dict(name="path", user=True,
             doc="The name of the directory to list."),

        # NOTE(review): default=1 means the listing is non-recursive unless a
        # larger depth is explicitly given -- the doc text overstates this.
        dict(name="depth", type="int", default=1, user=True,
             doc="If set we recursively list all directories."),
    ]
    def get_location(self):
        """Work out where the agent should store the collection."""
        if self.is_hunt():
            # Hunt results are grouped per-hunt, then per-client.
            return self._config.server.hunt_vfs_path_for_client(
                self.flow_id, self.path, vfs_type="metadata",
                expiration=self.expiration())

        # Regular flows get one collection per (path, flow_id) pair.
        return self._config.server.vfs_path_for_client(
            self.client_id, "%s/%s" % (self.path, self.flow_id),
            expiration=self.expiration(), vfs_type="collections",
            mode="w")
    def validate(self):
        """Reject flows that do not specify a directory path."""
        super(ListDirectory, self).validate()
        if not self.path:
            raise plugin.InvalidArgs("Path must be set")
    def generate_actions(self):
        # Single client action: stat the directory tree (to `depth`) and
        # write the results straight to the computed VFS location.
        yield files.ListDirectoryAction.from_keywords(
            session=self._session,
            path=self.path,
            depth=self.depth,
            vfs_location=self.get_location(),
        )
    def post_process(self, tickets):
        """Post process the list directory collection.

        We want to maintain an easier to navigate view of the client's VFS in
        the client's namespace. We place a StatEntryCollection at each directory
        location and write all the files within that directory.
        """
        super(ListDirectory, self).post_process(tickets)
        # Hunts do not maintain a per-client VFS index.
        if self.is_hunt():
            return

        # Update the index inside a transaction so concurrent flows cannot
        # clobber each other's entries.
        VFSIndex.transaction(
            self._config.server.vfs_index_for_server(self.client_id),
            self._update_vfs_index,
            tickets,
            session=self._session)
def _update_vfs_index(self, index_collection, tickets):
"""Extract all the directories and store them in the index."""
path = utils.normpath(self.path)
for ticket in tickets:
for collection in ticket.collections:
index_collection.in |
"""Custom exceptions for ExecutionContext package
"""
from generic_utils.exceptions import GenUtilsException
from generic_utils.exceptions import GenUtilsKeyError
from generic_utils.exceptions import GenUtilsRuntimeError
class ExecutionContextStackEmptyError(GenUtilsException):
    """Raised when the execution-context stack is empty and an operation
    requires at least one frame.
    """
    pass
class ExecutionContextValueDoesNotExist(GenUtilsKeyError):
    """Raised when attempting to get a value that does not exist in a backend"""
    # `key` is interpolated into `message`; set to the missing key when raised.
    message = "Could not get key={key} from ExecutionContext."
    key = None
class ExecutionContextRuntimeError(GenUtilsRuntimeError):
    """Raised when ExecutionContextStack can not recover from an unknown problem."""
    # `reason` is interpolated into `message`; set when the error is raised.
    message = "ExecutionContextStack could not complete operation due to reason={reason}."
    reason = None
|
# decodex - simple enigma decoder.
#
# Copyright (c) 2013 Paul R. Tagliamonte <tag@pault.ag>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
def cleanup(what):
    """Normalize a dictionary line: trim whitespace, lowercase, drop apostrophes."""
    normalized = what.strip().lower()
    return normalized.replace("'", "")
def issubset(superstr, substr):
    """Return True if substr's characters (with multiplicity) all occur in superstr."""
    pool = list(superstr)
    for ch in substr:
        try:
            pool.remove(ch)
        except ValueError:
            return False
    return True
def strsub(superstr, substr):
    """Remove one occurrence of each substr character from superstr.

    Returns the remaining characters joined as a string; assumes substr
    is a multiset-subset of superstr (see issubset).
    """
    remainder = list(superstr)
    for ch in substr:
        remainder.remove(ch)
    return "".join(remainder)
class Words(object):
    """Anagram index built from a system dictionary file.

    Words are keyed by their sorted letters, so all mutual anagrams share a
    single bucket in `mapping`.
    """
    def __init__(self, dictionary):
        # e.g. dictionary="words" -> /usr/share/dict/words
        self.path = "/usr/share/dict/%s" % (dictionary)
        self.mapping = defaultdict(set)  # sorted-letter signature -> set of words
        self.word_hash = {}  # cleaned word -> itself, for membership checks
        self._build_map()

    def _build_map(self):
        # Index every cleaned dictionary line under its sorted-letter signature.
        for line in (cleanup(x) for x in open(self.path, 'r')):
            self.word_hash[line] = line
            self.mapping["".join(sorted(line))].add(line)

    def anagram(self, word, depth=2):
        """Yield word lists that together use exactly the letters of `word`.

        `depth` bounds how many words a phrase may contain; recursion stops
        when it reaches 0.
        """
        if depth == 0:
            return

        l_hash = "".join(sorted(word))

        # OK. Let's start simple.
        if l_hash in self.mapping:
            for entry in self.mapping[l_hash]:
                yield [entry]

        # Meh, Let's do our best and find l_hash in r_hash.
        # issubset(l_hash, r_hash) == "r_hash's letters all come from l_hash";
        # the leftover letters are then solved recursively at depth-1.
        for r_hash, entries in self.mapping.items():
            if issubset(l_hash, r_hash):
                leftover = strsub(l_hash, r_hash)
                # OK. So, this is a word if we can match the rest.
                for anagram in self.anagram(leftover, depth=(depth - 1)):
                    for entry in entries:
                        yield [entry] + anagram
|
, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from reviews_manager.models import ClinicalAnnotationStep
from rois_manager.models import Slice, Core, FocusRegion
class SliceAnnotation(models.Model):
    """Clinical annotation attached to a whole slice within one annotation step."""
    author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
    slice = models.ForeignKey(Slice, on_delete=models.PROTECT, blank=False,
                              related_name='clinical_annotations')
    annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
                                        blank=False, related_name='slice_annotations')
    # Timing of the annotation action (see get_action_duration).
    action_start_time = models.DateTimeField(null=True, default=None)
    action_complete_time = models.DateTimeField(null=True, default=None)
    creation_date = models.DateTimeField(default=timezone.now)
    # Pathology findings observed on the slice.
    high_grade_pin = models.BooleanField(blank=False, null=False, default=False)
    pah = models.BooleanField(blank=False, null=False, default=False)
    chronic_inflammation = models.BooleanField(blank=False, null=False, default=False)
    acute_inflammation = models.BooleanField(blank=False, null=False, default=False)
    periglandular_inflammation = models.BooleanField(blank=False, null=False, default=False)
    intraglandular_inflammation = models.BooleanField(blank=False, null=False, default=False)
    stromal_inflammation = models.BooleanField(blank=False, null=False, default=False)

    class Meta:
        # At most one slice annotation per (slice, step) pair.
        unique_together = ('slice', 'annotation_step')

    def get_gleason_4_total_area(self):
        """Sum Gleason 4 areas over all focus regions of this slice annotated
        in the same step; regions lacking an annotation are skipped.
        """
        gleason_4_total_area = 0.0
        for focus_region in self.slice.get_focus_regions():
            try:
                focus_region_annotation = FocusRegionAnnotation.objects.get(
                    focus_region=focus_region,
                    annotation_step=self.annotation_step
                )
                gleason_4_total_area += focus_region_annotation.get_total_gleason_4_area()
            except FocusRegionAnnotation.DoesNotExist:
                pass
        return gleason_4_total_area

    def get_total_tumor_area(self):
        """Sum the tumor area over every core belonging to this slice."""
        total_tumor_area = 0.0
        for core in self.slice.cores.all():
            total_tumor_area += core.get_total_tumor_area()
        return total_tumor_area

    def get_gleason_4_percentage(self):
        """Gleason 4 area as a percentage of total tumor area; -1 when the
        tumor area is zero (so callers can distinguish it from 0%).
        """
        gleason_4_total_area = self.get_gleason_4_total_area()
        total_tumor_area = self.get_total_tumor_area()
        try:
            return (gleason_4_total_area / total_tumor_area) * 100.0
        except ZeroDivisionError:
            return -1

    def get_action_duration(self):
        """Seconds between action start and completion, or None if either is unset."""
        if self.action_start_time and self.action_complete_time:
            return (self.action_complete_time-self.action_start_time).total_seconds()
        else:
            return None
class CoreAnnotation(models.Model):
    """Clinical annotation attached to a single core within one annotation step."""
    # WHO 2016 grade groups derived from the Gleason score.
    GLEASON_GROUP_WHO_16 = (
        ('GG1', 'GRADE_GROUP_1'),  # gleason score <= 6
        ('GG2', 'GRADE_GROUP_2'),  # gleason score 3+4=7
        ('GG3', 'GRADE_GROUP_3'),  # gleason score 4+3=7
        ('GG4', 'GRADE_GROUP_4'),  # gleason score 4+4=8 || 3+5=8 || 5+3=8
        ('GG5', 'GRADE_GROUP_5')  # gleason score 9 or 10
    )

    author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
    core = models.ForeignKey(Core, on_delete=models.PROTECT, blank=False,
                             related_name='clinical_annotations')
    annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
                                        blank=False, related_name='core_annotations')
    # Timing of the annotation action (see get_action_duration).
    action_start_time = models.DateTimeField(null=True, default=None)
    action_complete_time = models.DateTimeField(null=True, default=None)
    creation_date = models.DateTimeField(default=timezone.now)
    # Gleason scoring for the core.
    primary_gleason = models.IntegerField(blank=False)
    secondary_gleason = models.IntegerField(blank=False)
    gleason_group = models.CharField(
        max_length=3, choices=GLEASON_GROUP_WHO_16, blank=False
    )

    class Meta:
        # At most one core annotation per (core, step) pair.
        unique_together = ('core', 'annotation_step')

    def get_gleason_4_total_area(self):
        """Sum Gleason 4 areas over this core's focus regions annotated in the
        same step; regions lacking an annotation are skipped.
        """
        gleason_4_total_area = 0.0
        for focus_region in self.core.focus_regions.all():
            try:
                focus_region_annotation = FocusRegionAnnotation.objects.get(
                    annotation_step=self.annotation_step,
                    focus_region=focus_region
                )
                gleason_4_total_area += focus_region_annotation.get_total_gleason_4_area()
            except FocusRegionAnnotation.DoesNotExist:
                pass
        return gleason_4_total_area

    def get_total_tumor_area(self):
        """Total tumor area of the core (delegated to the Core model)."""
        return self.core.get_total_tumor_area()

    def get_gleason_4_percentage(self):
        """Gleason 4 area as a percentage of total tumor area; -1 when the
        tumor area is zero (so callers can distinguish it from 0%).
        """
        gleason_4_total_area = self.get_gleason_4_total_area()
        total_tumor_area = self.get_total_tumor_area()
        try:
            return (gleason_4_total_area / total_tumor_area) * 100.0
        except ZeroDivisionError:
            return -1

    def get_grade_group_text(self):
        """Human-readable label for gleason_group, or None if not found."""
        for choice in self.GLEASON_GROUP_WHO_16:
            if choice[0] == self.gleason_group:
                return choice[1]

    def get_action_duration(self):
        """Seconds between action start and completion, or None if either is unset."""
        if self.action_start_time and self.action_complete_time:
            return (self.action_complete_time-self.action_start_time).total_seconds()
        else:
            return None
class FocusRegionAnnotation(models.Model):
    """Clinical annotation attached to a focus region within one annotation step."""
    author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
    focus_region = models.ForeignKey(FocusRegion, on_delete=models.PROTECT,
                                     blank=False, related_name='clinical_annotations')
    annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
                                        blank=False, related_name='focus_region_annotations')
    # Timing of the annotation action.
    action_start_time = models.DateTimeField(null=True, default=None)
    action_complete_time = models.DateTimeField(null=True, default=None)
    creation_date = models.DateTimeField(default=timezone.now)
    # cancerous region fields
    perineural_involvement = models.BooleanField(blank=False, null=False, default=False)
    intraductal_carcinoma = models.BooleanField(blank=False, null=False, default=False)
    ductal_carcinoma = models.BooleanField(blank=False, null=False, default=False)
    poorly_formed_glands = models.BooleanField(blank=False, null=False, default=False)
    cribriform_pattern = models.BooleanField(blank=False, null=False, default=False)
    small_cell_signet_ring = models.BooleanField(blank=False, null=False, default=False)
    hypernephroid_pattern = models.BooleanField(blank=False, null=False, default=False)
    mucinous = models.BooleanField(blank=False, null=False, default=False)
    comedo_necrosis = models.BooleanField(blank=False, null=False, default=False)
    # stressed region fields
    inflammation = models.BooleanField(blank=False, null=False, default=False)
    pah = models.BooleanField(blank=False, null=False, default=False)
    atrophic_lesions = models.BooleanField(blank=False, null=False, default=False)
    adenosis = models.BooleanField(blank=False, null=False, default=False)
    # ---
    # Optional cell-density measurements.
    cellular_density_helper_json = models.TextField(blank=True, null=True)
    cellular_density = models.IntegerField(blank=True, null=True)
    cells_count = models.IntegerField(blank=True, null=True)

    class Meta:
        # At most one focus-region annotation per (region, step) pair.
        unique_together = ('focus_region', 'annotation_step')

    def get_total_gleason_4_area(self):
        """Sum the areas of all Gleason 4 elements of this annotation."""
        g4_area = 0
        for g4 in self.get_gleason_4_elements():
            g4_area += g4.area
        return g4_area
def get_gleason_4_elements(self):
return self.gleason_elements.filter(gleason_type= |
def transform(dataset, XRANGE=None, YRANGE=None, ZRANGE=None):
    """Define this method for Python operators that
    transform input scalars.

    Zeroes out the voxels inside the given (start, stop) range on each axis.
    A range left as None now means "the whole axis", so the operator also
    works when ranges are not supplied (previously indexing None crashed).
    """
    import numpy as np

    array = dataset.active_scalars
    if array is None:
        raise RuntimeError("No scalars found!")

    def _axis_slice(rng):
        # None -> whole axis; otherwise the half-open [start, stop) window.
        return slice(None) if rng is None else slice(rng[0], rng[1])

    # Transform the dataset: work on a copy so the input buffer is untouched.
    result = np.copy(array)
    result[_axis_slice(XRANGE), _axis_slice(YRANGE), _axis_slice(ZRANGE)] = 0

    # Set the result as the new scalars.
    dataset.active_scalars = result
|
#
# Copyright 2011 Twitter, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Various operations acting on the tuples.
* Select fields from the stream: retain
* Remove fields from the stream: discard (not implemented in Cascading 1.2.*)
* Rename fields: rename
"""
__author__ = 'Gabor Szabo'
import itertools
from cascading.tuple import Fields
from cascading.operation import Identity
import cascading.pipe.assembly.Rename
from pycascading.pipe import SubAssembly, coerce_to_fields
from pycascading.each import Apply
def retain(*fields_to_keep):
    """Retain only the given fields in the stream.

    The fields may be passed either as one list/tuple argument or as
    separate positional parameters.
    """
    if len(fields_to_keep) > 1:
        # Several positional names: keep them as a list.
        # (itertools.chain over the single args tuple just yields the args.)
        fields_to_keep = list(fields_to_keep)
    else:
        # A single argument already names the fields collection.
        fields_to_keep = fields_to_keep[0]
    return Apply(fields_to_keep, Identity(Fields.ARGS), Fields.RESULTS)
def _discard(fields_to_discard):
# In 2.0 there's a builtin function this, Discard
# In 1.2 there is nothing for this
raise Exception('Discard only works with Cascading 2.0')
def rename(*args):
    """Rename fields to new names.

    With a single argument (a list of new names) every field is renamed;
    with two arguments, the first lists the fields to rename and the second
    their new names.
    """
    if len(args) == 1:
        fields_from = Fields.ALL
        fields_to = args[0]
    else:
        fields_from, fields_to = args[0], args[1]
    return SubAssembly(cascading.pipe.assembly.Rename,
                       coerce_to_fields(fields_from),
                       coerce_to_fields(fields_to))
|
# -*- coding: utf-8 -*-
"""
@file: tasks.py
@author: lyn
@contact: tonylu716@gmail.com
@python: 3.5
@editor: Vim
@create: 3/29/17 2:20 AM
@description:
用于反爬虫的一些异步任务,主要是刷新数据表中某些临时记录。
"""
from __future__ import absolute_import, unicode_literals
from celery import task as celery_task
from .models import Ban, RecentIpActivity
from django.utils import timezone
@celery_task(name="refresh_ban")
def refresh_ban():
    """Delete Ban rows whose expiry time has passed; return the cleared IPs."""
    cleared = []
    for ban in Ban.objects.all():
        # Keep bans that are still in force.
        if not ban.ban_to < timezone.now():
            continue
        ban.delete()
        print("clear {} from Ban".format(ban.ip))
        cleared.append(ban.ip)
    return cleared
@celery_task(name="refresh_ip_activity")
def refresh_ip_activity():
    """Purge expired RecentIpActivity rows; return the purged IPs."""
    purged = []
    for act in RecentIpActivity.objects.all():
        # Keep records whose destroy time is still in the future.
        if not act.destroy_time < timezone.now():
            continue
        act.delete()
        print("clear {} acts from activities".format(act.ip))
        purged.append(act.ip)
    return purged
|
import random as rd
class Pile:
    """A simple LIFO stack over a Python list, with a vertical ASCII rendering."""

    def __init__(self, data=None):
        # Copy the input so the caller's list is never aliased.
        if data:
            self.data = [i for i in data]
        else:
            self.data = []

    def __repr__(self):
        # BUG FIX: max() on an empty list raised ValueError; an empty pile
        # now renders as just its base line.
        if not self.data:
            return "‾" * 2
        # Width of the widest element, so every cell lines up.
        max_sp = len(str(max(self.data)))
        out = ""
        for i in range(len(self.data) - 1, -1, -1):
            out += "|{}|\n".format(self.get_fit(self.data[i], max_sp))
        return out + "‾" * (max_sp + 2)

    @staticmethod
    def get_fit(elem, max_sp):
        """Left-justify elem in a cell of max_sp characters."""
        return str(elem) + ' ' * (max_sp - len(str(elem)))

    def empiler(self, e):
        """Push e on top of the pile."""
        self.data.append(e)

    def depiler(self):
        """Pop and return the top element."""
        return self.data.pop()

    def taille(self):
        """Number of elements on the pile."""
        return len(self.data)

    def __len__(self):
        return len(self.data)

    def multiplication(self):
        """Return the product of all elements, leaving the pile unchanged."""
        p2 = Pile()
        output = 1
        for i in range(self.taille()):
            elem = self.depiler()
            output *= elem
            p2.empiler(elem)
        # Pour everything back; popping p2 restores the original order.
        for i in range(p2.taille()):
            self.empiler(p2.depiler())
        return output
class DeuxPile:
    """Pair of Pile stacks rendered side by side, plus an even/odd partition."""

    def __init__(self, pile1: Pile, pile2: Pile):
        self.p1 = pile1
        self.p2 = pile2

    def __repr__(self):
        # Renders p2 alone when p1 is empty, p1 alone when p2 is empty,
        # otherwise both columns side by side.
        # NOTE(review): if BOTH piles are empty, the first else-branch still
        # calls max() on p2.data and raises ValueError -- confirm intended.
        if self.p1.data:
            max_sp1 = len(str(max(self.p1.data)))
        else:
            max_sp2 = len(str(max(self.p2.data)))
            maxi = len(self.p2)
            out = ""
            for i in range(maxi - 1, -1, -1):
                out += "{} |{}|\n".format(" " * 3, self.p2.get_fit(self.p2.data[i], max_sp2))
            return out + "{} {}\n".format("‾" * 3, "‾" * (max_sp2 + 2))
        if self.p2.data:
            max_sp2 = len(str(max(self.p2.data)))
        else:
            maxi = len(self.p1)
            out = ""
            for i in range(maxi - 1, -1, -1):
                out += "|{}| {}\n".format(self.p1.get_fit(self.p1.data[i], max_sp1), " " * 3)
            return out + "{} {}\n".format("‾" * 3, "‾" * 3)
        # Both piles non-empty: render row by row from the top, padding the
        # shorter column with blanks.
        maxi = max([len(self.p1), len(self.p2)])
        out = ""
        for i in range(maxi - 1, -1, -1):
            if i > len(self.p1) - 1:
                out += "{} |{}|\n".format(" " * (max_sp1 + 2), self.p2.get_fit(self.p2.data[i], max_sp2))
            elif i > len(self.p2) - 1:
                out += "|{}| {}\n".format(self.p1.get_fit(self.p1.data[i], max_sp1), " " * (max_sp2 + 2))
            else:
                out += "|{}| |{}|\n".format(self.p1.get_fit(self.p1.data[i], max_sp1),
                                            self.p2.get_fit(self.p2.data[i], max_sp2))
        return out + "{} {}\n".format("‾" * (max_sp1 + 2), "‾" * (max_sp2 + 2))

    def separation(self):
        """Partition the values so evens end on p2 and odds on p1, printing
        the piles after each stage."""
        print(self)
        temp = Pile()
        # Pass 1: drain p1 -- evens to temp, odds to p2.
        for i in range(len(self.p1)):
            elem = self.p1.depiler()
            if elem % 2 == 0:
                temp.empiler(elem)
            else:
                self.p2.empiler(elem)
        print(self)
        # Pass 2: drain p2 -- evens to temp, odds back to p1.
        for i in range(len(self.p2)):
            elem = self.p2.depiler()
            if elem % 2 == 0:
                temp.empiler(elem)
            else:
                self.p1.empiler(elem)
        print(self)
        # All evens collected in temp now land on p2.
        for i in range(len(temp)):
            self.p2.empiler(temp.depiler())
        print(self)
# pile = Pile([1, 2, 3, 4])
# print(multiplication(pile))
# print(pile)
#
# p1 = Pile([rd.randint(0, 9) for _ in range(5)])
# p2 = Pile([rd.randint(0, 9) for _ in range(5)])
# two_pile = DeuxPile(p1, p2)
# two_pile.separation()
#
# def suite_newton(r, n):
# if n == 0:
# return r
# prec = suite_newton(r, n - 1)
# return (prec + (r / prec)) / 2
#
#
# def sqrt_newton(r, error):
# n = 0
# racine = suite_newton(r, n)
# racine_carre = racine * racine
# while not - error < r - racine_carre < error:
# n += 1
# racine = suite_newton(r, n)
# racine_ | carre = racine * racine
# print("{} -> {}".format(n, racine))
# return racine
#
#
# # print(suite_newton | (3, 8))
# # sqrt_newton(3, 0.01)
#
# def dichoto(r, error):
# mini = 0
# maxi = r
# racine = (maxi + mini) / 2
# racine_carre = racine * racine
# while not -error < r - racine_carre < error:
# if racine * racine > r:
# maxi = racine
# if racine * racine < r:
# mini = racine
# print(racine)
# racine = (maxi + mini) / 2
# racine_carre = racine * racine
# return racine
#
#
# dichoto(3, 0.01)
#
#
# def average(reads):
# sum = 0
# for read in reads:
# sum += len(read)
# return sum / len(reads)
#
#
# print(average(["AGGCT", "GGAT", "GGCAAA"]))
#
#
# def threshold(reads):
# moyenne = average(reads)
# output = []
# for read in reads:
# if len(read) >= moyenne:
# output.append(read)
# return output
#
#
# print(threshold(["AGGCT", "GGAT", "GGCAAA"]))
#
#
# def count_nucl(seq: str, symbol: str):
# output = 0
# for nucl in seq:
# if nucl == symbol:
# output += 1
# return output
#
#
# print(count_nucl("AGGCT", "G"))
#
#
# def ratio_gc(reads: list):
# list_gc = []
# for read in reads:
# counter = 0
# for nucl in read:
# if nucl == "G" or nucl == "C":
# counter += 1
# list_gc.append(counter / len(read))
# somme = 0
# for gc in list_gc:
# somme += gc
# return somme / len(list_gc)
#
#
# print(ratio_gc(["AGGCT", "GGAT", "GGCAAA"]))
#
#
# def remove_ends(reads, adaptor: str):
# output = []
# for read in reads:
# if read[:len(adaptor)] == adaptor:
# output.append(read[len(adaptor):])
# if read[-len(adaptor):] == adaptor:
# output.append(read[:-len(adaptor)])
# return output
#
#
# print(remove_ends(["TTTCAGGCT", "GGATTTTC", "TTTCGGCAAA"], "TTTC"))
def pre_sup(center: str):
    """Build a 6-cell row: a border cell, four copies of center, a border cell."""
    row = ["__"]
    row.extend([center] * 4)
    row.append("__")
    return row
def tableau():
    """Print a 6x6 grid: border rows of '-', three '0' rows, then a '1' row."""
    rows = [pre_sup("-")]
    rows += [pre_sup("0") for _ in range(3)]
    rows.append(pre_sup("1"))
    rows.append(pre_sup("-"))
    for row in rows:
        print(" ".join(row))
# tableau()
def molecule(counter):
    """Starting from 1, subtract every odd multiple of 3 below counter,
    printing each step; return the final value."""
    total = 1
    step = 3
    for n in range(step, counter, step):
        if n % 2 == 0:
            continue
        total -= n
        print("n =", n)
        print(total)
    return total
# Interactive driver: read an integer from the user and run molecule() on it.
print("Hello")
print("Give a value")
info = int(input())  # upper bound passed to molecule()
total = molecule(info)
print(total)
def till_0():
    """Read ints until 0, then print each earlier value greater than the
    second-to-last entry (the last nonzero one)."""
    value = 16  # any nonzero value primes the loop
    entries = []
    while value != 0:
        value = int(input("n = "))
        entries.append(value)
    for item in entries[:-2]:
        if item > entries[-2]:
            print(item)
def add_seq():
    """Prompt for one sequence record and append it to the global `data` list."""
    record = {}
    print("Identifiant:")
    record["id"] = input()
    print("Sequence ADN:")
    record["seq"] = input()
    record["len"] = len(record["seq"])
    print("Liste gene ( format ==> a b c d):")
    record["gene"] = input().split()
    data.append(record)
def show_list_gene():
    """Print, for each gene seen in `data`, the sequences that carry it."""
    gene_map = {}
    for record in data:
        for gene in record["gene"]:
            gene_map.setdefault(gene, []).append(record["seq"])
    for gene in gene_map:
        print("{} : {}".format(gene, " ".join(gene_map[gene])))
def map_kinase():
    """Interactive menu loop managing the global sequence list `data`."""
    global data
    data = []
    while True:
        print("1 --> Ajouter une séquence")
        print("2 --> Afficher info d'une espèce")
        print("3 --> Afficher liste des gènes et séquences associées")
        print("4 --> Exit")
        choice = input()
        if choice == "1":
            add_seq()
        elif choice == "2":
            pass  # not implemented
        elif choice == "3":
            show_list_gene()
        elif choice == "4":
            exit()
# map_kinase()
|
import os
from rbm import RBM
from au import AutoEncoder
import tensorflow as tf
import input_data
from utilsnn import show_image, min_max_scale
import matplotlib.pyplot as plt
# Command-line configuration for the training run.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')
flags.DEFINE_integer('epochs', 50, 'The number of training epochs')
flags.DEFINE_integer('batchsize', 30, 'The batch size')
flags.DEFINE_boolean('restore_rbm', False, 'Whether to restore the RBM weights or not.')

# ensure output dir exists
if not os.path.isdir('out'):
    os.mkdir('out')

mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
# BUG FIX: the scaled test images must be assigned back to teX; the original
# wrote them into teY, clobbering the test labels and leaving teX unscaled
# (teX is used for the scatter plot at the end of the script).
trX, teX = min_max_scale(trX, teX)
# RBMs: a 784-900-500-250-2 stack used to pre-train the autoencoder.
rbmobject1 = RBM(784, 900, ['rbmw1', 'rbvb1', 'rbmhb1'], 0.3)
rbmobject2 = RBM(900, 500, ['rbmw2', 'rbvb2', 'rbmhb2'], 0.3)
rbmobject3 = RBM(500, 250, ['rbmw3', 'rbvb3', 'rbmhb3'], 0.3)
rbmobject4 = RBM(250, 2, ['rbmw4', 'rbvb4', 'rbmhb4'], 0.3)

if FLAGS.restore_rbm:
    rbmobject1.restore_weights('./out/rbmw1.chp')
    rbmobject2.restore_weights('./out/rbmw2.chp')
    rbmobject3.restore_weights('./out/rbmw3.chp')
    rbmobject4.restore_weights('./out/rbmw4.chp')

# Autoencoder initialized from the RBM layer shapes/weight names.
autoencoder = AutoEncoder(784, [900, 500, 250, 2], [['rbmw1', 'rbmhb1'],
                                                    ['rbmw2', 'rbmhb2'],
                                                    ['rbmw3', 'rbmhb3'],
                                                    ['rbmw4', 'rbmhb4']], tied_weights=False)

# BUG FIX: use floor division -- "/" yields a float on Python 3, which makes
# every range(iterations) in the training loops below raise TypeError.
iterations = len(trX) // FLAGS.batchsize
# Train First RBM
print('first rbm')
for i in range(FLAGS.epochs):
    for j in range(iterations):
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
        rbmobject1.partial_fit(batch_xs)
    # Report reconstruction cost and dump the weight image once per epoch.
    print(rbmobject1.compute_cost(trX))
    show_image("out/1rbm.jpg", rbmobject1.n_w, (28, 28), (30, 30))
rbmobject1.save_weights('./out/rbmw1.chp')

# Train Second RBM2
print('second rbm')
for i in range(FLAGS.epochs):
    for j in range(iterations):
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
        # Transform features with first rbm for second rbm
        batch_xs = rbmobject1.transform(batch_xs)
        rbmobject2.partial_fit(batch_xs)
    print(rbmobject2.compute_cost(rbmobject1.transform(trX)))
    show_image("out/2rbm.jpg", rbmobject2.n_w, (30, 30), (25, 20))
rbmobject2.save_weights('./out/rbmw2.chp')

# Train Third RBM
print('third rbm')
for i in range(FLAGS.epochs):
    for j in range(iterations):
        # Transform features through the two already-trained RBMs.
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
        batch_xs = rbmobject1.transform(batch_xs)
        batch_xs = rbmobject2.transform(batch_xs)
        rbmobject3.partial_fit(batch_xs)
    print(rbmobject3.compute_cost(rbmobject2.transform(rbmobject1.transform(trX))))
    show_image("out/3rbm.jpg", rbmobject3.n_w, (25, 20), (25, 10))
rbmobject3.save_weights('./out/rbmw3.chp')

# Train Third RBM
# NOTE(review): comment says "Third" but this trains the fourth (2-unit) RBM.
print('fourth rbm')
for i in range(FLAGS.epochs):
    for j in range(iterations):
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
        # Transform features through the three already-trained RBMs.
        batch_xs = rbmobject1.transform(batch_xs)
        batch_xs = rbmobject2.transform(batch_xs)
        batch_xs = rbmobject3.transform(batch_xs)
        rbmobject4.partial_fit(batch_xs)
    print(rbmobject4.compute_cost(rbmobject3.transform(rbmobject2.transform(rbmobject1.transform(trX)))))
rbmobject4.save_weights('./out/rbmw4.chp')
# Load RBM weights to Autoencoder (layer-by-layer pre-training init).
autoencoder.load_rbm_weights('./out/rbmw1.chp', ['rbmw1', 'rbmhb1'], 0)
autoencoder.load_rbm_weights('./out/rbmw2.chp', ['rbmw2', 'rbmhb2'], 1)
autoencoder.load_rbm_weights('./out/rbmw3.chp', ['rbmw3', 'rbmhb3'], 2)
autoencoder.load_rbm_weights('./out/rbmw4.chp', ['rbmw4', 'rbmhb4'], 3)

# Train Autoencoder (fine-tuning), reporting summed batch cost per epoch.
print('autoencoder')
for i in range(FLAGS.epochs):
    cost = 0.0
    for j in range(iterations):
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
        cost += autoencoder.partial_fit(batch_xs)
    print(cost)

autoencoder.save_weights('./out/au.chp')
autoencoder.load_weights('./out/au.chp')

# Scatter-plot the 2-D codes of the test set.
fig, ax = plt.subplots()
print(autoencoder.transform(teX)[:, 0])
print(autoencoder.transform(teX)[:, 1])
plt.scatter(autoencoder.transform(teX)[:, 0], autoencoder.transform(teX)[:, 1], alpha=0.5)
# BUG FIX: save before show() -- show() blocks and then clears the current
# figure, so calling savefig afterwards wrote out an empty image.
plt.savefig('out/myfig')
plt.show()
raw_input("Press Enter to continue...")
|
from django.shortcuts import render

from moth.views.base.vulnerable_template_view import VulnerableTemplateView


class EchoHeadersView(VulnerableTemplateView):
    """Render every HTTP request header back to the client."""

    description = title = 'Echoes all request headers'
    url_path = 'echo-headers.py'
    # WSGI exposes a few headers without the HTTP_ prefix; list them here.
    KNOWN_HEADERS = ('CONTENT_LENGTH',)

    def is_http_header(self, hname):
        """True when the META key represents an HTTP request header."""
        if hname in self.KNOWN_HEADERS:
            return True
        return hname.startswith('HTTP_')

    def translate_header(self, hname):
        """Convert a WSGI META key (HTTP_USER_AGENT) to header form (User-Agent)."""
        return hname.replace('HTTP_', '').replace('_', '-').lower().title()

    def get(self, request, *args, **kwds):
        context = self.get_context_data()
        msg_fmt = 'Header "%s" with value "%s" <br/>\n'
        fragments = []
        for hname in request.META:
            if self.is_http_header(hname):
                fragments.append(msg_fmt % (self.translate_header(hname),
                                            request.META[hname]))
        context['html'] = ''.join(fragments)
        return render(request, self.template_name, context)
|
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# for py2/py3 compatibility
from __future__ import print_function

import signal

from . import base
from testrunner.local import utils


class SignalProc(base.TestProcObserver):
  """Observer that maps SIGINT/SIGTERM onto test-runner exit codes."""

  def __init__(self):
    super(SignalProc, self).__init__()
    self.exit_code = utils.EXIT_CODE_PASS

  def setup(self, *args, **kwargs):
    super(SignalProc, self).setup(*args, **kwargs)
    # Register only after the processors are chained together so a signal
    # caught mid-construction is not lost.
    for signum, handler in ((signal.SIGINT, self._on_ctrlc),
                            (signal.SIGTERM, self._on_sigterm)):
      signal.signal(signum, handler)

  def _on_ctrlc(self, _signum, _stack_frame):
    print('>>> Ctrl-C detected, early abort...')
    self._abort(utils.EXIT_CODE_INTERRUPTED)

  def _on_sigterm(self, _signum, _stack_frame):
    print('>>> SIGTERM received, early abort...')
    self._abort(utils.EXIT_CODE_TERMINATED)

  def _abort(self, code):
    # Record the exit code and halt further test processing.
    self.exit_code = code
    self.stop()
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "Li | cense");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governin | g permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class SeriesActors(object):
    """Swagger model holding the `data` list of SeriesActorsData entries.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """Initialize an empty model with its Swagger type/attribute maps."""
        # Attribute name -> Swagger type string.
        self.swagger_types = {'data': 'list[SeriesActorsData]'}
        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {'data': 'data'}
        self._data = None

    @property
    def data(self):
        """The data payload of this SeriesActors (list[SeriesActorsData])."""
        return self._data

    @data.setter
    def data(self, data):
        """Set the data payload of this SeriesActors."""
        self._data = data

    def to_dict(self):
        """Return the model properties as a dict, recursing into sub-models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when all their attributes are equal."""
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
import random
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Uniformly random node value from a singly-linked list (LeetCode 382).

    For lists longer than ``_largesize`` nodes, two anchor pointers at
    roughly 1/3 and 2/3 of the list are cached so a random access walks
    at most about a third of the list.

    NOTE: ``self.lsize`` is the node count minus one, so that
    ``randint(0, lsize)`` indexes nodes uniformly.
    """
    _largesize = 300

    def __init__(self, head):
        self.head = head
        self.lsize = 0
        while head.next:
            head = head.next
            self.lsize += 1
        self.m1_idx = None
        self.m2_idx = None
        if self.lsize > self._largesize:
            # Floor division keeps the index an int on Python 3 as well
            # (plain `/` would yield a float and subtly break _getN).
            self.m1_idx = self.lsize // 3   # start from 1/3
            self.m1 = self._getN(self.m1_idx)
            self.m2_idx = self.m1_idx * 2   # start from 2/3
            self.m2 = self._getN(self.m2_idx)

    def _getN(self, n):
        """Return the n-th node (1-based) counted from the head."""
        n -= 1
        p = self.head
        while n:
            p = p.next
            n -= 1
        return p

    def getRandom(self):
        """Return the value of a node chosen uniformly at random."""
        def _walk(delta, start):
            # Advance `delta` links from `start` and return the value there.
            p = start
            while delta:
                p = p.next
                delta -= 1
            return p.val
        nextpos = random.randint(0, self.lsize)
        # Short lists: no anchors were built, walk from the head.
        if not self.m1_idx:
            return _walk(nextpos, self.head)
        if nextpos < self.m1_idx:
            return _walk(nextpos, self.head)
        elif nextpos < self.m2_idx:
            return _walk(nextpos - self.m1_idx, self.m1)
        else:
            return _walk(nextpos - self.m2_idx, self.m2)
from setuptools import setup
from os import path, environ
from sys import argv

here = path.abspath(path.dirname(__file__))

# When invoked as "setup.py test", make the project root importable for
# the test suite.
try:
    if argv[1] == "test":
        environ['PYTHONPATH'] = here
except IndexError:
    pass

# The long description is taken from the README (Python 3 `open` accepts
# an explicit encoding).
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# Removed stray "|" corruption after install_requires / before keywords
# that made this call a syntax error.
setup(
    name='libfs',
    version='0.1',
    description='Library Filesystem',
    long_description=long_description,
    author='Christof Hanke',
    author_email='christof.hanke@induhviduals.de',
    url='https://github.com/ya-induhvidual/libfs',
    packages=['Libfs'],
    license='MIT',
    install_requires=['llfuse', 'mutagenx'],
    test_suite="test/test_all.py",
    scripts=['scripts/libfs.py'],
    keywords='fuse multimedia',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: BSD :: FreeBSD',
        'Operating System :: POSIX :: Linux',
        'Topic :: System :: Filesystems'
    ],
)
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published b | y the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Pub | lic License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo addon manifest: "Sale Require Purchase Order Number".
    'name': 'Sale Require Purchase Order Number',
    'version': '13.0.1.1.0',
    'category': 'Sales',
    'sequence': 14,
    'summary': '',
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    'images': [
    ],
    # Modules that must be installed before this one.
    'depends': [
        'sale_stock'
    ],
    # View definitions loaded on install/update.
    'data': [
        'views/sale_order_views.xml',
        'views/res_partner_views.xml',
        'views/account_move_views.xml',
        'views/stock_picking_views.xml'
    ],
    'demo': [
    ],
    # NOTE(review): flagged not installable — presumably pending migration; confirm.
    'installable': False,
    'auto_install': False,
    'application': False,
}
# coding=utf-8
import unittest
from types import MethodType
from parameterized import parameterized
from six import StringIO
from conans.client.output import ConanOutput, colorama_initialize
from mock import mock
class ConanOutputTest(unittest.TestCase):
    """Tests for ConanOutput retry-on-blocked-stdout and colorama init logic.

    Only the two stray "|" corruption artifacts were removed; test data
    and assertions are otherwise unchanged.
    """

    def test_blocked_output(self):
        # https://github.com/conan-io/conan/issues/4277
        # Simulate a stdout whose first write raises IOError; ConanOutput
        # must sleep and retry until the write succeeds.
        stream = StringIO()
        def write_raise(self, data):
            write_raise.counter = getattr(write_raise, "counter", 0) + 1
            if write_raise.counter < 2:
                raise IOError("Stdout locked")
            self.super_write(data)
        stream.super_write = stream.write
        stream.write = MethodType(write_raise, stream)
        out = ConanOutput(stream)
        with mock.patch("time.sleep") as sleep:
            out.write("Hello world")
            sleep.assert_any_call(0.02)
        self.assertEqual("Hello world", stream.getvalue())

    @parameterized.expand([(False, {}),
                           (False, {"CONAN_COLOR_DISPLAY": "0"}),
                           (True, {"CONAN_COLOR_DISPLAY": "0"}),
                           (False, {"PYCHARM_HOSTED": "1"}),
                           (True, {"PYCHARM_HOSTED": "1", "CONAN_COLOR_DISPLAY": "0"}),
                           (True, {"NO_COLOR": ""}),
                           (True, {"CLICOLOR": "0"}),
                           (True, {"CLICOLOR": "0", "CONAN_COLOR_DISPLAY": "1"}),
                           (False, {"CLICOLOR": "1"}),
                           (False, {"CLICOLOR_FORCE": "0"}),
                           (True,
                            {"CLICOLOR": "1", "CLICOLOR_FORCE": "1", "CONAN_COLOR_DISPLAY": "1",
                             "PYCHARM_HOSTED": "1", "NO_COLOR": "1"})])
    def test_output_no_color(self, isatty, env):
        with mock.patch("colorama.init") as init:
            with mock.patch("sys.stdout.isatty", return_value=isatty), \
                    mock.patch.dict("os.environ", env, clear=True):
                assert not colorama_initialize()
                init.assert_not_called()

    @parameterized.expand([(True, {}),
                           (False, {"CONAN_COLOR_DISPLAY": "1"}),
                           (True, {"CONAN_COLOR_DISPLAY": "1"}),
                           (True, {"CLICOLOR": "1"}),
                           (True, {"CLICOLOR_FORCE": "0"})])
    def test_output_color(self, isatty, env):
        with mock.patch("colorama.init") as init:
            with mock.patch("sys.stdout.isatty", return_value=isatty), \
                    mock.patch.dict("os.environ", env, clear=True):
                assert colorama_initialize()
                init.assert_called_once_with()

    @parameterized.expand([(False, {"PYCHARM_HOSTED": "1", "CONAN_COLOR_DISPLAY": "1"}),
                           (True, {"PYCHARM_HOSTED": "1"}),
                           (False, {"CLICOLOR_FORCE": "1"}),
                           (True, {"CLICOLOR_FORCE": "1", "CLICOLOR": "0"}),
                           (True, {"CLICOLOR_FORCE": "1", "CONAN_COLOR_DISPLAY": "0"})])
    def test_output_color_prevent_strip(self, isatty, env):
        with mock.patch("colorama.init") as init:
            with mock.patch("sys.stdout.isatty", return_value=isatty), \
                    mock.patch.dict("os.environ", env, clear=True):
                assert colorama_initialize()
                init.assert_called_once_with(convert=False, strip=False)
# Fixed truncated `try:` keyword and "|"-corrupted import lines.
try:
    # for Python2
    from Tkinter import *  ## notice capitalized T in Tkinter
    import tkFileDialog, tkMessageBox
except ImportError:
    # for Python3
    from tkinter import *  ## notice lowercase 't' in tkinter here
    from tkinter import filedialog as tkFileDialog
    from tkinter import messagebox as tkMessageBox
import sys, os
from scipy.io.wavfile import read
import hpsModel_function
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
class HpsModel_frame:
def __init__(self, parent):
    # parent: the Tk container widget this frame builds its controls into.
    self.parent = parent
    self.initUI()
def initUI(self):
    """Build the HPS-model form: file chooser, analysis parameters,
    compute button and playback buttons for the synthesized outputs.

    All widgets share column 0 and are positioned with padx offsets,
    so the pixel values in the grid() calls are layout-critical.
    """
    choose_label = "Input file (.wav, mono and 44100 sampling rate):"
    Label(self.parent, text=choose_label).grid(row=0, column=0, sticky=W, padx=5, pady=(10,2))

    #TEXTBOX TO PRINT PATH OF THE SOUND FILE
    self.filelocation = Entry(self.parent)
    self.filelocation.focus_set()
    self.filelocation["width"] = 25
    self.filelocation.grid(row=1,column=0, sticky=W, padx=10)
    self.filelocation.delete(0, END)
    self.filelocation.insert(0, '../../sounds/sax-phrase-short.wav')

    #BUTTON TO BROWSE SOUND FILE
    self.open_file = Button(self.parent, text="Browse...", command=self.browse_file) #see: def browse_file(self)
    self.open_file.grid(row=1, column=0, sticky=W, padx=(220, 6)) #put it beside the filelocation textbox

    #BUTTON TO PREVIEW SOUND FILE
    self.preview = Button(self.parent, text=">", command=lambda:UF.wavplay(self.filelocation.get()), bg="gray30", fg="white")
    self.preview.grid(row=1, column=0, sticky=W, padx=(306,6))

    ## HARMONIC MODEL

    #ANALYSIS WINDOW TYPE
    wtype_label = "Window type:"
    Label(self.parent, text=wtype_label).grid(row=2, column=0, sticky=W, padx=5, pady=(10,2))
    self.w_type = StringVar()
    self.w_type.set("blackman") # initial value
    window_option = OptionMenu(self.parent, self.w_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris")
    window_option.grid(row=2, column=0, sticky=W, padx=(95,5), pady=(10,2))

    #WINDOW SIZE
    M_label = "Window size (M):"
    Label(self.parent, text=M_label).grid(row=4, column=0, sticky=W, padx=5, pady=(10,2))
    self.M = Entry(self.parent, justify=CENTER)
    self.M["width"] = 5
    self.M.grid(row=4,column=0, sticky=W, padx=(115,5), pady=(10,2))
    self.M.delete(0, END)
    self.M.insert(0, "601")

    #FFT SIZE
    N_label = "FFT size (N) (power of two bigger than M):"
    Label(self.parent, text=N_label).grid(row=5, column=0, sticky=W, padx=5, pady=(10,2))
    self.N = Entry(self.parent, justify=CENTER)
    self.N["width"] = 5
    self.N.grid(row=5,column=0, sticky=W, padx=(270,5), pady=(10,2))
    self.N.delete(0, END)
    self.N.insert(0, "1024")

    #THRESHOLD MAGNITUDE
    t_label = "Magnitude threshold (t) (in dB):"
    Label(self.parent, text=t_label).grid(row=6, column=0, sticky=W, padx=5, pady=(10,2))
    self.t = Entry(self.parent, justify=CENTER)
    self.t["width"] = 5
    self.t.grid(row=6, column=0, sticky=W, padx=(205,5), pady=(10,2))
    self.t.delete(0, END)
    self.t.insert(0, "-100")

    #MIN DURATION SINUSOIDAL TRACKS
    minSineDur_label = "Minimum duration of sinusoidal tracks:"
    Label(self.parent, text=minSineDur_label).grid(row=7, column=0, sticky=W, padx=5, pady=(10,2))
    self.minSineDur = Entry(self.parent, justify=CENTER)
    self.minSineDur["width"] = 5
    self.minSineDur.grid(row=7, column=0, sticky=W, padx=(250,5), pady=(10,2))
    self.minSineDur.delete(0, END)
    self.minSineDur.insert(0, "0.1")

    #MAX NUMBER OF HARMONICS
    nH_label = "Maximum number of harmonics:"
    Label(self.parent, text=nH_label).grid(row=8, column=0, sticky=W, padx=5, pady=(10,2))
    self.nH = Entry(self.parent, justify=CENTER)
    self.nH["width"] = 5
    self.nH.grid(row=8, column=0, sticky=W, padx=(215,5), pady=(10,2))
    self.nH.delete(0, END)
    self.nH.insert(0, "100")

    #MIN FUNDAMENTAL FREQUENCY
    minf0_label = "Minimum fundamental frequency:"
    Label(self.parent, text=minf0_label).grid(row=9, column=0, sticky=W, padx=5, pady=(10,2))
    self.minf0 = Entry(self.parent, justify=CENTER)
    self.minf0["width"] = 5
    self.minf0.grid(row=9, column=0, sticky=W, padx=(220,5), pady=(10,2))
    self.minf0.delete(0, END)
    self.minf0.insert(0, "350")

    #MAX FUNDAMENTAL FREQUENCY
    maxf0_label = "Maximum fundamental frequency:"
    Label(self.parent, text=maxf0_label).grid(row=10, column=0, sticky=W, padx=5, pady=(10,2))
    self.maxf0 = Entry(self.parent, justify=CENTER)
    self.maxf0["width"] = 5
    self.maxf0.grid(row=10, column=0, sticky=W, padx=(220,5), pady=(10,2))
    self.maxf0.delete(0, END)
    self.maxf0.insert(0, "700")

    #MAX ERROR ACCEPTED
    f0et_label = "Maximum error in f0 detection algorithm:"
    Label(self.parent, text=f0et_label).grid(row=11, column=0, sticky=W, padx=5, pady=(10,2))
    self.f0et = Entry(self.parent, justify=CENTER)
    self.f0et["width"] = 5
    self.f0et.grid(row=11, column=0, sticky=W, padx=(265,5), pady=(10,2))
    self.f0et.delete(0, END)
    self.f0et.insert(0, "5")

    #ALLOWED DEVIATION OF HARMONIC TRACKS
    harmDevSlope_label = "Max frequency deviation in harmonic tracks:"
    Label(self.parent, text=harmDevSlope_label).grid(row=12, column=0, sticky=W, padx=5, pady=(10,2))
    self.harmDevSlope = Entry(self.parent, justify=CENTER)
    self.harmDevSlope["width"] = 5
    self.harmDevSlope.grid(row=12, column=0, sticky=W, padx=(285,5), pady=(10,2))
    self.harmDevSlope.delete(0, END)
    self.harmDevSlope.insert(0, "0.01")

    #DECIMATION FACTOR
    stocf_label = "Stochastic approximation factor:"
    Label(self.parent, text=stocf_label).grid(row=13, column=0, sticky=W, padx=5, pady=(10,2))
    self.stocf = Entry(self.parent, justify=CENTER)
    self.stocf["width"] = 5
    self.stocf.grid(row=13, column=0, sticky=W, padx=(210,5), pady=(10,2))
    self.stocf.delete(0, END)
    self.stocf.insert(0, "0.2")

    #BUTTON TO COMPUTE EVERYTHING
    self.compute = Button(self.parent, text="Compute", command=self.compute_model, bg="dark red", fg="white")
    self.compute.grid(row=14, column=0, padx=5, pady=(10,2), sticky=W)

    #BUTTON TO PLAY SINE OUTPUT
    output_label = "Sinusoidal:"
    Label(self.parent, text=output_label).grid(row=15, column=0, sticky=W, padx=5, pady=(10,0))
    self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_hpsModel_sines.wav'), bg="gray30", fg="white")
    self.output.grid(row=15, column=0, padx=(80,5), pady=(10,0), sticky=W)

    #BUTTON TO PLAY STOCHASTIC OUTPUT
    output_label = "Stochastic:"
    Label(self.parent, text=output_label).grid(row=16, column=0, sticky=W, padx=5, pady=(5,0))
    self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_hpsModel_stochastic.wav'), bg="gray30", fg="white")
    self.output.grid(row=16, column=0, padx=(80,5), pady=(5,0), sticky=W)

    #BUTTON TO PLAY OUTPUT
    output_label = "Output:"
    Label(self.parent, text=output_label).grid(row=17, column=0, sticky=W, padx=5, pady=(5,15))
    self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_hpsModel.wav'), bg="gray30", fg="white")
    self.output.grid(row=17, column=0, padx=(80,5), pady=(5,15), sticky=W)

    # define options for opening file
    self.file_opt = options = {}
    options['defaultextension'] = '.wav'
    options['filetypes'] = [('All files', '.*'), ('Wav files', '.wav')]
    options['initialdir'] = '../../sounds/'
    options['title'] = 'Open a mono audio file .wav with sample frequency 44100 Hz'
def browse_file(self):
    """Open a file dialog and show the chosen path in the location box."""
    self.filename = tkFileDialog.askopenfilename(**self.file_opt)
    # Replace whatever was in the entry with the newly selected path.
    self.filelocation.delete(0, END)
    self.filelocation.insert(0, self.filename)
def compute_model(self):
try:
inputFile = self.filelocation.get()
window = self.w_type.get()
M = int(self.M.get())
N = int(self.N.get())
t = int(self.t.get())
minSineDur = float(self.minSineDur.get())
nH = int(self.nH.get())
minf0 = int(self.minf0.get())
maxf0 = int(self.maxf0.get())
f0et = int(self.f0et.get())
harmDevSlope = float(self.harmDevSlope.get())
stocf = float(self.stocf.get( |
number = input()
number_array = [(int)(x) for x in raw_input().s | plit()]
total = 0
for i in range(1, number):
for j in range(i):
ii = number_array[i]
jj = number_a | rray[j]
if ii < jj:
total += i - j
number_array = number_array[:j] + [ii] + [jj] + number_array[j+1:i] + number_array[i+1:]
break
print total
import json
import requests
import time
import csv
from datetime import datetime
#----------------------------------------------------------------------
def login(client_id, client_secret, username, password):
"""logs into reddit using Oauth2"""
client_auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
post_data = {"grant_type": "password", "username": username,
"password": password}
response = requests.post("https://www.reddit.com/api/v1/access_token",
| auth=client_auth, data=post_data)
print response
token_json = response.json()
headers = {"Authorization": "%s %s" % (token_json["token_type"],
token_json["access_token"]),
"User-Agent": user_agent}
return headers
#----------------------------------------------------------------------
def subredditInfo(sr, limit=100, sorting='top',
                  user_agent="ChicagoSchool's scraper", **kwargs):
    """retrieves X (max 100) amount of stories in a subreddit
    'sorting' is whether or not the sorting of the reddit should be customized or not,
    if it is: Allowed passing params/queries such as t=hour, week, month, year or all"""
    # Build the query; previously `parameters` (including **kwargs such as
    # t=week) was constructed but never sent, so custom queries were ignored.
    parameters = {'limit': limit}
    parameters.update(kwargs)
    url = 'http://www.reddit.com/r/%s/%s.json' % (sr, sorting)
    r = requests.get(url, params=parameters, headers={"user-agent": user_agent})
    j = json.loads(r.text)
    # return list of stories
    return [story for story in j['data']['children']]
#----------------------------------------------------------------------
def collapseComments(comment):
    """takes in a comment json object and collapses the text for children
    into a list"""
    data = comment["data"]
    # Comments without a body (e.g. "more" stubs) contribute nothing.
    if "body" not in data.keys():
        return []
    cleaned = data["body"].replace(",", "").replace("\n", " ").encode("utf-8")
    texts = [cleaned]
    # An empty reply set is encoded as a unicode string; anything else is a
    # listing whose children we flatten recursively.
    if type(data["replies"]) is not unicode:
        for child in data["replies"]["data"]["children"]:
            texts.extend(collapseComments(child))
    return texts
#----------------------------------------------------------------------
def threadComments(link, limit=100, sorting='',
                   user_agent="ChicagoSchool's scraper", **kwargs):
    """gets X (max 100) amount of comments for a given thread.

    Always returns a list: the original fell through and returned None when
    the response had no comment listing, crashing callers that .extend()
    the result. Query parameters (**kwargs) are now actually sent.
    """
    parameters = {'limit': limit}
    parameters.update(kwargs)
    url = 'http://www.reddit.com/%s/%s.json' % (link, sorting)
    r = requests.get(url, params=parameters, headers={"user-agent": user_agent})
    j = json.loads(r.text)
    # j[0] is the submission listing; j[1] (if present) holds the comments.
    if len(j) > 1:
        comments = []
        for c in j[1]["data"]["children"]:
            comments.extend(collapseComments(c))
        return comments
    return []
#----------------------------------------------------------------------
def fullRun(sr_list, ind, out_f):
    """iterates through a list of subreddits, gets their top threads and
    the comments for the top posts

    sr_list: subreddit names; ind: index to resume from; out_f: CSV path
    appended with one (subreddit, all-comments-joined) row per subreddit.
    Rate limiting: sleeps 2s between requests; on failure backs off 300s
    (subreddit fetch) or 60s then 300s (thread fetch) and retries.
    """
    t_1 = datetime.now()
    sr_comment_list = []
    sr_ln = len(sr_list)
    for i, sr in enumerate(sr_list[ind:]):
        time.sleep(2)
        try:
            sr_info = subredditInfo(sr)
            sr_comments = []
            sr_info_ln = len(sr_info)
        except Exception as e:
            # back off five minutes and retry the subreddit fetch once
            print e
            time.sleep(300)
            sr_info = subredditInfo(sr)
            sr_comments = []
            sr_info_ln = len(sr_info)
        for j, l in enumerate(sr_info):
            try:
                sr_comments.extend(threadComments(l["data"]["permalink"]))
                print ((i + ind) * 100.) / sr_ln, (j * 100.) / sr_info_ln, datetime.now() - t_1, i + ind, j, sr_info_ln
                time.sleep(2)
            except Exception as e:
                # first retry after a minute, second after five minutes
                print e
                time.sleep(60)
                try:
                    sr_comments.extend(threadComments(l["data"]["permalink"]))
                    print ((i + ind) * 100.) / sr_ln, (j * 100.) / sr_info_ln, datetime.now() - t_1, i + ind, j, sr_info_ln
                    time.sleep(2)
                except Exception as e:
                    print e
                    time.sleep(300)
                    sr_comments.extend(threadComments(l["data"]["permalink"]))
                    print ((i + ind) * 100.) / sr_ln, (j * 100.) / sr_info_ln, datetime.now() - t_1, i + ind, j, sr_info_ln
                    time.sleep(2)
        # append this subreddit's collapsed comments as one CSV row
        sr_str = " ".join(sr_comments)
        out_d = open(out_f, "ab")
        out_w = csv.writer(out_d)
        out_w.writerow([sr, sr_str])
        out_d.close()
a_str1 = "Craig McBean"
a_str2 = "Sheree-Annm Lewis-McBean"
a_str3 = 'Sheyenne Lewis'
a_str4 = raw_input("E | nter fourth Name: ")
print "{:>30}".format(a_str1)
print "{:>30}".format(a_str2)
print "{:>30}".format(a_str3)
print "{ | :>30}".format(a_str4)
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.tests.unit import base
from openstack.identity.v2 import extension
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'alias': '1',
'description': '2',
'links': '3',
'name': '4',
'namespace': '5',
'updated': '2015-03-09T12:14:57.233772',
}
class TestExtension(base.TestCase):
    """Unit tests for the identity v2 Extension resource."""

    def test_basic(self):
        # Resource metadata and allowed operations.
        sot = extension.Extension()
        # Fixed corrupted attribute access: `resource_k | ey` -> `resource_key`.
        self.assertEqual('extension', sot.resource_key)
        self.assertEqual('extensions', sot.resources_key)
        self.assertEqual('/extensions', sot.base_path)
        self.assertFalse(sot.allow_create)
        self.assertTrue(sot.allow_fetch)
        self.assertFalse(sot.allow_commit)
        self.assertFalse(sot.allow_delete)
        self.assertTrue(sot.allow_list)

    def test_make_it(self):
        # Attributes are populated from the EXAMPLE payload.
        sot = extension.Extension(**EXAMPLE)
        self.assertEqual(EXAMPLE['alias'], sot.alias)
        self.assertEqual(EXAMPLE['description'], sot.description)
        self.assertEqual(EXAMPLE['links'], sot.links)
        self.assertEqual(EXAMPLE['name'], sot.name)
        self.assertEqual(EXAMPLE['namespace'], sot.namespace)
        self.assertEqual(EXAMPLE['updated'], sot.updated_at)

    def test_list(self):
        # list() must unwrap the nested {"extensions": {"values": [...]}}
        # response body into Extension objects.
        resp = mock.Mock()
        resp.body = {
            "extensions": {
                "values": [
                    {"name": "a"},
                    {"name": "b"},
                ]
            }
        }
        resp.json = mock.Mock(return_value=resp.body)
        session = mock.Mock()
        session.get = mock.Mock(return_value=resp)
        sot = extension.Extension(**EXAMPLE)
        result = sot.list(session)
        self.assertEqual(next(result).name, 'a')
        self.assertEqual(next(result).name, 'b')
        self.assertRaises(StopIteration, next, result)
#!/usr/bin/env/python
"""
We treat each silica atom as its own residue. As a consequence,
we have more than 10000 residues | in the system.
The silly PDB format only allows up to 9999 residues. We solve
this issue by manually creating a .gro file | , which allows for
up to 99999 residues
"""
from __future__ import print_function
import re
import MDAnalysis as mda
pdbf = 'SiO2carved_ovl1.5_protein_0.17.pdb'
# retrieve atom info
u = mda.Universe(pdbf)
GRO_FMT = ('{resid:>5d}{resname:<5s}{name:>5s}{id:>5d}'
'{pos[0]:8.3f}{pos[1]:8.3f}{pos[2]:8.3f}'
'\n')
gro = 'confined BSA, t= 0.0\n'
#natoml = '{:5d}\n'.format(len(u.atoms))
atoml = ''
iat=0
vals = dict()
last_resid = 0
for atom in u.atoms:
iat += 1
vals['id'] = atom.id
vals['name'] = atom.name
# residue name
vals['resname'] = atom.resname
if atom.name in ('SIO', 'OSI', 'OA'):
vals['resname'] = atom.name
elif atom.resname == 'SPC':
vals['resname'] = 'SOL'
# residue number
vals['resid'] = atom.resid
if vals['resname'] in ('SIO', 'OSI', 'OA'):
last_resid += 1
vals['resid'] = last_resid
else:
last_resid = atom.resid
vals['pos'] = atom.position/10.0 # from Angstroms to nm
atoml += GRO_FMT.format(**vals)
gro += '{:5d}\n'.format(iat)
gro += atoml
#retrieve the box size
pdb = open(pdbf).read()
RE_BOX = re.compile('CRYST1\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
xyz = [float(xi)/10.0 for xi in RE_BOX.search(pdb).groups()]
gro += ' {:9.4f} {:9.4f} {:9.4f}\n'.format(*xyz)
open('confinedBSA_0.gro', 'w').write(gro) |
lta) # [3, 6, 9, 12, 15]
start = 3
limit = 1
delta = -0.5
tf.range(start, limit, delta) # [3, 2.5, 2, 1.5]
limit = 5
tf.range(limit) # [0, 1, 2, 3, 4]
```
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
An 1-D `Tensor` of type `dtype`.
@compatibility(numpy)
Equivalent to np.arange
@end_compatibility
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max(
[arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, each reduced dimension is dropped from the rank;
  otherwise it is retained with length 1. With no entries in `axis`,
  every dimension is reduced and a single-element tensor results.

  For example:

  ```python
  x = tf.constant([[1, 1, 1], [1, 1, 1]])
  tf.reduce_sum(x)  # 6
  tf.reduce_sum(x, 0)  # [2, 2, 2]
  tf.reduce_sum(x, 1)  # [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True)  # [[3], [3]]
  tf.reduce_sum(x, [0, 1])  # 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.sum
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def count_nonzero(input_tensor,
                  axis=None,
                  keep_dims=False,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None):
  """Computes number of nonzero elements across dimensions of a tensor.
  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.
  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.
  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check.  Small values are **not** rounded to zero for purposes of
  the nonzero check.
  For example:
  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.count_nonzero(x)  # 3
  tf.count_nonzero(x, 0)  # [1, 2, 0]
  tf.count_nonzero(x, 1)  # [1, 2]
  tf.count_nonzero(x, 1, keep_dims=True)  # [[1], [2]]
  tf.count_nonzero(x, [0, 1])  # 3
  ```
  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
  Returns:
    The reduced tensor (number of nonzero values).
  """
  with ops.name_scope(name, "count_nonzero", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    # Zero of the input's dtype, for the exact-equality nonzero test above.
    zero = input_tensor.dtype.as_numpy_dtype()
    return cast(
        reduce_sum(
            # int64 reduction happens on GPU
            to_int64(gen_math_ops.not_equal(input_tensor, zero)),
            axis=axis,
            keep_dims=keep_dims,
            reduction_indices=reduction_indices),
        dtype=dtype)
def reduce_mean(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, each reduced dimension is dropped from the rank;
  otherwise it is retained with length 1. With no entries in `axis`,
  every dimension is reduced and a single-element tensor results.

  For example:

  ```python
  x = tf.constant([[1., 1.], [2., 2.]])
  tf.reduce_mean(x)  # 1.5
  tf.reduce_mean(x, 0)  # [1.5, 1.5]
  tf.reduce_mean(x, 1)  # [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with len |
eline
The PatternCounter is responsible for creating a CountCombiner and a
DecompGenerator, then running the DecompGenerator, getting DPTable
objects from the CountCombiner for the decompositions, and returning
the final count from the whole graph.
"""
def __init__(self, G, multi, td_list, coloring, pattern_class=KPattern,
             table_hints=None, decomp_class=CombinationsSweep,
             combiner_class=InclusionExclusion, verbose=False,
             big_component_file=None, tdd_file=None, dp_table_file=None,
             colset_count_file=None):
    """
    Create the CountCombiner and DecompGenerator objects
    Arguments:
        G: Host graph
        multi: list of pattern graphs to count (multi-pattern counting)
        td_list: treedepth bound for each pattern in multi
        coloring: a centered coloring of G
        pattern_class: The k-pattern class to use in dynamic programming
        table_hints: probably-respected options for the DP table
        decomp_class: DecompGenerator subclass
        combiner_class: CountCombiner subclass
        verbose: whether or not to print debugging information
        big_component_file, tdd_file, dp_table_file, colset_count_file:
            optional files for dumping execution data
    """
    # Avoid the shared mutable default argument ({} was shared across calls).
    if table_hints is None:
        table_hints = {}
    self.G = G
    self.multi = multi
    self.coloring = coloring
    self.pattern_class = pattern_class
    self.verbose = verbose
    self.big_component_file = big_component_file
    self.big_component = None
    # Fixed "|"-corrupted identifiers (tdd_file, execdata_file).
    self.tdd_file = tdd_file
    self.dp_table_file = dp_table_file
    self.dp_table = None
    self.colset_count_file = colset_count_file
    # One combiner per pattern, each told its pattern size and treedepth.
    self.combiners = [combiner_class(len(multi[idx]), coloring, table_hints, td=td_list[idx],
                                     execdata_file=colset_count_file) for idx in range(len(multi))]
    before_color_set_callbacks = [combiner.before_color_set for combiner in self.combiners]
    after_color_set_callbacks = [combiner.after_color_set for combiner in self.combiners]
    # TODO: calculate a lower bound on treedepth
    self.decomp_generator = decomp_class(G, coloring, len(max(multi, key=len)),
                                         min(td_list), len(min(multi, key=len)),
                                         before_color_set_callbacks,
                                         after_color_set_callbacks,
                                         self.verbose)
    def count_patterns_from_TDD(self, decomp, pat, idx):
        """
        Count the number of occurrences of our pattern in the given treedepth
        decomposition.

        Arguments:
            decomp: Treedepth decomposition of a graph
            pat: The pattern that we are counting
            idx: The index of our pattern in the multi-pattern list
        Returns:
            The count looked up at the root of the completed DP table.
        """
        # Keep this table if the big component is the current component
        keep_table = (self.big_component is decomp)
        # Get a table object for this decomposition from the CountCombiner
        table = self.combiners[idx].table(decomp)
        # create a post order traversal ordering with a DFS to use in the DP
        ordering = []
        q = deque([decomp.root])
        # print decomp.root, len(decomp),
        # print [(i+1,self.coloring[i]) for i in decomp]
        while q:
            curr = q.pop()
            ordering.append(curr)
            if not decomp.hasLeaf(curr):
                # reversed() keeps children left-to-right once the whole
                # ordering is reversed below
                q.extend(reversed(decomp.children(curr)))
        ordering.reverse()
        # Perform dynamic programming on the treedepth decomposition in the
        # post order traversal.
        # Bind methods to locals: avoids repeated attribute lookups in the
        # hot DP loops below.
        computeLeaf = table.computeLeaf
        computeInnerVertexSet = table.computeInnerVertexSet
        computeInnerVertexSetCleanup = table.computeInnerVertexSetCleanup
        computeInnerVertex = table.computeInnerVertex
        pattern_class = self.pattern_class
        # For each vertex in the TDD:
        for v in ordering:
            # If the vertex is a leaf
            if decomp.hasLeaf(v):
                for pattern in pattern_class.allPatterns(pat,
                                                         decomp.depth()):
                    # print "  Pattern: ", pattern
                    computeLeaf(v, pattern, pat)
            # If the vertex is internal:
            else:
                # Get counts for tuples of its children (join case)
                for c_idx in range(2, len(decomp.children(v))+1):
                    leftChildren = tuple(decomp.children(v)[:c_idx])
                    for pattern in pattern_class.allPatterns(pat,
                                                             decomp.depth()):
                        # print "  Pattern: ", pattern
                        computeInnerVertexSet(leftChildren, pattern, pat)
                    # Possibly clean up some unneeded data structures
                    computeInnerVertexSetCleanup(leftChildren, pat)
                # Combine child counts (forget case)
                for pattern in pattern_class.allPatterns(pat,
                                                         decomp.depth()):
                    computeInnerVertex(v, pattern, pat)
        # leaf = G.leaves().pop()
        # for pattern in patternClass.allPatterns(H, G.depth()):
        #     print G.isIsomorphism(leaf, pattern), pattern
        # Get the total count for the whole TDD
        trivialPattern = pattern_class(pat.nodes, None, pat)
        retVal = table.lookup((decomp.root,), trivialPattern)
        # if retVal > 0:
        #     print "Return value", retVal
        #     print table
        # Keep the table if this tdd is the big component
        if keep_table:
            self.dp_table = table
        return retVal
def count_patterns(self):
"""Count the number of occurrences of our pattern in our host graph."""
# Make a list to store counts of patterns specified
final_count = [0]*len(self.multi)
# For every TDD given to us by the decomposition generator
for tdd in self.decomp_generator:
# Remember the largest component we've seen if we're making
# visualization output
if self.big_component_file is not None:
if self.big_component is None:
self.big_component = tdd
elif len(self.big_component) < len(tdd):
self.big_component = tdd
# Count patterns in that TDD
for idx, pat in enumerate(self.multi):
count = self.count_patterns_from_TDD(tdd, pat, idx)
# Combine the count from the TDD
self.combiners[idx].combine_count(count)
# Populate the list of counts that will be returned
for idx in range(len(self.multi)):
final_count[idx] += self.combiners[idx].get_count()
# Write the largest component to a file
if self.big_component_file is not None:
from lib.graph.graphformats import write_edgelist
write_edgelist(self.big_component, self.big_component_file)
# Write the TDD of the largest component to a file
if self.tdd_file is not None:
for v in self.big_component.nodes:
parent = self.big_component.vertexRecords[v].parent
if parent is not None:
print >> self.tdd_file, v, parent
# Write the DP table for the largest component to a file
if self.dp_table_file is not None:
# Write the table in a machine-readable format
dp_table = self.dp_table.table
for v_tup in sorted(dp_table.keys()):
self.dp_table_file.write(str([v for v in v_tup]) + " {\n")
for pattern, count in sorted(dp_table[v_tup].iteritems()):
if count > 0:
self.dp_table_file.write("\t" + str(count) + "; ")
vString = [v for v in pattern.vertices]
bString = [str(v) + ":" + str(i) for v, i in
pattern.boundary.iteritems()]
bString = '[' + ', '.join(bString) + ']'
self.dp_table_file.write(
str(vString) + "; |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-10 18:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Collapse the three split profile address fields into one field."""

    dependencies = [
        ('profiles', '0028_auto_20170113_2133'),
    ]

    operations = [
        # Drop the old split address columns.
        migrations.RemoveField(model_name='profile', name='address1'),
        migrations.RemoveField(model_name='profile', name='address2'),
        migrations.RemoveField(model_name='profile', name='address3'),
        # Add the single consolidated address column.
        migrations.AddField(
            model_name='profile',
            name='address',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
|
__version__ = '1.2.0'
from .oss_authorizers import OssA | uthorizer
from .oss_file_operation import OssFileOperation
from .oss_fs import OssFS
from .oss_fs_impl import | OssFsImpl
|
etypes
import os
import posixpath
import re
from time import time
from urlparse import urlsplit, urlunsplit
from werkzeug.exceptions import NotFound
from werkzeug.http import is_resource_modified, http_date
from spa.static.handlers import StaticHandler
from spa.utils import clean_path
class HashCache(object):
    """In-memory memo of static-file content hashes and rewritten contents.

    Maps a file path to the content hash computed for it, and a file path
    to its (possibly rewritten) contents, so each is computed only once
    per process.
    """

    def __init__(self):
        # path -> hash string
        self.path_hashes = {}
        # path -> served file contents
        self.contents = {}

    def get_path_hash(self, path):
        """Return the cached hash for *path*, or None when not cached."""
        try:
            return self.path_hashes[path]
        except KeyError:
            return None

    def set_path_hash(self, path, path_hash):
        """Record *path_hash* as the content hash for *path*."""
        self.path_hashes[path] = path_hash

    def get_contents(self, path):
        """Return the cached contents for *path*, or None when not cached."""
        try:
            return self.contents[path]
        except KeyError:
            return None

    def set_contents(self, path, contents):
        """Record the served *contents* for *path*."""
        self.contents[path] = contents
class CacheBustingStaticHandler(StaticHandler):
    """Static handler that serves files addressed by hash-stamped names.

    Request paths look like 'app.deadbeef1234.css'; the hash is stripped,
    compared to a hash of the file's contents (memoized in a shared
    HashCache), and the file is served only when they match.  CSS responses
    additionally get their url()/@import references rewritten to point at
    hashed filenames.
    """

    # (pattern, replacement-template) pairs used to rewrite asset references
    # inside CSS: bare url(...) calls and @import "..." statements.
    css_url_patterns = (
        (re.compile(r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""", re.IGNORECASE),
         """url("{hashed_url}")"""),
        (re.compile(r"""(@import\s*["']\s*(.*?)["'])""", re.IGNORECASE),
         """@import url("{hashed_url}")"""),
    )

    def __init__(self, app, req, params, directory, hash_cache, **kwargs):
        # hash_cache is shared across handler instances (see SmartStatic) so
        # hashes and rewritten contents are computed once per process.
        self.hash_cache = hash_cache
        return super(CacheBustingStaticHandler, self).__init__(
            app, req, params, directory, **kwargs
        )

    def get(self, filepath):
        """Serve *filepath* of the form 'name.<hash>.ext'.

        404s when the path carries no hash, the file is missing, or the hash
        does not match the file's current contents.
        """
        unhashed_path, path_hash = parse_hashed_filepath(filepath)
        if unhashed_path is None:
            # No hash component in the requested path at all.
            return NotFound()
        if self.hash_cache.get_path_hash(unhashed_path) is None:
            # compute hash, and cache it.
            file = self.get_file(unhashed_path)
            if file is None:
                return NotFound()
            try:
                hash_str = get_hash(file.handle)
                self.hash_cache.set_path_hash(unhashed_path, hash_str)
            finally:
                file.handle.close()
        # If hash we were passed doesn't equal the one we've computed and
        # cached, then 404.
        if path_hash != self.hash_cache.get_path_hash(unhashed_path):
            return NotFound()
        # For CSS stylesheets only, we'll rewrite content so that url()
        # functions will point to hashed filenames instead of unhashed. The
        # rewritten CSS content will be kept in memory.
        if mimetypes.guess_type(filepath)[0] == 'text/css':
            return self.make_css_response(unhashed_path)
        return super(CacheBustingStaticHandler, self).get(unhashed_path)

    def make_css_response(self, filepath):
        """Return a WSGI app serving *filepath* with its asset URLs rewritten."""
        def resp(environ, start_response):
            file = self.get_file(filepath)
            try:
                headers = [('Date', http_date())]
                if self.cache:
                    timeout = self.cache_timeout
                    etag = self.generate_etag(file.mtime, file.size, file.name)
                    headers += [
                        ('Etag', '"%s"' % etag),
                        ('Cache-Control', 'max-age=%d, public' % timeout)
                    ]
                    # Short-circuit with a 304 when the client's cached copy
                    # is still current.
                    if not is_resource_modified(environ, etag, last_modified=file.mtime):
                        start_response('304 Not Modified', headers)
                        return []
                    headers.append(('Expires', http_date(time() + timeout)))
                else:
                    headers.append(('Cache-Control', 'public'))
                contents = self.hash_cache.get_contents(filepath)
                if contents is None:
                    # First hit: rewrite asset references and memoize the
                    # result for subsequent requests.
                    contents = file.handle.read()
                    for pat, tpl in self.css_url_patterns:
                        converter = self.get_converter(tpl)
                        contents = pat.sub(converter, contents)
                    self.hash_cache.set_contents(filepath, contents)
                headers.extend((
                    ('Content-Type', file.mimetype),
                    ('Content-Length', len(contents)),
                    ('Last-Modified', http_date(file.mtime))
                ))
                start_response('200 OK', headers)
                return [contents]
            finally:
                file.handle.close()
        return resp

    def get_converter(self, tpl):
        """Return a re.sub callback substituting hashed URLs into *tpl*."""
        def converter(matchobj):
            matched, url = matchobj.groups()
            # Fragments, absolute/external and data: URLs are left untouched.
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return url
            return tpl.format(hashed_url=self.convert_css_url(url))
        return converter

    def convert_css_url(self, css_url):
        """Map a URL referenced inside CSS to its hash-stamped equivalent."""
        split_url = urlsplit(css_url)
        url_path = split_url.path
        if not url_path.startswith('/'):
            abs_url_path = self.make_path_absolute(url_path)
        else:
            abs_url_path = posixpath.realpath(url_path)
        prefix = self.get_url_prefix()
        # now make the path as it would be passed in to this handler when
        # requested from the web. From there we can use existing methods on the
        # class to resolve to a real file.
        _, _, content_filepath = abs_url_path.partition(prefix)
        content_filepath = clean_path(content_filepath)
        content_file_hash = self.hash_cache.get_path_hash(content_filepath)
        if content_file_hash is None:
            content_file = self.get_file(content_filepath)
            if content_file is None:
                # Leave a visible marker in the CSS instead of raising.
                return 'NOT FOUND: "%s"' % url_path
            try:
                content_file_hash = get_hash(content_file.handle)
                # NOTE(review): the hash computed here is not stored back
                # into hash_cache (unlike in get()) -- confirm intentional.
            finally:
                content_file.handle.close()
        parts = list(split_url)
        parts[2] = add_hash_to_filepath(url_path, content_file_hash)
        url = urlunsplit(parts)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in css_url:
            parts = list(urlsplit(url))
            if not parts[3]:
                parts[2] += '?'
            url = urlunsplit(parts)
        return url

    def get_url_prefix(self):
        """
        Return the mount point for this handler. So if you had a route like
        this:

            ('/foo/bar/static/<path:filepath>', 'foo', Handler)

        Then this function should return '/foo/bar/static/'
        """
        env = self.request.environ
        filepath = self.params['filepath']
        # Strip the trailing filepath off SCRIPT_NAME + PATH_INFO to recover
        # the mount prefix.
        prefix, _, _ = (env['SCRIPT_NAME'] +
                        env['PATH_INFO']).rpartition(filepath)
        return prefix

    def make_path_absolute(self, path):
        """
        Given a relative url found inside the CSS file we're currently serving,
        return an absolute form of that URL.
        """
        env = self.request.environ
        pinfo = posixpath.dirname(env['PATH_INFO'])
        return posixpath.realpath(env['SCRIPT_NAME'] + pinfo + '/' + path)
def parse_hashed_filepath(filename, hash_len=12):
    """
    Given a name like '/static/my_file.deadbeef1234.txt', return a tuple of the
    file name without the hash, and the hash itself, like this:

        ('/static/my_file.txt', 'deadbeef1234')

    If no hash part is found, then return (None, None).

    Arguments:
        filename: path that may contain a '.<hex hash>.' component
        hash_len: expected length of the hex hash (default 12)
    """
    # The hash is lowercase hex (md5.hexdigest() output).  The original
    # character class '[0-9,a-f]' mistakenly accepted commas as hash
    # characters; it must be '[0-9a-f]'.
    pat = r'^(?P<before>.*)\.(?P<hash>[0-9a-f]{%s})(?P<after>.*?)$' % hash_len
    m = re.match(pat, filename)
    if m is None:
        return None, None
    parts = m.groupdict()
    return '{before}{after}'.format(**parts), parts['hash']
def add_hash_to_filepath(filepath, hash_str):
    """Insert *hash_str* into *filepath* just before its extension.

    'static/app.css' + 'deadbeef1234' -> 'static/app.deadbeef1234.css'
    """
    directory, basename = os.path.split(filepath)
    stem, extension = os.path.splitext(basename)
    hashed_name = "{0}.{1}{2}".format(stem, hash_str, extension)
    return os.path.join(directory, hashed_name)
def get_hash(lines, hash_len=12):
    """Return the first *hash_len* hex digits of the MD5 of *lines*.

    *lines* is any iterable of byte chunks (e.g. an open file handle);
    chunks are fed to the digest in order.
    """
    digest = hashlib.md5()
    for chunk in lines:
        digest.update(chunk)
    full_hex = digest.hexdigest()
    return full_hex[:hash_len]
class SmartStatic(object):
"""
A factory for making CacheBustingStaticHandler instances that share a cache
instance.
"""
def __init__(self, directory):
self.directory = directory
self.hash_cache = HashCache()
def __call__(self, app, req, params, **kwargs):
return CacheBustingStaticHandler(app, req, params,
directory=self.directory,
hash_cache=se |
from __future__ import unicode_literals
import frappe, unittest
from werkzeug.wrappers import Request
from werkzeug.test import EnvironBuilder
from frapp | e.website import render
def set_request(**kwargs):
    """Build a WSGI environ from *kwargs* and install it as the current
    frappe request (frappe.local.request)."""
    environ = EnvironBuilder(**kwargs).get_environ()
    frappe.local.request = Request(environ)
class TestWebsite(unittest.TestCase):
    """Smoke test for website page rendering."""

    def test_page_load(self):
        """Render the login page and verify its assets are inlined."""
        set_request(method='POST', path='login')
        response = render.render()
        # BUG FIX: this was assertTrue(response.status_code, 200), which
        # never checks anything -- assertTrue's second argument is the
        # failure message, so any truthy status code passed.
        self.assertEqual(response.status_code, 200)
        html = response.get_data()
        self.assertIn('/* login-css */', html)
        self.assertIn('// login.js', html)
        self.assertIn('<!-- login.html -->', html)
|
idget: gtk widget
"""
GrampsTab.__init__(self, dbstate, uistate, track, name)
eventbox = Gtk.EventBox()
eventbox.add(widget)
self.pack_start(eventbox, True, True, 0)
self._set_label(show_image=False)
eventbox.connect('key_press_event', self.key_pressed)
self.show_all()
    def is_empty(self):
        """
        Override base class: this tab always reports content, so it is
        never rendered as empty.
        """
        return False
#-------------------------------------------------------------------------
#
# EditNote
#
#-------------------------------------------------------------------------
class EditNote(EditPrimary):
    def __init__(self, dbstate, uistate, track, note, callback=None,
                 callertitle = None, extratype = None):
        """Create an EditNote window. Associate a note with the window.

        @param callertitle: Text passed by calling object to add to title
        @type callertitle: str
        @param extratype: Extra L{NoteType} values to add to the default types.
            They are removed from the ignorelist of L{NoteType}.
        @type extratype: list of int
        """
        self.callertitle = callertitle
        self.extratype = extratype
        # EditPrimary drives the rest of the setup via the _local_init /
        # _setup_fields / _connect_signals overrides below.
        EditPrimary.__init__(self, dbstate, uistate, track, note,
                             dbstate.db.get_note_from_handle,
                             dbstate.db.get_note_from_gramps_id, callback)
def empty_object(self):
"""Return an empty Note object for comparison for changes.
It is used by the base class L{EditPrimary}.
"""
empty_note = Note();
if self.extratype:
empty_note.set_type(self.extratype[0])
return empty_note
def get_menu_title(self):
if self.obj.get_handle():
if self.callertitle :
title = _('Note: %(id)s - %(context)s') % {
'id' : self.obj.get_gramps_id(),
'context' : self.callertitle
}
else :
title = _('Note: %s') % self.obj.get_gramps_id()
else:
if self.callertitle :
title = _('New Note - %(context)s') % {
'context' : self.callertitle
}
else :
title = _('New Note')
return title
    def get_custom_notetypes(self):
        """Return the user-defined note types stored in the database."""
        return self.dbstate.db.get_note_types()
    def _local_init(self):
        """Local initialization function.

        Perform basic initialization, including setting up widgets
        and the glade interface. It is called by the base class L{EditPrimary},
        and overridden here.
        """
        self.top = Glade()
        win = self.top.toplevel
        self.set_window(win, None, self.get_menu_title())
        self.setup_configs('interface.note', 700, 500)
        vboxnote = self.top.get_object('vbox131')
        notebook = self.top.get_object('note_notebook')
        #recreate start page as GrampsTab
        notebook.remove_page(0)
        self.ntab = NoteTab(self.dbstate, self.uistate, self.track,
                            _('_Note'), vboxnote)
        # Drop the reference on teardown so the tab widget is not leaked.
        self.track_ref_for_deletion("ntab")
        self.build_interface()
    def _setup_fields(self):
        """Get control widgets and attach them to Note's attributes.

        Each Monitored* wrapper binds a glade widget to a getter/setter
        pair on the Note object, honoring database read-only state.
        """
        self.type_selector = MonitoredDataType(
            self.top.get_object('type'),
            self.obj.set_type,
            self.obj.get_type,
            self.db.readonly,
            custom_values=self.get_custom_notetypes(),
            # extratype values are removed from the type ignore list
            ignore_values=self.obj.get_type().get_ignore_list(self.extratype))

        self.check = MonitoredCheckbox(
            self.obj,
            self.top.get_object('format'),
            self.obj.set_format,
            self.obj.get_format,
            readonly = self.db.readonly)

        self.gid = MonitoredEntry(
            self.top.get_object('id'),
            self.obj.set_gramps_id,
            self.obj.get_gramps_id,
            self.db.readonly)

        self.tags = MonitoredTagList(
            self.top.get_object("tag_label"),
            self.top.get_object("tag_button"),
            self.obj.set_tag_list,
            self.obj.get_tag_list,
            self.db,
            self.uistate, self.track,
            self.db.readonly)

        self.priv = PrivacyButton(
            self.top.get_object("private"),
            self.obj, self.db.readonly)
    def _connect_signals(self):
        """Connects any signals that need to be connected.

        Called by the init routine of the base class L{EditPrimary}.
        """
        self.define_ok_button(self.top.get_object('ok'), self.save)
        self.define_cancel_button(self.top.get_object('cancel'))
        # WIKI_HELP_PAGE / WIKI_HELP_SEC are module-level constants.
        self.define_help_button(self.top.get_object('help'),
                                WIKI_HELP_PAGE, WIKI_HELP_SEC)
    def _connect_db_signals(self):
        """
        Connect any signals that need to be connected.
        Called by the init routine of the base class (_EditPrimary).
        """
        # Close the editor when the note is rebuilt or deleted elsewhere.
        self._add_db_signal('note-rebuild', self._do_close)
        self._add_db_signal('note-delete', self.check_for_close)
    def _create_tabbed_pages(self):
        """Create the notebook tabs and inserts them into the main window."""
        notebook = self.top.get_object("note_notebook")
        # First tab: the note text editor built in _local_init.
        self._add_tab(notebook, self.ntab)
        # Second tab: everything in the database referencing this note.
        handles = self.dbstate.db.find_backlink_handles(self.obj.handle)
        self.rlist = NoteBackRefList(self.dbstate,
                                     self.uistate,
                                     self.track,
                                     handles)
        self.backref_tab = self._add_tab(notebook, self.rlist)
        # Drop references on teardown so the tab widgets are not leaked.
        self.track_ref_for_deletion("rlist")
        self.track_ref_for_deletion("backref_tab")

        self._setup_notebook_tabs(notebook)
    def build_interface(self):
        """Set up the styled-text editor, its toolbar, and initial content."""
        self.texteditor = self.top.get_object('texteditor')
        self.texteditor.set_editable(not self.dbstate.db.readonly)
        self.texteditor.set_wrap_mode(Gtk.WrapMode.WORD)

        # create a formatting toolbar
        if not self.dbstate.db.readonly:
            vbox = self.top.get_object('container')
            toolbar, self.action_group = self.texteditor.create_toolbar(
                self.uistate.uimanager, self.window)
            vbox.pack_start(toolbar, False, False, 0)
            self.texteditor.set_transient_parent(self.window)

        # setup initial values for textview and textbuffer
        if self.obj:
            self.empty = False
            # Load text with undo disabled so the initial fill cannot be
            # undone by the user.
            with self.texteditor.undo_disabled():
                self.texteditor.set_text(self.obj.get_styledtext())
                # Reset the undoable buffer:
                self.texteditor.reset()
            _LOG.debug("Initial Note: %s" % str(self.texteditor.get_text()))
        else:
            self.empty = True
    def build_menu_names(self, person):
        """
        Provide the information needed by the base class to define the
        window management menu entries.

        Returns a (menu label, window title) tuple.
        """
        return (_('Edit Note'), self.get_menu_title())
    def _post_init(self):
        """Give keyboard focus to the text editor once the window is up."""
        self.texteditor.grab_focus()
    def update_note(self):
        """Update the Note object with current value."""
        if self.obj:
            text = self.texteditor.get_text()
            self.obj.set_styledtext(text)
            _LOG.debug(str(text))
    def close(self, *obj):
        """Called when cancel button clicked."""
        # The buffer is copied back into the Note object even on cancel --
        # NOTE(review): confirm this is intended (the object is not saved
        # to the database here).
        self.update_note()
        super().close()
def save(self, *obj):
"""Save the data."""
self.ok_button.set_sensitive(False)
self.update_note()
if self.object_is_empty():
ErrorDialog(_("Cannot save note"),
_("No data exists for this note. Please "
"enter data or cancel the edit."),
parent=self.window)
self.ok_button.set_sensitive(True)
return
(uses_dupe_id, id) = self._uses_duplicate_id()
if uses_dupe_id:
msg1 = _("Cannot save note. ID already exists.")
msg2 = _("You have attempted to use the existing Gramps ID with "
|
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WAboutDialog/IOST_AboutDialog.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Basic import *
from IOST_Config import *
import gtk
import gtk.glade
import gobject
#======================================================================
class IOST_AboutDialog():
    """Controller for the Help -> About dialog.

    Loads the dialog widgets from a glade file (or reuses an existing
    builder) and wires up the close handlers.  Relies on the mixing
    class/caller to provide self.IOST_Objs (widget dictionary),
    self.IOST_Data (project metadata) and self.CreateObjsDictFromDict()
    -- they are not defined here; TODO(review) confirm.
    """

    def __init__(self, glade_filename, window_name, object_name, main_builder=None):
        """Load the About dialog widgets and set the version string.

        Arguments:
            glade_filename: path of the glade UI file (used only when no
                main_builder is supplied)
            window_name: key of the window in self.IOST_Objs
            object_name: key of the About dialog object within that window
            main_builder: optional already-populated gtk.Builder to reuse
        """
        self.IOST_AboutDialog_WindowName = window_name
        self.IOST_AboutDialog_ObjectName = object_name

        if not main_builder:
            self.IOST_AboutDialog_Builder = gtk.Builder()
            self.IOST_AboutDialog_Builder.add_from_file(glade_filename)
            self.IOST_AboutDialog_Builder.connect_signals(self)
        else:
            self.IOST_AboutDialog_Builder = main_builder

        self.CreateObjsDictFromDict(self.IOST_AboutDialog_WindowName,
                                    self.IOST_Objs[self.IOST_AboutDialog_WindowName],
                                    self.IOST_AboutDialog_Builder,
                                    0)
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].set_version(self.IOST_Data["ProjectVersion"])

    def _HideAboutDialog(self):
        """Hide the About dialog widget (shared by all close handlers).

        BUG FIX: the handlers previously referenced
        self.IOST_AboutDialog_object_name (and a _objectt_name typo),
        attributes that are never assigned -- __init__ sets
        IOST_AboutDialog_ObjectName -- so every close signal raised
        AttributeError.
        """
        self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()

    def Run(self, window_name, object_name):
        """Run the named dialog modally, then hide it when dismissed."""
        self.IOST_Objs[window_name][object_name].run()
        self.IOST_Objs[window_name][object_name].hide()

    def ActiveLink(self, object_name):
        """Signal handler: hide the dialog when its link is activated."""
        self._HideAboutDialog()

    def on_IOST_WHelpAbout_destroy(self, object, data=None):
        """Signal handler: the dialog window was destroyed."""
        self._HideAboutDialog()

    def on_IOST_WHelpAbout_DialogActionArea_destroy(self, object, data=None):
        """Signal handler: the dialog action area was destroyed."""
        self._HideAboutDialog()

    def on_IOST_WHelpAbout_button_press_event(self, widget, event, data=None):
        """Signal handler: button press on the dialog."""
        self._HideAboutDialog()

    def on_IOST_WHelpAbout_DialogVB_button_press_event(self, widget, event, data=None):
        """Signal handler: button press on the dialog vbox."""
        self._HideAboutDialog()
|
self.setGeometry(100, 150, 600, 400)
self.setWindowTitle('Configure Destinations')
# Let's make ourselves a nice little layout
conf_hbox = self.getChooser(self)
# Let's make the menubar
menubar = self.makeMenubar()
self.setMenuBar(menubar)
# Save changes?
save = self.makeSaveButton()
self.siteList = QtGui.QListWidget()
self.siteList.setSortingEnabled(True)
#self.siteList.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
# Main vertical layout
vbox = QtGui.QVBoxLayout()
vbox.addLayout(conf_hbox)
vbox.addWidget(self.siteList)
#vbox.addStretch(1) # This allows us to not occupy the entire vertical space of the window.
vbox.addLayout(save)
centralWidget = QtGui.QWidget(self)
centralWidget.setLayout(vbox)
self.setCentralWidget(centralWidget)
if len(self._config._list) > 0: self.activateConfig(self.conf_list.currentText())
else: self.newConfig()
#################################################################################################################################
#################################################### UI Helpers #################################################################
#################################################################################################################################
# Makes the top line chooser/new box
    def getChooser(self, widget):
        """Build and return the top-line layout: a combo box listing the
        known configurations plus a 'New' button."""
        conf_hbox = QtGui.QHBoxLayout()
        conf_hbox.addWidget(QtGui.QLabel('Select Configuration'))
        # First we create the label with all the configurations currently available
        self.conf_list = QtGui.QComboBox(widget)
        for this_config in self._config._list:
            self.conf_list.addItem(this_config)
        conf_hbox.addWidget(self.conf_list)
        # Switching the combo selection loads that configuration.
        self.connect(self.conf_list, QtCore.SIGNAL('currentIndexChanged(const QString&)'), self.activateConfig)
        # Populate the first config available
        # And an "Add New" box
        self.conf_newbutton = QtGui.QPushButton('New')
        widget.connect(self.conf_newbutton, QtCore.SIGNAL('clicked()'), self.newConfig) #self, QtCore.SLOT('newConfig()'))
        conf_hbox.addWidget(self.conf_newbutton)
        conf_hbox.addStretch(1) # This makes the line not take up the entire width of the application
        return conf_hbox
# Creates a menu bar and returns it to the caller - sadly they only look right if we're working with a QMainWindow
    def makeMenubar(self):
        """Create and return the menu bar (File menu with an Exit action)."""
        # Make a new menubar
        menubar = QtGui.QMenuBar()
        # First menu entry - File, as always
        file = menubar.addMenu('&File')

        # Last file entry - as always
        file.addSeparator()
        exit = QtGui.QAction('E&xit', self)
        exit.setShortcut('Ctrl+Q')
        self.connect(exit, QtCore.SIGNAL('triggered()'), QtCore.SLOT('close()'))
        file.addAction(exit)

        return menubar
# Returns a layout that contains a "Save Contents" button
    def makeSaveButton(self):
        """Build and return the bottom button row: Add/Edit/Delete entry
        buttons on the left and a 'Save Changes' button on the right."""
        hbox = QtGui.QHBoxLayout()
        # The + and - buttons
        addButton = QtGui.QPushButton(QtGui.QIcon('icons/add_16x16.png'), 'Add Entry')
        self.connect(addButton, QtCore.SIGNAL('clicked()'), self.addEntry)
        editButton = QtGui.QPushButton(QtGui.QIcon('icons/edit_16x16.png'), 'Edit Entry')
        self.connect(editButton, QtCore.SIGNAL('clicked()'), self.editEntry)
        delButton = QtGui.QPushButton(QtGui.QIcon('icons/delete_16x16.png'), 'Delete Entry')
        self.connect(delButton, QtCore.SIGNAL('clicked()'), self.delEntry)
        hbox.addWidget(addButton)
        hbox.addWidget(editButton)
        hbox.addWidget(delButton)
        # Now the save button
        hbox.addStretch(1)
        saveButton = QtGui.QPushButton('Save Changes')
        self.connect(saveButton, QtCore.SIGNAL('clicked()'), self.saveConfig)
        hbox.addWidget(saveButton)
        return hbox
# Listens for changes in the active configuration and will update the UI to reflect that
def activateConfig(self, config):
# Confirm that we want to discard the changes
if not self._confirmDiscardChanges():
return None
# Having attained that permission, let us proceed onward with great haste
try:
self.configObject = self._config.getConfig(str(config))
except Exception:
QtGui.QMessageBox.critical(self, 'Error', 'Error opening config file.')
self.configObject = None
self.siteList.clear()
if self.configObject != None:
for entry in self.configObject:
QtGui.QListWidgetItem(entry['name'] + '\t' + entry['destination'], self.siteList)
else:
self.configObject = []
# We don't have changes anymore
self.changes = False
self._configname = config
# We like sortings!
self.siteList.sortItems()
###############################################################################################################################
################################################### Listeners #################################################################
###############################################################################################################################
# Slot where the new button signal is connected
    def newConfig(self):
        """Prompt for a name and start a new, empty configuration."""
        # Confirm that it's OK for us to discard changes
        if not self._confirmDiscardChanges(): return None
        name, ok = QtGui.QInputDialog.getText(self, 'New Config', 'Name of new configuration', QtGui.QLineEdit.Normal, 'default')
        # QString.simplified() trims and collapses whitespace
        name = name.simplified()
        if ok and name != '':
            self._configname = name
            self.configObject = []
            self.conf_list.addItem(name)
    def saveConfig(self):
        """Persist the current configuration and clear the changes flag."""
        self._config.saveConfig(self._configname, self.configObject)
        QtGui.QMessageBox.information(self, 'Saved', 'Configuration saved')
        self.changes = False
# Displays a dialog that will allow the user to
# create a new element in the current configuration
    def addEntry(self):
        """Show the entry dialog and append a new site to the configuration.

        Rejects names that duplicate an existing entry.
        """
        dialog = DomEditEntryDialog(self, None)
        value = dialog.exec_()
        # Only if the user really pushed the 'OK' or 'Enter' button/key
        if value == QtGui.QDialog.Accepted:
            name = dialog.getSiteName()
            value = dialog.getSiteURL()
            user = dialog.getUser()
            pw = dialog.getPassword()
            # Makes sure it doesn't duplicate the name of another site
            duplicate = False
            for element in self.configObject:
                if element['name'] == name: duplicate = True
            # Only proceed if we are in a valid place
            if not duplicate:
                self.configObject.append({'name' : str(name), 'destination' : str(value), 'user' : str(user), 'pw' : str(pw)})
                # Displays in the dialog
                QtGui.QListWidgetItem(name + '\t' + value, self.siteList)
                # Flag the current entry as changed
                self.changes = True
                # Sorting is fun!
                self.siteList.sortItems()
            else:
                print 'Duplicate detected'
                QtGui.QMessageBox.warning(self, 'Duplicate Detected', 'That entry already exists, ignoring.')
        else:
            print 'Rejecting'
def delEntry(self):
item = self.siteList.takeItem(self.siteList.currentRow())
text = str(item.text())
name, trash, url = text.partition('\t')
# Remove from our list
for obj in self.configObject:
if obj['name'] == name: self.configObject.remove(obj)
# Make sure we know there are changes pending
self.changes = True
    def editEntry(self):
        """Open the entry dialog pre-filled with the selected entry and
        apply any edits back to the configuration and the list widget."""
        # Find out which one we're on
        item = self.siteList.currentItem()
        name, trash, url = str(item.text()).partition('\t')
        entry = None
        for obj in self.configObject:
            if obj['name'] == name: entry = obj
        # Create & show the dialog
        dialog = DomEditEntryDialog(self, entry)
        value = dialog.exec_()
        # Process answers
        if value == QtGui.QDialog.Accepted:
            # Iterate over the configs
            for obj in self.configObject:
                if obj['name'] == name:
                    idx = self.configObject.index(obj)
                    self.configObject[idx]['name'] = str(dialog.getSiteName())
                    self.configObject[idx]['destination'] = str(dialog.getSiteURL())
                    self.configObject[idx]['user'] = str(dialog.getUser())
                    self.configObject[idx]['pw'] = str(dialog.getPassword())
                    item.setText(self.configObject[idx]['name'] + '\t' + self.configObject[idx]['destination'])
                    break
#########################################################################################################################################
##################################################### Other Helper Functions ############################################################
############################################################ |
from collections import OrderedDict
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy as _
from oscar.apps.search import facets
# Facet counts in the shape returned by the search backend: 'fields' maps
# field facets to (value, count) pairs, 'queries' maps raw query strings
# to result counts.
FACET_COUNTS = {
    u'dates': {},
    u'fields': {
        'category': [('Fiction', 12), ('Horror', 6), ('Comedy', 3)],
        'product_class': [('Book', 18), ('DVD', 3)],
        'rating': [],
    },
    u'queries': {
        u'price_exact:[0 TO 20]': 15,
        u'price_exact:[20 TO 40]': 5,
        u'price_exact:[40 TO 60]': 1,
        u'price_exact:[60 TO *]': 0,
    }
}

# Same shape, as returned when the 20-40 price range facet is selected.
FACET_COUNTS_WITH_PRICE_RANGE_SELECTED = {
    u'dates': {},
    u'fields': {
        'category': [('Fiction', 12), ('Horror', 6), ('Comedy', 3)],
        'product_class': [('Book', 18), ('DVD', 3)],
        'rating': [],
    },
    u'queries': {
        u'price_exact:[0 TO 20]': 0,
        u'price_exact:[20 TO 40]': 21,
        u'price_exact:[40 TO 60]': 0,
        u'price_exact:[60 TO *]': 0,
    }
}

# OSCAR_SEARCH_FACETS setting applied to the tests below: three field
# facets plus a group of price-range query facets.
SEARCH_FACETS = {
    'fields': OrderedDict([
        ('product_class', {'name': _('Type'), 'field': 'product_class'}),
        ('rating', {'name': _('Rating'), 'field': 'rating'}),
        ('category', {'name': _('Category'), 'field': 'category'}),
    ]),
    'queries': OrderedDict([
        ('price_range',
         {
             'name': _('Price range'),
             'field': 'price',
             # (display label, query range) pairs
             'queries': [
                 (_('0 to 20'), u'[0 TO 20]'),
                 (_('20 to 40'), u'[20 TO 40]'),
                 (_('40 to 60'), u'[40 TO 60]'),
                 (_('60+'), u'[60 TO *]'),
             ]
         }),
    ]),
}
@override_settings(OSCAR_SEARCH_FACETS=SEARCH_FACETS)
class TestFacetMunger(TestCase):
    """Tests for facets.FacetMunger's conversion of raw backend facet
    counts into template-ready facet data."""

    def test_with_no_facets_selected(self):
        """Field facets are exposed with count/name/select_url metadata."""
        munger = facets.FacetMunger(
            path='/search?q=test',
            selected_multi_facets={},
            facet_counts=FACET_COUNTS)
        data = munger.facet_data()
        self.assertTrue('category' in data)
        self.assertEqual(3, len(data['category']['results']))
        # Check a sample facet dict has the right keys
        datum = data['category']['results'][0]
        for key in ('count', 'disabled', 'name', 'select_url',
                    'selected', 'show_count'):
            self.assertTrue(key in datum)
        self.assertEqual(datum['count'], 12)
        self.assertEqual(datum['name'], 'Fiction')
        self.assertFalse(datum['selected'])

    def test_pagination_params_are_reset(self):
        """Selecting a facet must drop any existing page= parameter."""
        munger = facets.FacetMunger(
            path='/search?q=test&page=2',
            selected_multi_facets={},
            facet_counts=FACET_COUNTS)
        data = munger.facet_data()
        # Check a sample facet dict has the right keys
        for facet_data in data.values():
            for result in facet_data['results']:
                self.assertTrue('page' not in result['select_url'])

    def test_with_price_facets_selected(self):
        """A selected query facet is marked selected and gets a deselect_url."""
        munger = facets.FacetMunger(
            path='/search?q=test&selected_facets=price_exact%3A%5B20+TO+40%5D',
            selected_multi_facets={'price_exact': [u'[20 TO 40]']},
            facet_counts=FACET_COUNTS_WITH_PRICE_RANGE_SELECTED)
        data = munger.facet_data()
        self.assertTrue('price_range' in data)
        self.assertEqual(4, len(data['price_range']['results']))
        # Check a sample facet dict has the right keys
        datum = data['price_range']['results'][1]
        for key in ('count', 'disabled', 'name', 'deselect_url',
                    'selected', 'show_count'):
            self.assertTrue(key in datum)
        self.assertEqual(datum['count'], 21)
        self.assertTrue(datum['selected'])
|
#
| # Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException
from ttypes | import *
|
.extmath import logsumexp
from .utils.fixes import unique
from .utils import check_arrays, array2d
__all__ = ['LDA']
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
Linear Discriminant Analysis (LDA)
A classifier with a linear decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that
all classes share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality
of the input, by projecting it to the most discriminative
directions.
Parameters
----------
n_components: int
Number of components (< n_classes - 1) for dimensionality reduction
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`xbar_` : float, shape = [n_features]
Over all mean
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariance_` : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes)
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
if self.priors is not None:
if (self.priors < 0).any():
raise ValueError('priors must be non-negative')
if self.priors.sum() != 1:
print 'warning: the priors do not sum to 1. Renormalizing'
self.priors = self.priors / self.priors.sum()
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in `self.covariance_` attribute.
"""
X, y = check_arrays(X, y, sparse_format='dense')
self.classes_, y = unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, 0)
# ----------------------------
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = float(1) / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scaling = (V[:rank] / std).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scaling)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scaling = np.dot(scaling, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scaling)
self.int | ercept_ = (-0.5 * np.sum(self.coef_ ** 2, axis=1) +
np.log(self.priors_))
| return self
@property
def classes(self):
warnings.warn("LDA.classes is deprecated and will be removed in 0.14. "
"Use LDA.classes_ instead.", DeprecationWarning,
stacklevel=2)
return self.classes_
def _decision_function(self, X):
X = array2d(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
return np.dot(X, self.coef_.T) + self.intercept_
def decision_function(self, X):
"""
This function return the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def transform(self, X):
"""
Project the data so as to maximize class separation (large separation
between projected class means and small variance within each class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = array2d(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:n_comp].T)
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""
This function return posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self._decision_function(X)
# compute the likelihood of the underlying ga |
"""
simpleSetup2: runs POST the JS setup
NOTE: for 1.2 USERS (and their signatures) still done here. Next jsSetup will take this over and the User setup part
will be removed from here.
"""
import os
import sys
import logging
import time
sys.path = ['rasUtilities'] + sys.path
import OSEHRASetup
from OSEHRAHelper import ConnectToMUMPS, PROMPT
logging.basicConfig(level=logging.INFO,
#filename='debug.log',
#format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
"""
ConnectToMUMPS relies on environment:
- is GTM => defined os.getenv('gtm_dist') == /home/nodevista/lib/gtm
- is Linux => defined sys.platform == 'linux2'
"""
# print "Platform", sys.platform, "GT.M MUMPS VM", os.getenv('gtm_dist'), "GTM Prompt", os.getenv("gtm_prompt")
LOGFILE = '/home/nodevista/log/simpleSetup2.txt'
"""
Expect to be called from Shell - PRINT can be read with
result=`python simpleSetup.py`
if [ "$result" != "OK" ]; then ...
"""
def simpleSetup2():
try:
print "Connecting to MUMPS roll n scroll ..."
VistA=ConnectToMUMPS(LOGFILE)
except:
print "EXIT_PYS_CANT_CONNECT_TO_MUMPS"
return
# NB: simpleSetup and postImportSetupBasics should go too
try:
print "Now setting up Users (signatures only now) ..."
postImportSetupUsers(VistA)
except Exception as e:
print "EXIT_PYS_PROBLEM_SETTING_USERS_BUT_GOING_ON"
VistA=ConnectToMUMPS(LOGFILE)
try:
print "Now setting up Patients ..."
# have to reset VistA as Signature Setup halts from VISTA
time.sleep(10)
VistA=ConnectToMUMPS(LOGFILE) # reset up VISTA
postImportSetupPatients(VistA)
except:
print "EXIT_PYS_CANT_SETUP_PATIENTS"
return
print "Setup User, Patient ... Complete OK"
def postImportSetupUsers(VistA):
    """
    Setup Users - paired down in v1.2. Now only resetting signatures.

    The call order matters: institution/division must exist before users,
    and signature setup requires reconnecting after each halt.
    """
    # Required to add Patient, User etc
    OSEHRASetup.addSystemManager(VistA)
    # Open FileMan and create the VistA Health Care institution
    OSEHRASetup.addInstitution(VistA,"VISTA HEALTH CARE","999")
    # Create the Medical Center Division of
    # the VistA Health Care institution
    OSEHRASetup.addDivision(VistA,'VISTA MEDICAL CENTER',"6101","999")
    # The Sikuli test for CPRS orders a Streptozyme test for the patient
    # This information ensures the test can be ordered at the VistA Health care
    # Facility
    OSEHRASetup.setupStrepTest(VistA)
    # Sign on as the system manager created above before adding users.
    OSEHRASetup.signonZU(VistA,"SM1234","SM1234!!")
    """
    Note that these verifies are temporary - VISTA forces a reset which is done as part of
    the electronic signature setups below. It's the reset signature that will be used from
    now on
    """
    # Enter the Doctor Robert Alexander
    OSEHRASetup.addDoctor(VistA,"ALEXANDER,ROBERT","RA","000000029","M","fakedoc1","2Doc!@#$")
    #Enter the Nurse Mary Smith
    OSEHRASetup.addNurse(VistA,'SMITH,MARY','MS','000000030','F','fakenurse1','2Nur!@#$')
    # Add a clerk user with permissions for Problem List Data entry
    OSEHRASetup.addClerk(VistA,"CLERK,JOE","JC","000000112","M","fakeclerk1","2Cle!@#$")
    # Add a Pharmacist
    OSEHRASetup.addPharmacist(VistA,"SHARMA,FRED","FS","000000031","M","fakepharma1","2Pha!@#$");
    #Create a new Order Menu
    OSEHRASetup.createOrderMenu(VistA)
    #Give all users of the instance permission to mark allergies as "Entered in error')
    OSEHRASetup.addAllergiesPermission(VistA)
    #Give Mary Smith permission to create shared templates
    OSEHRASetup.addTemplatePermission(VistA,"MS")
    # Add clinic via the XUP menu to allow scheduling
    OSEHRASetup.createClinic(VistA,'VISTA HEALTH CARE','VHC','M')
    """
    The sleep and ConnectToMUMPS is needed as createClinic has halted and
    setup signature does a similar thing. Could debug and stop the halts but
    as replacing with JS, not worth it.
    Same "logic" is in OSEHRA's PostImportSetupScript.py
    """
    time.sleep(10)
    VistA=ConnectToMUMPS(LOGFILE)
    #Set up the Doctors electronic signature
    OSEHRASetup.setupElectronicSignature(VistA,"fakedoc1",'2Doc!@#$','1Doc!@#$','ROBA123')
    # Reconnect: signature setup halts the VISTA session, so a fresh
    # connection is needed before the next user's signature.
    VistA=ConnectToMUMPS(LOGFILE)
    # #Set up the Nurse electronic signature
    OSEHRASetup.setupElectronicSignature(VistA,"fakenurse1","2Nur!@#$","1Nur!@#$","MARYS123")
    VistA=ConnectToMUMPS(LOGFILE)
    # #Set up the Clerk verification code
    OSEHRASetup.setupElectronicSignature(VistA,"fakeclerk1","2Cle!@#$","1Cle!@#$","CLERKJ123")
def postImportSetupPatients(VistA):
    """
    Register the patients listed in the CSV data file via the VistA
    registration menu.
    """
    # Add patient to the instance using the registration menu.
    # Not using the Clerk user to avoid dropping the connection on the error when trying to connect to the MPI.
    # and the Register a Patient menu option.
    # The patient can be a veteran but not service connected
    # Function arguments:
    # VistA, Patient Name, Patient Sex,Patient DOB, Patient SSN, Patient Veteran?
    OSEHRASetup.addPatient(VistA,'dataFiles/patdata0.csv')
def main():
    # Entry point: run the full post-import setup sequence.
    simpleSetup2()

if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.